Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/common/locomo.c | 10
-rw-r--r--  arch/arm/mach-ixp23xx/include/mach/memory.h | 2
-rw-r--r--  arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c | 4
-rw-r--r--  arch/arm/mach-mmp/include/mach/uncompress.h | 5
-rw-r--r--  arch/arm/mach-orion5x/wrt350n-v2-setup.c | 2
-rw-r--r--  arch/arm/mach-pxa/Kconfig | 11
-rw-r--r--  arch/arm/mach-pxa/imote2.c | 4
-rw-r--r--  arch/arm/mach-pxa/include/mach/uncompress.h | 11
-rw-r--r--  arch/arm/mach-pxa/raumfeld.c | 4
-rw-r--r--  arch/arm/mach-pxa/stargate2.c | 5
-rw-r--r--  arch/arm/tools/mach-types | 75
-rw-r--r--  arch/cris/arch-v32/drivers/pci/bios.c | 2
-rw-r--r--  arch/frv/mb93090-mb00/pci-frv.c | 6
-rw-r--r--  arch/microblaze/Kconfig | 3
-rw-r--r--  arch/microblaze/Makefile | 4
-rw-r--r--  arch/microblaze/boot/Makefile | 6
-rw-r--r--  arch/microblaze/include/asm/processor.h | 1
-rw-r--r--  arch/microblaze/include/asm/segment.h | 49
-rw-r--r--  arch/microblaze/include/asm/thread_info.h | 5
-rw-r--r--  arch/microblaze/include/asm/tlbflush.h | 3
-rw-r--r--  arch/microblaze/include/asm/uaccess.h | 447
-rw-r--r--  arch/microblaze/kernel/dma.c | 2
-rw-r--r--  arch/microblaze/kernel/head.S | 12
-rw-r--r--  arch/microblaze/kernel/hw_exception_handler.S | 112
-rw-r--r--  arch/microblaze/kernel/misc.S | 15
-rw-r--r--  arch/microblaze/kernel/process.c | 10
-rw-r--r--  arch/microblaze/kernel/setup.c | 24
-rw-r--r--  arch/microblaze/kernel/traps.c | 6
-rw-r--r--  arch/microblaze/lib/Makefile | 3
-rw-r--r--  arch/microblaze/lib/fastcopy.S | 6
-rw-r--r--  arch/microblaze/lib/memcpy.c | 2
-rw-r--r--  arch/microblaze/lib/memset.c | 15
-rw-r--r--  arch/microblaze/lib/uaccess.c | 48
-rw-r--r--  arch/microblaze/lib/uaccess_old.S | 45
-rw-r--r--  arch/microblaze/mm/fault.c | 24
-rw-r--r--  arch/microblaze/mm/init.c | 9
-rw-r--r--  arch/microblaze/mm/pgtable.c | 2
-rw-r--r--  arch/powerpc/include/asm/asm-compat.h | 2
-rw-r--r--  arch/powerpc/kernel/misc.S | 28
-rw-r--r--  arch/s390/boot/compressed/misc.c | 8
-rw-r--r--  arch/s390/include/asm/system.h | 9
-rw-r--r--  arch/s390/kernel/head.S | 3
-rw-r--r--  arch/s390/kernel/head64.S | 2
-rw-r--r--  arch/s390/kernel/setup.c | 4
-rw-r--r--  arch/s390/kernel/smp.c | 6
-rw-r--r--  arch/s390/mm/maccess.c | 26
-rw-r--r--  arch/sh/boards/mach-ecovec24/setup.c | 2
-rw-r--r--  arch/sh/boards/mach-se/7724/setup.c | 8
-rw-r--r--  arch/sh/include/cpu-sh4/cpu/mmu_context.h | 2
-rw-r--r--  arch/sh/include/cpu-sh4/cpu/watchdog.h | 6
-rw-r--r--  arch/sh/kernel/dwarf.c | 4
-rw-r--r--  arch/sh/kernel/idle.c | 2
-rw-r--r--  arch/sh/kernel/perf_event.c | 2
-rw-r--r--  arch/sh/kernel/process_64.c | 7
-rw-r--r--  arch/sh/mm/pmb.c | 4
-rw-r--r--  arch/sh/mm/tlb-pteaex.c | 2
-rw-r--r--  arch/sh/mm/tlbflush_32.c | 21
-rw-r--r--  arch/sparc/include/asm/stat.h | 4
-rw-r--r--  arch/sparc/kernel/perf_event.c | 2
-rw-r--r--  arch/sparc/kernel/sysfs.c | 4
-rw-r--r--  arch/sparc/kernel/us2e_cpufreq.c | 8
-rw-r--r--  arch/sparc/kernel/us3_cpufreq.c | 8
-rw-r--r--  arch/x86/include/asm/fixmap.h | 6
-rw-r--r--  arch/x86/include/asm/hw_irq.h | 1
-rw-r--r--  arch/x86/include/asm/msr-index.h | 2
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 8
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 54
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd.c | 84
-rw-r--r--  arch/x86/kernel/dumpstack.h | 5
-rw-r--r--  arch/x86/kernel/head32.c | 4
-rw-r--r--  arch/x86/kernel/head64.c | 3
-rw-r--r--  arch/x86/kernel/irqinit.c | 22
-rw-r--r--  arch/x86/kernel/kgdb.c | 2
-rw-r--r--  arch/x86/kernel/process.c | 32
-rw-r--r--  arch/x86/kernel/setup.c | 10
-rw-r--r--  arch/x86/kernel/smpboot.c | 6
-rw-r--r--  arch/x86/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/x86/mm/init.c | 32
-rw-r--r--  arch/x86/pci/acpi.c | 22
-rw-r--r--  arch/x86/pci/i386.c | 5
80 files changed, 859 insertions, 609 deletions
diff --git a/arch/arm/common/locomo.c b/arch/arm/common/locomo.c
index 90ae00b631c2..9dff07c80ddb 100644
--- a/arch/arm/common/locomo.c
+++ b/arch/arm/common/locomo.c
@@ -290,7 +290,7 @@ static int locomo_suspend(struct platform_device *dev, pm_message_t state)
290 save->LCM_GPO = locomo_readl(lchip->base + LOCOMO_GPO); /* GPIO */ 290 save->LCM_GPO = locomo_readl(lchip->base + LOCOMO_GPO); /* GPIO */
291 locomo_writel(0x00, lchip->base + LOCOMO_GPO); 291 locomo_writel(0x00, lchip->base + LOCOMO_GPO);
292 save->LCM_SPICT = locomo_readl(lchip->base + LOCOMO_SPI + LOCOMO_SPICT); /* SPI */ 292 save->LCM_SPICT = locomo_readl(lchip->base + LOCOMO_SPI + LOCOMO_SPICT); /* SPI */
293 locomo_writel(0x40, lchip->base + LOCOMO_SPICT); 293 locomo_writel(0x40, lchip->base + LOCOMO_SPI + LOCOMO_SPICT);
294 save->LCM_GPE = locomo_readl(lchip->base + LOCOMO_GPE); /* GPIO */ 294 save->LCM_GPE = locomo_readl(lchip->base + LOCOMO_GPE); /* GPIO */
295 locomo_writel(0x00, lchip->base + LOCOMO_GPE); 295 locomo_writel(0x00, lchip->base + LOCOMO_GPE);
296 save->LCM_ASD = locomo_readl(lchip->base + LOCOMO_ASD); /* ADSTART */ 296 save->LCM_ASD = locomo_readl(lchip->base + LOCOMO_ASD); /* ADSTART */
@@ -418,7 +418,7 @@ __locomo_probe(struct device *me, struct resource *mem, int irq)
418 /* Longtime timer */ 418 /* Longtime timer */
419 locomo_writel(0, lchip->base + LOCOMO_LTINT); 419 locomo_writel(0, lchip->base + LOCOMO_LTINT);
420 /* SPI */ 420 /* SPI */
421 locomo_writel(0, lchip->base + LOCOMO_SPIIE); 421 locomo_writel(0, lchip->base + LOCOMO_SPI + LOCOMO_SPIIE);
422 422
423 locomo_writel(6 + 8 + 320 + 30 - 10, lchip->base + LOCOMO_ASD); 423 locomo_writel(6 + 8 + 320 + 30 - 10, lchip->base + LOCOMO_ASD);
424 r = locomo_readl(lchip->base + LOCOMO_ASD); 424 r = locomo_readl(lchip->base + LOCOMO_ASD);
@@ -707,7 +707,7 @@ void locomo_m62332_senddata(struct locomo_dev *ldev, unsigned int dac_data, int
707 udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */ 707 udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */
708 if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */ 708 if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */
709 printk(KERN_WARNING "locomo: m62332_senddata Error 1\n"); 709 printk(KERN_WARNING "locomo: m62332_senddata Error 1\n");
710 return; 710 goto out;
711 } 711 }
712 712
713 /* Send Sub address (LSB is channel select) */ 713 /* Send Sub address (LSB is channel select) */
@@ -735,7 +735,7 @@ void locomo_m62332_senddata(struct locomo_dev *ldev, unsigned int dac_data, int
735 udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */ 735 udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */
736 if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */ 736 if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */
737 printk(KERN_WARNING "locomo: m62332_senddata Error 2\n"); 737 printk(KERN_WARNING "locomo: m62332_senddata Error 2\n");
738 return; 738 goto out;
739 } 739 }
740 740
741 /* Send DAC data */ 741 /* Send DAC data */
@@ -760,9 +760,9 @@ void locomo_m62332_senddata(struct locomo_dev *ldev, unsigned int dac_data, int
760 udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */ 760 udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */
761 if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */ 761 if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */
762 printk(KERN_WARNING "locomo: m62332_senddata Error 3\n"); 762 printk(KERN_WARNING "locomo: m62332_senddata Error 3\n");
763 return;
764 } 763 }
765 764
765out:
766 /* stop */ 766 /* stop */
767 r = locomo_readl(mapbase + LOCOMO_DAC); 767 r = locomo_readl(mapbase + LOCOMO_DAC);
768 r &= ~(LOCOMO_DAC_SCLOEB); 768 r &= ~(LOCOMO_DAC_SCLOEB);
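
The locomo.c hunks above replace the early "return" statements in locomo_m62332_senddata() with "goto out", so the DAC bus is always driven back to the stop condition even when a transfer step fails. A minimal sketch of that single-exit pattern, using hypothetical helpers (send_byte(), drive_stop_condition()) rather than the real locomo accessors:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the locomo register transfers. */
static bool send_byte(unsigned char b) { return b != 0xff; }	/* pretend 0xff is NAKed */
static void drive_stop_condition(void) { puts("stop condition driven"); }

/* Single exit point: every failure path still reaches the stop sequence. */
static void senddata(unsigned char subaddr, unsigned char data)
{
	if (!send_byte(subaddr))
		goto out;
	if (!send_byte(data))
		goto out;
out:
	drive_stop_condition();
}

int main(void)
{
	senddata(0x01, 0xff);	/* the data byte fails, but the bus is still stopped */
	return 0;
}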
diff --git a/arch/arm/mach-ixp23xx/include/mach/memory.h b/arch/arm/mach-ixp23xx/include/mach/memory.h
index 94a3a86cfeb8..6ef65d813f16 100644
--- a/arch/arm/mach-ixp23xx/include/mach/memory.h
+++ b/arch/arm/mach-ixp23xx/include/mach/memory.h
@@ -19,7 +19,7 @@
19 */ 19 */
20#define PHYS_OFFSET (0x00000000) 20#define PHYS_OFFSET (0x00000000)
21 21
22#define IXP23XX_PCI_SDRAM_OFFSET (*((volatile int *)IXP23XX_PCI_SDRAM_BAR) & 0xfffffff0)) 22#define IXP23XX_PCI_SDRAM_OFFSET (*((volatile int *)IXP23XX_PCI_SDRAM_BAR) & 0xfffffff0)
23 23
24#define __phys_to_bus(x) ((x) + (IXP23XX_PCI_SDRAM_OFFSET - PHYS_OFFSET)) 24#define __phys_to_bus(x) ((x) + (IXP23XX_PCI_SDRAM_OFFSET - PHYS_OFFSET))
25#define __bus_to_phys(x) ((x) - (IXP23XX_PCI_SDRAM_OFFSET - PHYS_OFFSET)) 25#define __bus_to_phys(x) ((x) - (IXP23XX_PCI_SDRAM_OFFSET - PHYS_OFFSET))
diff --git a/arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c b/arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c
index 0358f45766cb..5e6f711b1c67 100644
--- a/arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c
+++ b/arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c
@@ -74,9 +74,9 @@ static struct gpio_keys_button mv88f6281gtw_ge_button_pins[] = {
74 .desc = "SWR Button", 74 .desc = "SWR Button",
75 .active_low = 1, 75 .active_low = 1,
76 }, { 76 }, {
77 .code = KEY_F1, 77 .code = KEY_WPS_BUTTON,
78 .gpio = 46, 78 .gpio = 46,
79 .desc = "WPS Button(F1)", 79 .desc = "WPS Button",
80 .active_low = 1, 80 .active_low = 1,
81 }, 81 },
82}; 82};
diff --git a/arch/arm/mach-mmp/include/mach/uncompress.h b/arch/arm/mach-mmp/include/mach/uncompress.h
index a7dcc5307216..85bd8a2d84b5 100644
--- a/arch/arm/mach-mmp/include/mach/uncompress.h
+++ b/arch/arm/mach-mmp/include/mach/uncompress.h
@@ -14,7 +14,7 @@
14#define UART2_BASE (APB_PHYS_BASE + 0x17000) 14#define UART2_BASE (APB_PHYS_BASE + 0x17000)
15#define UART3_BASE (APB_PHYS_BASE + 0x18000) 15#define UART3_BASE (APB_PHYS_BASE + 0x18000)
16 16
17static volatile unsigned long *UART = (unsigned long *)UART2_BASE; 17static volatile unsigned long *UART;
18 18
19static inline void putc(char c) 19static inline void putc(char c)
20{ 20{
@@ -37,6 +37,9 @@ static inline void flush(void)
37 37
38static inline void arch_decomp_setup(void) 38static inline void arch_decomp_setup(void)
39{ 39{
40 /* default to UART2 */
41 UART = (unsigned long *)UART2_BASE;
42
40 if (machine_is_avengers_lite()) 43 if (machine_is_avengers_lite())
41 UART = (unsigned long *)UART3_BASE; 44 UART = (unsigned long *)UART3_BASE;
42} 45}
diff --git a/arch/arm/mach-orion5x/wrt350n-v2-setup.c b/arch/arm/mach-orion5x/wrt350n-v2-setup.c
index cb0feca193d4..f9f222ebb7ed 100644
--- a/arch/arm/mach-orion5x/wrt350n-v2-setup.c
+++ b/arch/arm/mach-orion5x/wrt350n-v2-setup.c
@@ -77,7 +77,7 @@ static struct gpio_keys_button wrt350n_v2_buttons[] = {
77 .desc = "Reset Button", 77 .desc = "Reset Button",
78 .active_low = 1, 78 .active_low = 1,
79 }, { 79 }, {
80 .code = KEY_WLAN, 80 .code = KEY_WPS_BUTTON,
81 .gpio = 2, 81 .gpio = 2,
82 .desc = "WPS Button", 82 .desc = "WPS Button",
83 .active_low = 1, 83 .active_low = 1,
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index 38fbd0a0e402..5b6ee46fa7f6 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -272,7 +272,6 @@ config MACH_H5000
272config MACH_HIMALAYA 272config MACH_HIMALAYA
273 bool "HTC Himalaya Support" 273 bool "HTC Himalaya Support"
274 select CPU_PXA26x 274 select CPU_PXA26x
275 select FB_W100
276 275
277config MACH_MAGICIAN 276config MACH_MAGICIAN
278 bool "Enable HTC Magician Support" 277 bool "Enable HTC Magician Support"
@@ -454,6 +453,13 @@ config PXA_SHARPSL
454config SHARPSL_PM 453config SHARPSL_PM
455 bool 454 bool
456 select APM_EMULATION 455 select APM_EMULATION
456 select SHARPSL_PM_MAX1111
457
458config SHARPSL_PM_MAX1111
459 bool
460 depends on !CORGI_SSP_DEPRECATED
461 select HWMON
462 select SENSORS_MAX1111
457 463
458config CORGI_SSP_DEPRECATED 464config CORGI_SSP_DEPRECATED
459 bool 465 bool
@@ -547,7 +553,6 @@ config MACH_E740
547 bool "Toshiba e740" 553 bool "Toshiba e740"
548 default y 554 default y
549 depends on ARCH_PXA_ESERIES 555 depends on ARCH_PXA_ESERIES
550 select FB_W100
551 help 556 help
552 Say Y here if you intend to run this kernel on a Toshiba 557 Say Y here if you intend to run this kernel on a Toshiba
553 e740 family PDA. 558 e740 family PDA.
@@ -556,7 +561,6 @@ config MACH_E750
556 bool "Toshiba e750" 561 bool "Toshiba e750"
557 default y 562 default y
558 depends on ARCH_PXA_ESERIES 563 depends on ARCH_PXA_ESERIES
559 select FB_W100
560 help 564 help
561 Say Y here if you intend to run this kernel on a Toshiba 565 Say Y here if you intend to run this kernel on a Toshiba
562 e750 family PDA. 566 e750 family PDA.
@@ -573,7 +577,6 @@ config MACH_E800
573 bool "Toshiba e800" 577 bool "Toshiba e800"
574 default y 578 default y
575 depends on ARCH_PXA_ESERIES 579 depends on ARCH_PXA_ESERIES
576 select FB_W100
577 help 580 help
578 Say Y here if you intend to run this kernel on a Toshiba 581 Say Y here if you intend to run this kernel on a Toshiba
579 e800 family PDA. 582 e800 family PDA.
diff --git a/arch/arm/mach-pxa/imote2.c b/arch/arm/mach-pxa/imote2.c
index b2f878bd460b..5161dca8ccc0 100644
--- a/arch/arm/mach-pxa/imote2.c
+++ b/arch/arm/mach-pxa/imote2.c
@@ -559,10 +559,6 @@ static void __init imote2_init(void)
559 pxa_set_btuart_info(NULL); 559 pxa_set_btuart_info(NULL);
560 pxa_set_stuart_info(NULL); 560 pxa_set_stuart_info(NULL);
561 561
562 /* SPI chip select directions - all other directions should
563 * be handled by drivers.*/
564 gpio_direction_output(37, 0);
565
566 platform_add_devices(imote2_devices, ARRAY_SIZE(imote2_devices)); 562 platform_add_devices(imote2_devices, ARRAY_SIZE(imote2_devices));
567 563
568 pxa2xx_set_spi_info(1, &pxa_ssp_master_0_info); 564 pxa2xx_set_spi_info(1, &pxa_ssp_master_0_info);
diff --git a/arch/arm/mach-pxa/include/mach/uncompress.h b/arch/arm/mach-pxa/include/mach/uncompress.h
index 5ef91d9d17e4..759b851ec985 100644
--- a/arch/arm/mach-pxa/include/mach/uncompress.h
+++ b/arch/arm/mach-pxa/include/mach/uncompress.h
@@ -16,9 +16,9 @@
16#define BTUART_BASE (0x40200000) 16#define BTUART_BASE (0x40200000)
17#define STUART_BASE (0x40700000) 17#define STUART_BASE (0x40700000)
18 18
19static unsigned long uart_base = FFUART_BASE; 19static unsigned long uart_base;
20static unsigned int uart_shift = 2; 20static unsigned int uart_shift;
21static unsigned int uart_is_pxa = 1; 21static unsigned int uart_is_pxa;
22 22
23static inline unsigned char uart_read(int offset) 23static inline unsigned char uart_read(int offset)
24{ 24{
@@ -56,6 +56,11 @@ static inline void flush(void)
56 56
57static inline void arch_decomp_setup(void) 57static inline void arch_decomp_setup(void)
58{ 58{
59 /* initialize to default */
60 uart_base = FFUART_BASE;
61 uart_shift = 2;
62 uart_is_pxa = 1;
63
59 if (machine_is_littleton() || machine_is_intelmote2() 64 if (machine_is_littleton() || machine_is_intelmote2()
60 || machine_is_csb726() || machine_is_stargate2() 65 || machine_is_csb726() || machine_is_stargate2()
61 || machine_is_cm_x300() || machine_is_balloon3()) 66 || machine_is_cm_x300() || machine_is_balloon3())
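
Both uncompress.h diffs above (mach-mmp and mach-pxa) apply the same fix: the statics lose their compile-time initializers and are set to their defaults at run time in arch_decomp_setup() instead. A minimal sketch of that deferred-initialization pattern, with hypothetical names, assuming the motivation is that the decompressor cannot rely on initialized writable data:

#include <stdbool.h>
#include <stdio.h>

#define UART2_BASE 0x40200000UL
#define UART3_BASE 0x40700000UL

/* No static initializer: the value is established at run time instead. */
static unsigned long uart_base;

/* Hypothetical board check standing in for machine_is_avengers_lite(). */
static bool machine_wants_uart3(void) { return false; }

static void arch_decomp_setup(void)
{
	uart_base = UART2_BASE;		/* default to UART2 */
	if (machine_wants_uart3())
		uart_base = UART3_BASE;
}

int main(void)
{
	arch_decomp_setup();
	printf("uart_base = %#lx\n", uart_base);
	return 0;
}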
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
index 3184bdc14526..44bb675e47f1 100644
--- a/arch/arm/mach-pxa/raumfeld.c
+++ b/arch/arm/mach-pxa/raumfeld.c
@@ -37,8 +37,6 @@
37#include <linux/lis3lv02d.h> 37#include <linux/lis3lv02d.h>
38#include <linux/pda_power.h> 38#include <linux/pda_power.h>
39#include <linux/power_supply.h> 39#include <linux/power_supply.h>
40#include <linux/pda_power.h>
41#include <linux/power_supply.h>
42#include <linux/regulator/max8660.h> 40#include <linux/regulator/max8660.h>
43#include <linux/regulator/machine.h> 41#include <linux/regulator/machine.h>
44#include <linux/regulator/fixed.h> 42#include <linux/regulator/fixed.h>
@@ -444,7 +442,7 @@ static struct gpio_keys_button gpio_keys_button[] = {
444 .active_low = 0, 442 .active_low = 0,
445 .wakeup = 0, 443 .wakeup = 0,
446 .debounce_interval = 5, /* ms */ 444 .debounce_interval = 5, /* ms */
447 .desc = "on/off button", 445 .desc = "on_off button",
448 }, 446 },
449}; 447};
450 448
diff --git a/arch/arm/mach-pxa/stargate2.c b/arch/arm/mach-pxa/stargate2.c
index a98a434f0111..2041eb1d90ba 100644
--- a/arch/arm/mach-pxa/stargate2.c
+++ b/arch/arm/mach-pxa/stargate2.c
@@ -764,11 +764,6 @@ static void __init stargate2_init(void)
764 pxa_set_btuart_info(NULL); 764 pxa_set_btuart_info(NULL);
765 pxa_set_stuart_info(NULL); 765 pxa_set_stuart_info(NULL);
766 766
767 /* spi chip selects */
768 gpio_direction_output(37, 0);
769 gpio_direction_output(24, 0);
770 gpio_direction_output(39, 0);
771
772 platform_add_devices(ARRAY_AND_SIZE(stargate2_devices)); 767 platform_add_devices(ARRAY_AND_SIZE(stargate2_devices));
773 768
774 pxa2xx_set_spi_info(1, &pxa_ssp_master_0_info); 769 pxa2xx_set_spi_info(1, &pxa_ssp_master_0_info);
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 31c2f4c30a95..1536f1784cac 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -12,7 +12,7 @@
12# 12#
13# http://www.arm.linux.org.uk/developer/machines/?action=new 13# http://www.arm.linux.org.uk/developer/machines/?action=new
14# 14#
15# Last update: Sat Feb 20 14:16:15 2010 15# Last update: Sat Mar 20 15:35:41 2010
16# 16#
17# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number 17# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
18# 18#
@@ -2663,7 +2663,7 @@ reb01 MACH_REB01 REB01 2675
2663aquila MACH_AQUILA AQUILA 2676 2663aquila MACH_AQUILA AQUILA 2676
2664spark_sls_hw2 MACH_SPARK_SLS_HW2 SPARK_SLS_HW2 2677 2664spark_sls_hw2 MACH_SPARK_SLS_HW2 SPARK_SLS_HW2 2677
2665sheeva_esata MACH_ESATA_SHEEVAPLUG ESATA_SHEEVAPLUG 2678 2665sheeva_esata MACH_ESATA_SHEEVAPLUG ESATA_SHEEVAPLUG 2678
2666surf7x30 MACH_SURF7X30 SURF7X30 2679 2666msm7x30_surf MACH_MSM7X30_SURF MSM7X30_SURF 2679
2667micro2440 MACH_MICRO2440 MICRO2440 2680 2667micro2440 MACH_MICRO2440 MICRO2440 2680
2668am2440 MACH_AM2440 AM2440 2681 2668am2440 MACH_AM2440 AM2440 2681
2669tq2440 MACH_TQ2440 TQ2440 2682 2669tq2440 MACH_TQ2440 TQ2440 2682
@@ -2678,3 +2678,74 @@ vc088x MACH_VC088X VC088X 2690
2678mioa702 MACH_MIOA702 MIOA702 2691 2678mioa702 MACH_MIOA702 MIOA702 2691
2679hpmin MACH_HPMIN HPMIN 2692 2679hpmin MACH_HPMIN HPMIN 2692
2680ak880xak MACH_AK880XAK AK880XAK 2693 2680ak880xak MACH_AK880XAK AK880XAK 2693
2681arm926tomap850 MACH_ARM926TOMAP850 ARM926TOMAP850 2694
2682lkevm MACH_LKEVM LKEVM 2695
2683mw6410 MACH_MW6410 MW6410 2696
2684terastation_wxl MACH_TERASTATION_WXL TERASTATION_WXL 2697
2685cpu8000e MACH_CPU8000E CPU8000E 2698
2686catania MACH_CATANIA CATANIA 2699
2687tokyo MACH_TOKYO TOKYO 2700
2688msm7201a_surf MACH_MSM7201A_SURF MSM7201A_SURF 2701
2689msm7201a_ffa MACH_MSM7201A_FFA MSM7201A_FFA 2702
2690msm7x25_surf MACH_MSM7X25_SURF MSM7X25_SURF 2703
2691msm7x25_ffa MACH_MSM7X25_FFA MSM7X25_FFA 2704
2692msm7x27_surf MACH_MSM7X27_SURF MSM7X27_SURF 2705
2693msm7x27_ffa MACH_MSM7X27_FFA MSM7X27_FFA 2706
2694msm7x30_ffa MACH_MSM7X30_FFA MSM7X30_FFA 2707
2695qsd8x50_surf MACH_QSD8X50_SURF QSD8X50_SURF 2708
2696qsd8x50_comet MACH_QSD8X50_COMET QSD8X50_COMET 2709
2697qsd8x50_ffa MACH_QSD8X50_FFA QSD8X50_FFA 2710
2698qsd8x50a_surf MACH_QSD8X50A_SURF QSD8X50A_SURF 2711
2699qsd8x50a_ffa MACH_QSD8X50A_FFA QSD8X50A_FFA 2712
2700adx_xgcp10 MACH_ADX_XGCP10 ADX_XGCP10 2713
2701mcgwumts2a MACH_MCGWUMTS2A MCGWUMTS2A 2714
2702mobikt MACH_MOBIKT MOBIKT 2715
2703mx53_evk MACH_MX53_EVK MX53_EVK 2716
2704igep0030 MACH_IGEP0030 IGEP0030 2717
2705axell_h40_h50_ctrl MACH_AXELL_H40_H50_CTRL AXELL_H40_H50_CTRL 2718
2706dtcommod MACH_DTCOMMOD DTCOMMOD 2719
2707gould MACH_GOULD GOULD 2720
2708siberia MACH_SIBERIA SIBERIA 2721
2709sbc3530 MACH_SBC3530 SBC3530 2722
2710qarm MACH_QARM QARM 2723
2711mips MACH_MIPS MIPS 2724
2712mx27grb MACH_MX27GRB MX27GRB 2725
2713sbc8100 MACH_SBC8100 SBC8100 2726
2714saarb MACH_SAARB SAARB 2727
2715omap3mini MACH_OMAP3MINI OMAP3MINI 2728
2716cnmbook7se MACH_CNMBOOK7SE CNMBOOK7SE 2729
2717catan MACH_CATAN CATAN 2730
2718harmony MACH_HARMONY HARMONY 2731
2719tonga MACH_TONGA TONGA 2732
2720cybook_orizon MACH_CYBOOK_ORIZON CYBOOK_ORIZON 2733
2721htcrhodiumcdma MACH_HTCRHODIUMCDMA HTCRHODIUMCDMA 2734
2722epc_g45 MACH_EPC_G45 EPC_G45 2735
2723epc_lpc3250 MACH_EPC_LPC3250 EPC_LPC3250 2736
2724mxc91341evb MACH_MXC91341EVB MXC91341EVB 2737
2725rtw1000 MACH_RTW1000 RTW1000 2738
2726bobcat MACH_BOBCAT BOBCAT 2739
2727trizeps6 MACH_TRIZEPS6 TRIZEPS6 2740
2728msm7x30_fluid MACH_MSM7X30_FLUID MSM7X30_FLUID 2741
2729nedap9263 MACH_NEDAP9263 NEDAP9263 2742
2730netgear_ms2110 MACH_NETGEAR_MS2110 NETGEAR_MS2110 2743
2731bmx MACH_BMX BMX 2744
2732netstream MACH_NETSTREAM NETSTREAM 2745
2733vpnext_rcu MACH_VPNEXT_RCU VPNEXT_RCU 2746
2734vpnext_mpu MACH_VPNEXT_MPU VPNEXT_MPU 2747
2735bcmring_tablet_v1 MACH_BCMRING_TABLET_V1 BCMRING_TABLET_V1 2748
2736sgarm10 MACH_SGARM10 SGARM10 2749
2737cm_t3517 MACH_CM_T3517 CM_T3517 2750
2738omap3_cps MACH_OMAP3_CPS OMAP3_CPS 2751
2739axar1500_receiver MACH_AXAR1500_RECEIVER AXAR1500_RECEIVER 2752
2740wbd222 MACH_WBD222 WBD222 2753
2741mt65xx MACH_MT65XX MT65XX 2754
2742msm8x60_surf MACH_MSM8X60_SURF MSM8X60_SURF 2755
2743msm8x60_sim MACH_MSM8X60_SIM MSM8X60_SIM 2756
2744vmc300 MACH_VMC300 VMC300 2757
2745tcc8000_sdk MACH_TCC8000_SDK TCC8000_SDK 2758
2746nanos MACH_NANOS NANOS 2759
2747stamp9g10 MACH_STAMP9G10 STAMP9G10 2760
2748stamp9g45 MACH_STAMP9G45 STAMP9G45 2761
2749h6053 MACH_H6053 H6053 2762
2750smint01 MACH_SMINT01 SMINT01 2763
2751prtlvt2 MACH_PRTLVT2 PRTLVT2 2764
diff --git a/arch/cris/arch-v32/drivers/pci/bios.c b/arch/cris/arch-v32/drivers/pci/bios.c
index d4b9c36ddc0f..bc0cfdad1cbc 100644
--- a/arch/cris/arch-v32/drivers/pci/bios.c
+++ b/arch/cris/arch-v32/drivers/pci/bios.c
@@ -50,7 +50,7 @@ pcibios_align_resource(void *data, const struct resource *res,
50 if ((res->flags & IORESOURCE_IO) && (start & 0x300)) 50 if ((res->flags & IORESOURCE_IO) && (start & 0x300))
51 start = (start + 0x3ff) & ~0x3ff; 51 start = (start + 0x3ff) & ~0x3ff;
52 52
53 return start 53 return start;
54} 54}
55 55
56int pcibios_enable_resources(struct pci_dev *dev, int mask) 56int pcibios_enable_resources(struct pci_dev *dev, int mask)
diff --git a/arch/frv/mb93090-mb00/pci-frv.c b/arch/frv/mb93090-mb00/pci-frv.c
index 1ed15d7fea20..6b4fb28e9f99 100644
--- a/arch/frv/mb93090-mb00/pci-frv.c
+++ b/arch/frv/mb93090-mb00/pci-frv.c
@@ -41,7 +41,7 @@ pcibios_align_resource(void *data, const struct resource *res,
41 if ((res->flags & IORESOURCE_IO) && (start & 0x300)) 41 if ((res->flags & IORESOURCE_IO) && (start & 0x300))
42 start = (start + 0x3ff) & ~0x3ff; 42 start = (start + 0x3ff) & ~0x3ff;
43 43
44 return start 44 return start;
45} 45}
46 46
47 47
@@ -94,8 +94,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
94 r = &dev->resource[idx]; 94 r = &dev->resource[idx];
95 if (!r->start) 95 if (!r->start)
96 continue; 96 continue;
97 if (pci_claim_resource(dev, idx) < 0) 97 pci_claim_resource(dev, idx);
98 printk(KERN_ERR "PCI: Cannot allocate resource region %d of bridge %s\n", idx, pci_name(dev));
99 } 98 }
100 } 99 }
101 pcibios_allocate_bus_resources(&bus->children); 100 pcibios_allocate_bus_resources(&bus->children);
@@ -125,7 +124,6 @@ static void __init pcibios_allocate_resources(int pass)
125 DBG("PCI: Resource %08lx-%08lx (f=%lx, d=%d, p=%d)\n", 124 DBG("PCI: Resource %08lx-%08lx (f=%lx, d=%d, p=%d)\n",
126 r->start, r->end, r->flags, disabled, pass); 125 r->start, r->end, r->flags, disabled, pass);
127 if (pci_claim_resource(dev, idx) < 0) { 126 if (pci_claim_resource(dev, idx) < 0) {
128 printk(KERN_ERR "PCI: Cannot allocate resource region %d of device %s\n", idx, pci_name(dev));
129 /* We'll assign a new address later */ 127 /* We'll assign a new address later */
130 r->end -= r->start; 128 r->end -= r->start;
131 r->start = 0; 129 r->start = 0;
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 203ec61c6d4c..76818f926539 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -75,9 +75,6 @@ config LOCKDEP_SUPPORT
75config HAVE_LATENCYTOP_SUPPORT 75config HAVE_LATENCYTOP_SUPPORT
76 def_bool y 76 def_bool y
77 77
78config PCI
79 def_bool n
80
81config DTC 78config DTC
82 def_bool y 79 def_bool y
83 80
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index 836832dd9b26..72f6e8583746 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -84,7 +84,7 @@ define archhelp
84 echo '* linux.bin - Create raw binary' 84 echo '* linux.bin - Create raw binary'
85 echo ' linux.bin.gz - Create compressed raw binary' 85 echo ' linux.bin.gz - Create compressed raw binary'
86 echo ' simpleImage.<dt> - ELF image with $(arch)/boot/dts/<dt>.dts linked in' 86 echo ' simpleImage.<dt> - ELF image with $(arch)/boot/dts/<dt>.dts linked in'
87 echo ' - stripped elf with fdt blob 87 echo ' - stripped elf with fdt blob'
88 echo ' simpleImage.<dt>.unstrip - full ELF image with fdt blob' 88 echo ' simpleImage.<dt>.unstrip - full ELF image with fdt blob'
89 echo ' *_defconfig - Select default config from arch/microblaze/configs' 89 echo ' *_defconfig - Select default config from arch/microblaze/configs'
90 echo '' 90 echo ''
@@ -94,3 +94,5 @@ define archhelp
94 echo ' name of a dts file from the arch/microblaze/boot/dts/ directory' 94 echo ' name of a dts file from the arch/microblaze/boot/dts/ directory'
95 echo ' (minus the .dts extension).' 95 echo ' (minus the .dts extension).'
96endef 96endef
97
98MRPROPER_FILES += $(boot)/simpleImage.*
diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile
index 902cf9846c3c..57f50c2371c6 100644
--- a/arch/microblaze/boot/Makefile
+++ b/arch/microblaze/boot/Makefile
@@ -23,8 +23,6 @@ $(obj)/system.dtb: $(obj)/$(DTB).dtb
23endif 23endif
24 24
25$(obj)/linux.bin: vmlinux FORCE 25$(obj)/linux.bin: vmlinux FORCE
26 [ -n $(CONFIG_INITRAMFS_SOURCE) ] && [ ! -e $(CONFIG_INITRAMFS_SOURCE) ] && \
27 touch $(CONFIG_INITRAMFS_SOURCE) || echo "No CPIO image"
28 $(call if_changed,objcopy) 26 $(call if_changed,objcopy)
29 $(call if_changed,uimage) 27 $(call if_changed,uimage)
30 @echo 'Kernel: $@ is ready' ' (#'`cat .version`')' 28 @echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
@@ -62,6 +60,4 @@ quiet_cmd_dtc = DTC $@
62$(obj)/%.dtb: $(dtstree)/%.dts FORCE 60$(obj)/%.dtb: $(dtstree)/%.dts FORCE
63 $(call if_changed,dtc) 61 $(call if_changed,dtc)
64 62
65clean-kernel += linux.bin linux.bin.gz simpleImage.* 63clean-files += *.dtb simpleImage.*.unstrip linux.bin.ub
66
67clean-files += *.dtb simpleImage.*.unstrip
diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h
index 563c6b9453f0..8eeb09211ece 100644
--- a/arch/microblaze/include/asm/processor.h
+++ b/arch/microblaze/include/asm/processor.h
@@ -14,7 +14,6 @@
14#include <asm/ptrace.h> 14#include <asm/ptrace.h>
15#include <asm/setup.h> 15#include <asm/setup.h>
16#include <asm/registers.h> 16#include <asm/registers.h>
17#include <asm/segment.h>
18#include <asm/entry.h> 17#include <asm/entry.h>
19#include <asm/current.h> 18#include <asm/current.h>
20 19
diff --git a/arch/microblaze/include/asm/segment.h b/arch/microblaze/include/asm/segment.h
deleted file mode 100644
index 0e7102c3fb11..000000000000
--- a/arch/microblaze/include/asm/segment.h
+++ /dev/null
@@ -1,49 +0,0 @@
1/*
2 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2008-2009 PetaLogix
4 * Copyright (C) 2006 Atmark Techno, Inc.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10
11#ifndef _ASM_MICROBLAZE_SEGMENT_H
12#define _ASM_MICROBLAZE_SEGMENT_H
13
14# ifndef __ASSEMBLY__
15
16typedef struct {
17 unsigned long seg;
18} mm_segment_t;
19
20/*
21 * On Microblaze the fs value is actually the top of the corresponding
22 * address space.
23 *
24 * The fs value determines whether argument validity checking should be
25 * performed or not. If get_fs() == USER_DS, checking is performed, with
26 * get_fs() == KERNEL_DS, checking is bypassed.
27 *
28 * For historical reasons, these macros are grossly misnamed.
29 *
30 * For non-MMU arch like Microblaze, KERNEL_DS and USER_DS is equal.
31 */
32# define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
33
34# ifndef CONFIG_MMU
35# define KERNEL_DS MAKE_MM_SEG(0)
36# define USER_DS KERNEL_DS
37# else
38# define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
39# define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
40# endif
41
42# define get_ds() (KERNEL_DS)
43# define get_fs() (current_thread_info()->addr_limit)
44# define set_fs(val) (current_thread_info()->addr_limit = (val))
45
46# define segment_eq(a, b) ((a).seg == (b).seg)
47
48# endif /* __ASSEMBLY__ */
49#endif /* _ASM_MICROBLAZE_SEGMENT_H */
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h
index 6e92885d381a..b2ca80f64640 100644
--- a/arch/microblaze/include/asm/thread_info.h
+++ b/arch/microblaze/include/asm/thread_info.h
@@ -19,7 +19,6 @@
19#ifndef __ASSEMBLY__ 19#ifndef __ASSEMBLY__
20# include <linux/types.h> 20# include <linux/types.h>
21# include <asm/processor.h> 21# include <asm/processor.h>
22# include <asm/segment.h>
23 22
24/* 23/*
25 * low level task data that entry.S needs immediate access to 24 * low level task data that entry.S needs immediate access to
@@ -60,6 +59,10 @@ struct cpu_context {
60 __u32 fsr; 59 __u32 fsr;
61}; 60};
62 61
62typedef struct {
63 unsigned long seg;
64} mm_segment_t;
65
63struct thread_info { 66struct thread_info {
64 struct task_struct *task; /* main task structure */ 67 struct task_struct *task; /* main task structure */
65 struct exec_domain *exec_domain; /* execution domain */ 68 struct exec_domain *exec_domain; /* execution domain */
diff --git a/arch/microblaze/include/asm/tlbflush.h b/arch/microblaze/include/asm/tlbflush.h
index bcb8b41d55af..2e1353c2d18d 100644
--- a/arch/microblaze/include/asm/tlbflush.h
+++ b/arch/microblaze/include/asm/tlbflush.h
@@ -24,6 +24,7 @@ extern void _tlbie(unsigned long address);
24extern void _tlbia(void); 24extern void _tlbia(void);
25 25
26#define __tlbia() { preempt_disable(); _tlbia(); preempt_enable(); } 26#define __tlbia() { preempt_disable(); _tlbia(); preempt_enable(); }
27#define __tlbie(x) { _tlbie(x); }
27 28
28static inline void local_flush_tlb_all(void) 29static inline void local_flush_tlb_all(void)
29 { __tlbia(); } 30 { __tlbia(); }
@@ -31,7 +32,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
31 { __tlbia(); } 32 { __tlbia(); }
32static inline void local_flush_tlb_page(struct vm_area_struct *vma, 33static inline void local_flush_tlb_page(struct vm_area_struct *vma,
33 unsigned long vmaddr) 34 unsigned long vmaddr)
34 { _tlbie(vmaddr); } 35 { __tlbie(vmaddr); }
35static inline void local_flush_tlb_range(struct vm_area_struct *vma, 36static inline void local_flush_tlb_range(struct vm_area_struct *vma,
36 unsigned long start, unsigned long end) 37 unsigned long start, unsigned long end)
37 { __tlbia(); } 38 { __tlbia(); }
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 371bd6e56d9a..446bec29b142 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -22,101 +22,73 @@
22#include <asm/mmu.h> 22#include <asm/mmu.h>
23#include <asm/page.h> 23#include <asm/page.h>
24#include <asm/pgtable.h> 24#include <asm/pgtable.h>
25#include <asm/segment.h>
26#include <linux/string.h> 25#include <linux/string.h>
27 26
28#define VERIFY_READ 0 27#define VERIFY_READ 0
29#define VERIFY_WRITE 1 28#define VERIFY_WRITE 1
30 29
31#define __clear_user(addr, n) (memset((void *)(addr), 0, (n)), 0) 30/*
32 31 * On Microblaze the fs value is actually the top of the corresponding
33#ifndef CONFIG_MMU 32 * address space.
34 33 *
35extern int ___range_ok(unsigned long addr, unsigned long size); 34 * The fs value determines whether argument validity checking should be
36 35 * performed or not. If get_fs() == USER_DS, checking is performed, with
37#define __range_ok(addr, size) \ 36 * get_fs() == KERNEL_DS, checking is bypassed.
38 ___range_ok((unsigned long)(addr), (unsigned long)(size)) 37 *
39 38 * For historical reasons, these macros are grossly misnamed.
40#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0) 39 *
41#define __access_ok(add, size) (__range_ok((addr), (size)) == 0) 40 * For non-MMU arch like Microblaze, KERNEL_DS and USER_DS is equal.
42 41 */
43/* Undefined function to trigger linker error */ 42# define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
44extern int bad_user_access_length(void);
45
46/* FIXME this is function for optimalization -> memcpy */
47#define __get_user(var, ptr) \
48({ \
49 int __gu_err = 0; \
50 switch (sizeof(*(ptr))) { \
51 case 1: \
52 case 2: \
53 case 4: \
54 (var) = *(ptr); \
55 break; \
56 case 8: \
57 memcpy((void *) &(var), (ptr), 8); \
58 break; \
59 default: \
60 (var) = 0; \
61 __gu_err = __get_user_bad(); \
62 break; \
63 } \
64 __gu_err; \
65})
66 43
67#define __get_user_bad() (bad_user_access_length(), (-EFAULT)) 44# ifndef CONFIG_MMU
45# define KERNEL_DS MAKE_MM_SEG(0)
46# define USER_DS KERNEL_DS
47# else
48# define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
49# define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
50# endif
68 51
69/* FIXME is not there defined __pu_val */ 52# define get_ds() (KERNEL_DS)
70#define __put_user(var, ptr) \ 53# define get_fs() (current_thread_info()->addr_limit)
71({ \ 54# define set_fs(val) (current_thread_info()->addr_limit = (val))
72 int __pu_err = 0; \
73 switch (sizeof(*(ptr))) { \
74 case 1: \
75 case 2: \
76 case 4: \
77 *(ptr) = (var); \
78 break; \
79 case 8: { \
80 typeof(*(ptr)) __pu_val = (var); \
81 memcpy(ptr, &__pu_val, sizeof(__pu_val)); \
82 } \
83 break; \
84 default: \
85 __pu_err = __put_user_bad(); \
86 break; \
87 } \
88 __pu_err; \
89})
90 55
91#define __put_user_bad() (bad_user_access_length(), (-EFAULT)) 56# define segment_eq(a, b) ((a).seg == (b).seg)
92 57
93#define put_user(x, ptr) __put_user((x), (ptr)) 58/*
94#define get_user(x, ptr) __get_user((x), (ptr)) 59 * The exception table consists of pairs of addresses: the first is the
60 * address of an instruction that is allowed to fault, and the second is
61 * the address at which the program should continue. No registers are
62 * modified, so it is entirely up to the continuation code to figure out
63 * what to do.
64 *
65 * All the routines below use bits of fixup code that are out of line
66 * with the main instruction path. This means when everything is well,
67 * we don't even have to jump over them. Further, they do not intrude
68 * on our cache or tlb entries.
69 */
70struct exception_table_entry {
71 unsigned long insn, fixup;
72};
95 73
96#define copy_to_user(to, from, n) (memcpy((to), (from), (n)), 0) 74/* Returns 0 if exception not found and fixup otherwise. */
97#define copy_from_user(to, from, n) (memcpy((to), (from), (n)), 0) 75extern unsigned long search_exception_table(unsigned long);
98 76
99#define __copy_to_user(to, from, n) (copy_to_user((to), (from), (n))) 77#ifndef CONFIG_MMU
100#define __copy_from_user(to, from, n) (copy_from_user((to), (from), (n)))
101#define __copy_to_user_inatomic(to, from, n) \
102 (__copy_to_user((to), (from), (n)))
103#define __copy_from_user_inatomic(to, from, n) \
104 (__copy_from_user((to), (from), (n)))
105 78
106static inline unsigned long clear_user(void *addr, unsigned long size) 79/* Check against bounds of physical memory */
80static inline int ___range_ok(unsigned long addr, unsigned long size)
107{ 81{
108 if (access_ok(VERIFY_WRITE, addr, size)) 82 return ((addr < memory_start) ||
109 size = __clear_user(addr, size); 83 ((addr + size) > memory_end));
110 return size;
111} 84}
112 85
113/* Returns 0 if exception not found and fixup otherwise. */ 86#define __range_ok(addr, size) \
114extern unsigned long search_exception_table(unsigned long); 87 ___range_ok((unsigned long)(addr), (unsigned long)(size))
115 88
116extern long strncpy_from_user(char *dst, const char *src, long count); 89#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
117extern long strnlen_user(const char *src, long count);
118 90
119#else /* CONFIG_MMU */ 91#else
120 92
121/* 93/*
122 * Address is valid if: 94 * Address is valid if:
@@ -129,24 +101,88 @@ extern long strnlen_user(const char *src, long count);
129/* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n", 101/* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n",
130 type?"WRITE":"READ",addr,size,get_fs().seg)) */ 102 type?"WRITE":"READ",addr,size,get_fs().seg)) */
131 103
132/* 104#endif
133 * All the __XXX versions macros/functions below do not perform
134 * access checking. It is assumed that the necessary checks have been
135 * already performed before the finction (macro) is called.
136 */
137 105
138#define get_user(x, ptr) \ 106#ifdef CONFIG_MMU
139({ \ 107# define __FIXUP_SECTION ".section .fixup,\"ax\"\n"
140 access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) \ 108# define __EX_TABLE_SECTION ".section __ex_table,\"a\"\n"
141 ? __get_user((x), (ptr)) : -EFAULT; \ 109#else
142}) 110# define __FIXUP_SECTION ".section .discard,\"ax\"\n"
111# define __EX_TABLE_SECTION ".section .discard,\"a\"\n"
112#endif
143 113
144#define put_user(x, ptr) \ 114extern unsigned long __copy_tofrom_user(void __user *to,
145({ \ 115 const void __user *from, unsigned long size);
146 access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) \ 116
147 ? __put_user((x), (ptr)) : -EFAULT; \ 117/* Return: number of not copied bytes, i.e. 0 if OK or non-zero if fail. */
118static inline unsigned long __must_check __clear_user(void __user *to,
119 unsigned long n)
120{
121 /* normal memset with two words to __ex_table */
122 __asm__ __volatile__ ( \
123 "1: sb r0, %2, r0;" \
124 " addik %0, %0, -1;" \
125 " bneid %0, 1b;" \
126 " addik %2, %2, 1;" \
127 "2: " \
128 __EX_TABLE_SECTION \
129 ".word 1b,2b;" \
130 ".previous;" \
131 : "=r"(n) \
132 : "0"(n), "r"(to)
133 );
134 return n;
135}
136
137static inline unsigned long __must_check clear_user(void __user *to,
138 unsigned long n)
139{
140 might_sleep();
141 if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
142 return n;
143
144 return __clear_user(to, n);
145}
146
147/* put_user and get_user macros */
148extern long __user_bad(void);
149
150#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \
151({ \
152 __asm__ __volatile__ ( \
153 "1:" insn " %1, %2, r0;" \
154 " addk %0, r0, r0;" \
155 "2: " \
156 __FIXUP_SECTION \
157 "3: brid 2b;" \
158 " addik %0, r0, %3;" \
159 ".previous;" \
160 __EX_TABLE_SECTION \
161 ".word 1b,3b;" \
162 ".previous;" \
163 : "=&r"(__gu_err), "=r"(__gu_val) \
164 : "r"(__gu_ptr), "i"(-EFAULT) \
165 ); \
148}) 166})
149 167
168/**
169 * get_user: - Get a simple variable from user space.
170 * @x: Variable to store result.
171 * @ptr: Source address, in user space.
172 *
173 * Context: User context only. This function may sleep.
174 *
175 * This macro copies a single simple variable from user space to kernel
176 * space. It supports simple types like char and int, but not larger
177 * data types like structures or arrays.
178 *
179 * @ptr must have pointer-to-simple-variable type, and the result of
180 * dereferencing @ptr must be assignable to @x without a cast.
181 *
182 * Returns zero on success, or -EFAULT on error.
183 * On error, the variable @x is set to zero.
184 */
185
150#define __get_user(x, ptr) \ 186#define __get_user(x, ptr) \
151({ \ 187({ \
152 unsigned long __gu_val; \ 188 unsigned long __gu_val; \
@@ -163,30 +199,74 @@ extern long strnlen_user(const char *src, long count);
163 __get_user_asm("lw", (ptr), __gu_val, __gu_err); \ 199 __get_user_asm("lw", (ptr), __gu_val, __gu_err); \
164 break; \ 200 break; \
165 default: \ 201 default: \
166 __gu_val = 0; __gu_err = -EINVAL; \ 202 /* __gu_val = 0; __gu_err = -EINVAL;*/ __gu_err = __user_bad();\
167 } \ 203 } \
168 x = (__typeof__(*(ptr))) __gu_val; \ 204 x = (__typeof__(*(ptr))) __gu_val; \
169 __gu_err; \ 205 __gu_err; \
170}) 206})
171 207
172#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \ 208
209#define get_user(x, ptr) \
173({ \ 210({ \
174 __asm__ __volatile__ ( \ 211 access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) \
175 "1:" insn " %1, %2, r0; \ 212 ? __get_user((x), (ptr)) : -EFAULT; \
176 addk %0, r0, r0; \ 213})
177 2: \ 214
178 .section .fixup,\"ax\"; \ 215#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \
179 3: brid 2b; \ 216({ \
180 addik %0, r0, %3; \ 217 __asm__ __volatile__ ( \
181 .previous; \ 218 "1:" insn " %1, %2, r0;" \
182 .section __ex_table,\"a\"; \ 219 " addk %0, r0, r0;" \
183 .word 1b,3b; \ 220 "2: " \
184 .previous;" \ 221 __FIXUP_SECTION \
185 : "=r"(__gu_err), "=r"(__gu_val) \ 222 "3: brid 2b;" \
186 : "r"(__gu_ptr), "i"(-EFAULT) \ 223 " addik %0, r0, %3;" \
187 ); \ 224 ".previous;" \
225 __EX_TABLE_SECTION \
226 ".word 1b,3b;" \
227 ".previous;" \
228 : "=&r"(__gu_err) \
229 : "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT) \
230 ); \
188}) 231})
189 232
233#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err) \
234({ \
235 __asm__ __volatile__ (" lwi %0, %1, 0;" \
236 "1: swi %0, %2, 0;" \
237 " lwi %0, %1, 4;" \
238 "2: swi %0, %2, 4;" \
239 " addk %0, r0, r0;" \
240 "3: " \
241 __FIXUP_SECTION \
242 "4: brid 3b;" \
243 " addik %0, r0, %3;" \
244 ".previous;" \
245 __EX_TABLE_SECTION \
246 ".word 1b,4b,2b,4b;" \
247 ".previous;" \
248 : "=&r"(__gu_err) \
249 : "r"(&__gu_val), "r"(__gu_ptr), "i"(-EFAULT) \
250 ); \
251})
252
253/**
254 * put_user: - Write a simple value into user space.
255 * @x: Value to copy to user space.
256 * @ptr: Destination address, in user space.
257 *
258 * Context: User context only. This function may sleep.
259 *
260 * This macro copies a single simple value from kernel space to user
261 * space. It supports simple types like char and int, but not larger
262 * data types like structures or arrays.
263 *
264 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
265 * to the result of dereferencing @ptr.
266 *
267 * Returns zero on success, or -EFAULT on error.
268 */
269
190#define __put_user(x, ptr) \ 270#define __put_user(x, ptr) \
191({ \ 271({ \
192 __typeof__(*(ptr)) volatile __gu_val = (x); \ 272 __typeof__(*(ptr)) volatile __gu_val = (x); \
@@ -195,7 +275,7 @@ extern long strnlen_user(const char *src, long count);
195 case 1: \ 275 case 1: \
196 __put_user_asm("sb", (ptr), __gu_val, __gu_err); \ 276 __put_user_asm("sb", (ptr), __gu_val, __gu_err); \
197 break; \ 277 break; \
198 case 2: \ 278 case 2: \
199 __put_user_asm("sh", (ptr), __gu_val, __gu_err); \ 279 __put_user_asm("sh", (ptr), __gu_val, __gu_err); \
200 break; \ 280 break; \
201 case 4: \ 281 case 4: \
@@ -205,121 +285,82 @@ extern long strnlen_user(const char *src, long count);
205 __put_user_asm_8((ptr), __gu_val, __gu_err); \ 285 __put_user_asm_8((ptr), __gu_val, __gu_err); \
206 break; \ 286 break; \
207 default: \ 287 default: \
208 __gu_err = -EINVAL; \ 288 /*__gu_err = -EINVAL;*/ __gu_err = __user_bad(); \
209 } \ 289 } \
210 __gu_err; \ 290 __gu_err; \
211}) 291})
212 292
213#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err) \ 293#ifndef CONFIG_MMU
214({ \
215__asm__ __volatile__ (" lwi %0, %1, 0; \
216 1: swi %0, %2, 0; \
217 lwi %0, %1, 4; \
218 2: swi %0, %2, 4; \
219 addk %0,r0,r0; \
220 3: \
221 .section .fixup,\"ax\"; \
222 4: brid 3b; \
223 addik %0, r0, %3; \
224 .previous; \
225 .section __ex_table,\"a\"; \
226 .word 1b,4b,2b,4b; \
227 .previous;" \
228 : "=&r"(__gu_err) \
229 : "r"(&__gu_val), \
230 "r"(__gu_ptr), "i"(-EFAULT) \
231 ); \
232})
233 294
234#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \ 295#define put_user(x, ptr) __put_user((x), (ptr))
235({ \
236 __asm__ __volatile__ ( \
237 "1:" insn " %1, %2, r0; \
238 addk %0, r0, r0; \
239 2: \
240 .section .fixup,\"ax\"; \
241 3: brid 2b; \
242 addik %0, r0, %3; \
243 .previous; \
244 .section __ex_table,\"a\"; \
245 .word 1b,3b; \
246 .previous;" \
247 : "=r"(__gu_err) \
248 : "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT) \
249 ); \
250})
251 296
252/* 297#else /* CONFIG_MMU */
253 * Return: number of not copied bytes, i.e. 0 if OK or non-zero if fail.
254 */
255static inline int clear_user(char *to, int size)
256{
257 if (size && access_ok(VERIFY_WRITE, to, size)) {
258 __asm__ __volatile__ (" \
259 1: \
260 sb r0, %2, r0; \
261 addik %0, %0, -1; \
262 bneid %0, 1b; \
263 addik %2, %2, 1; \
264 2: \
265 .section __ex_table,\"a\"; \
266 .word 1b,2b; \
267 .section .text;" \
268 : "=r"(size) \
269 : "0"(size), "r"(to)
270 );
271 }
272 return size;
273}
274 298
275#define __copy_from_user(to, from, n) copy_from_user((to), (from), (n)) 299#define put_user(x, ptr) \
300({ \
301 access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) \
302 ? __put_user((x), (ptr)) : -EFAULT; \
303})
304#endif /* CONFIG_MMU */
305
306/* copy_to_from_user */
307#define __copy_from_user(to, from, n) \
308 __copy_tofrom_user((__force void __user *)(to), \
309 (void __user *)(from), (n))
276#define __copy_from_user_inatomic(to, from, n) \ 310#define __copy_from_user_inatomic(to, from, n) \
277 copy_from_user((to), (from), (n)) 311 copy_from_user((to), (from), (n))
278 312
279#define copy_to_user(to, from, n) \ 313static inline long copy_from_user(void *to,
280 (access_ok(VERIFY_WRITE, (to), (n)) ? \ 314 const void __user *from, unsigned long n)
281 __copy_tofrom_user((void __user *)(to), \ 315{
282 (__force const void __user *)(from), (n)) \ 316 might_sleep();
283 : -EFAULT) 317 if (access_ok(VERIFY_READ, from, n))
318 return __copy_from_user(to, from, n);
319 return n;
320}
284 321
285#define __copy_to_user(to, from, n) copy_to_user((to), (from), (n)) 322#define __copy_to_user(to, from, n) \
323 __copy_tofrom_user((void __user *)(to), \
324 (__force const void __user *)(from), (n))
286#define __copy_to_user_inatomic(to, from, n) copy_to_user((to), (from), (n)) 325#define __copy_to_user_inatomic(to, from, n) copy_to_user((to), (from), (n))
287 326
288#define copy_from_user(to, from, n) \ 327static inline long copy_to_user(void __user *to,
289 (access_ok(VERIFY_READ, (from), (n)) ? \ 328 const void *from, unsigned long n)
290 __copy_tofrom_user((__force void __user *)(to), \ 329{
291 (void __user *)(from), (n)) \ 330 might_sleep();
292 : -EFAULT) 331 if (access_ok(VERIFY_WRITE, to, n))
332 return __copy_to_user(to, from, n);
333 return n;
334}
293 335
336/*
337 * Copy a null terminated string from userspace.
338 */
294extern int __strncpy_user(char *to, const char __user *from, int len); 339extern int __strncpy_user(char *to, const char __user *from, int len);
295extern int __strnlen_user(const char __user *sstr, int len);
296 340
297#define strncpy_from_user(to, from, len) \ 341#define __strncpy_from_user __strncpy_user
298 (access_ok(VERIFY_READ, from, 1) ? \
299 __strncpy_user(to, from, len) : -EFAULT)
300#define strnlen_user(str, len) \
301 (access_ok(VERIFY_READ, str, 1) ? __strnlen_user(str, len) : 0)
302 342
303#endif /* CONFIG_MMU */ 343static inline long
304 344strncpy_from_user(char *dst, const char __user *src, long count)
305extern unsigned long __copy_tofrom_user(void __user *to, 345{
306 const void __user *from, unsigned long size); 346 if (!access_ok(VERIFY_READ, src, 1))
347 return -EFAULT;
348 return __strncpy_from_user(dst, src, count);
349}
307 350
308/* 351/*
309 * The exception table consists of pairs of addresses: the first is the 352 * Return the size of a string (including the ending 0)
310 * address of an instruction that is allowed to fault, and the second is
311 * the address at which the program should continue. No registers are
312 * modified, so it is entirely up to the continuation code to figure out
313 * what to do.
314 * 353 *
315 * All the routines below use bits of fixup code that are out of line 354 * Return 0 on exception, a value greater than N if too long
316 * with the main instruction path. This means when everything is well,
317 * we don't even have to jump over them. Further, they do not intrude
318 * on our cache or tlb entries.
319 */ 355 */
320struct exception_table_entry { 356extern int __strnlen_user(const char __user *sstr, int len);
321 unsigned long insn, fixup; 357
322}; 358static inline long strnlen_user(const char __user *src, long n)
359{
360 if (!access_ok(VERIFY_READ, src, 1))
361 return 0;
362 return __strnlen_user(src, n);
363}
323 364
324#endif /* __ASSEMBLY__ */ 365#endif /* __ASSEMBLY__ */
325#endif /* __KERNEL__ */ 366#endif /* __KERNEL__ */
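
With the reworked microblaze uaccess.h above, copy_from_user() and copy_to_user() become inline functions that perform the access_ok() check themselves and return the number of bytes left uncopied. A hedged sketch of typical caller usage follows; this is a hypothetical character-device write handler, not code from this patch:

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

static char example_buf[64];	/* hypothetical device state */

static ssize_t example_write(struct file *file, const char __user *ubuf,
			     size_t count, loff_t *ppos)
{
	size_t len = min_t(size_t, count, sizeof(example_buf));

	/*
	 * copy_from_user() returns the number of bytes NOT copied;
	 * any non-zero result is reported to userspace as -EFAULT.
	 */
	if (copy_from_user(example_buf, ubuf, len))
		return -EFAULT;

	return len;
}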
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index b1084974fccd..4d5b0311601b 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -37,7 +37,7 @@ static inline void __dma_sync_page(unsigned long paddr, unsigned long offset,
37 37
38static unsigned long get_dma_direct_offset(struct device *dev) 38static unsigned long get_dma_direct_offset(struct device *dev)
39{ 39{
40 if (dev) 40 if (likely(dev))
41 return (unsigned long)dev->archdata.dma_data; 41 return (unsigned long)dev->archdata.dma_data;
42 42
43 return PCI_DRAM_OFFSET; /* FIXME Not sure if is correct */ 43 return PCI_DRAM_OFFSET; /* FIXME Not sure if is correct */
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index cb7815cfe5ab..da6a5f5dc766 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -51,6 +51,12 @@ swapper_pg_dir:
51 51
52 .text 52 .text
53ENTRY(_start) 53ENTRY(_start)
54#if CONFIG_KERNEL_BASE_ADDR == 0
55 brai TOPHYS(real_start)
56 .org 0x100
57real_start:
58#endif
59
54 mfs r1, rmsr 60 mfs r1, rmsr
55 andi r1, r1, ~2 61 andi r1, r1, ~2
56 mts rmsr, r1 62 mts rmsr, r1
@@ -99,8 +105,8 @@ no_fdt_arg:
99 tophys(r4,r4) /* convert to phys address */ 105 tophys(r4,r4) /* convert to phys address */
100 ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */ 106 ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
101_copy_command_line: 107_copy_command_line:
102 lbu r2, r5, r6 /* r7=r5+r6 - r5 contain pointer to command line */ 108 lbu r2, r5, r6 /* r2=r5+r6 - r5 contain pointer to command line */
103 sb r2, r4, r6 /* addr[r4+r6]= r7*/ 109 sb r2, r4, r6 /* addr[r4+r6]= r2*/
104 addik r6, r6, 1 /* increment counting */ 110 addik r6, r6, 1 /* increment counting */
105 bgtid r3, _copy_command_line /* loop for all entries */ 111 bgtid r3, _copy_command_line /* loop for all entries */
106 addik r3, r3, -1 /* descrement loop */ 112 addik r3, r3, -1 /* descrement loop */
@@ -128,7 +134,7 @@ _copy_bram:
128 * virtual to physical. 134 * virtual to physical.
129 */ 135 */
130 nop 136 nop
131 addik r3, r0, 63 /* Invalidate all TLB entries */ 137 addik r3, r0, MICROBLAZE_TLB_SIZE -1 /* Invalidate all TLB entries */
132_invalidate: 138_invalidate:
133 mts rtlbx, r3 139 mts rtlbx, r3
134 mts rtlbhi, r0 /* flush: ensure V is clear */ 140 mts rtlbhi, r0 /* flush: ensure V is clear */
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index 2b86c03aa841..995a2123635b 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -313,13 +313,13 @@ _hw_exception_handler:
313 mfs r5, rmsr; 313 mfs r5, rmsr;
314 nop 314 nop
315 swi r5, r1, 0; 315 swi r5, r1, 0;
316 mfs r3, resr 316 mfs r4, resr
317 nop 317 nop
318 mfs r4, rear; 318 mfs r3, rear;
319 nop 319 nop
320 320
321#ifndef CONFIG_MMU 321#ifndef CONFIG_MMU
322 andi r5, r3, 0x1000; /* Check ESR[DS] */ 322 andi r5, r4, 0x1000; /* Check ESR[DS] */
323 beqi r5, not_in_delay_slot; /* Branch if ESR[DS] not set */ 323 beqi r5, not_in_delay_slot; /* Branch if ESR[DS] not set */
324 mfs r17, rbtr; /* ESR[DS] set - return address in BTR */ 324 mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
325 nop 325 nop
@@ -327,13 +327,14 @@ not_in_delay_slot:
327 swi r17, r1, PT_R17 327 swi r17, r1, PT_R17
328#endif 328#endif
329 329
330 andi r5, r3, 0x1F; /* Extract ESR[EXC] */ 330 andi r5, r4, 0x1F; /* Extract ESR[EXC] */
331 331
332#ifdef CONFIG_MMU 332#ifdef CONFIG_MMU
333 /* Calculate exception vector offset = r5 << 2 */ 333 /* Calculate exception vector offset = r5 << 2 */
334 addk r6, r5, r5; /* << 1 */ 334 addk r6, r5, r5; /* << 1 */
335 addk r6, r6, r6; /* << 2 */ 335 addk r6, r6, r6; /* << 2 */
336 336
337#ifdef DEBUG
337/* counting which exception happen */ 338/* counting which exception happen */
338 lwi r5, r0, 0x200 + TOPHYS(r0_ram) 339 lwi r5, r0, 0x200 + TOPHYS(r0_ram)
339 addi r5, r5, 1 340 addi r5, r5, 1
@@ -341,6 +342,7 @@ not_in_delay_slot:
341 lwi r5, r6, 0x200 + TOPHYS(r0_ram) 342 lwi r5, r6, 0x200 + TOPHYS(r0_ram)
342 addi r5, r5, 1 343 addi r5, r5, 1
343 swi r5, r6, 0x200 + TOPHYS(r0_ram) 344 swi r5, r6, 0x200 + TOPHYS(r0_ram)
345#endif
344/* end */ 346/* end */
345 /* Load the HW Exception vector */ 347 /* Load the HW Exception vector */
346 lwi r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable) 348 lwi r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable)
@@ -376,7 +378,7 @@ handle_other_ex: /* Handle Other exceptions here */
376 swi r18, r1, PT_R18 378 swi r18, r1, PT_R18
377 379
378 or r5, r1, r0 380 or r5, r1, r0
379 andi r6, r3, 0x1F; /* Load ESR[EC] */ 381 andi r6, r4, 0x1F; /* Load ESR[EC] */
380 lwi r7, r0, PER_CPU(KM) /* MS: saving current kernel mode to regs */ 382 lwi r7, r0, PER_CPU(KM) /* MS: saving current kernel mode to regs */
381 swi r7, r1, PT_MODE 383 swi r7, r1, PT_MODE
382 mfs r7, rfsr 384 mfs r7, rfsr
@@ -426,11 +428,11 @@ handle_other_ex: /* Handle Other exceptions here */
426 */ 428 */
427handle_unaligned_ex: 429handle_unaligned_ex:
428 /* Working registers already saved: R3, R4, R5, R6 430 /* Working registers already saved: R3, R4, R5, R6
429 * R3 = ESR 431 * R4 = ESR
430 * R4 = EAR 432 * R3 = EAR
431 */ 433 */
432#ifdef CONFIG_MMU 434#ifdef CONFIG_MMU
433 andi r6, r3, 0x1000 /* Check ESR[DS] */ 435 andi r6, r4, 0x1000 /* Check ESR[DS] */
434 beqi r6, _no_delayslot /* Branch if ESR[DS] not set */ 436 beqi r6, _no_delayslot /* Branch if ESR[DS] not set */
435 mfs r17, rbtr; /* ESR[DS] set - return address in BTR */ 437 mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
436 nop 438 nop
@@ -439,7 +441,7 @@ _no_delayslot:
439 RESTORE_STATE; 441 RESTORE_STATE;
440 bri unaligned_data_trap 442 bri unaligned_data_trap
441#endif 443#endif
442 andi r6, r3, 0x3E0; /* Mask and extract the register operand */ 444 andi r6, r4, 0x3E0; /* Mask and extract the register operand */
443 srl r6, r6; /* r6 >> 5 */ 445 srl r6, r6; /* r6 >> 5 */
444 srl r6, r6; 446 srl r6, r6;
445 srl r6, r6; 447 srl r6, r6;
@@ -448,33 +450,33 @@ _no_delayslot:
448 /* Store the register operand in a temporary location */ 450 /* Store the register operand in a temporary location */
449 sbi r6, r0, TOPHYS(ex_reg_op); 451 sbi r6, r0, TOPHYS(ex_reg_op);
450 452
451 andi r6, r3, 0x400; /* Extract ESR[S] */ 453 andi r6, r4, 0x400; /* Extract ESR[S] */
452 bnei r6, ex_sw; 454 bnei r6, ex_sw;
453ex_lw: 455ex_lw:
454 andi r6, r3, 0x800; /* Extract ESR[W] */ 456 andi r6, r4, 0x800; /* Extract ESR[W] */
455 beqi r6, ex_lhw; 457 beqi r6, ex_lhw;
456 lbui r5, r4, 0; /* Exception address in r4 */ 458 lbui r5, r3, 0; /* Exception address in r3 */
457 /* Load a word, byte-by-byte from destination address 459 /* Load a word, byte-by-byte from destination address
458 and save it in tmp space */ 460 and save it in tmp space */
459 sbi r5, r0, TOPHYS(ex_tmp_data_loc_0); 461 sbi r5, r0, TOPHYS(ex_tmp_data_loc_0);
460 lbui r5, r4, 1; 462 lbui r5, r3, 1;
461 sbi r5, r0, TOPHYS(ex_tmp_data_loc_1); 463 sbi r5, r0, TOPHYS(ex_tmp_data_loc_1);
462 lbui r5, r4, 2; 464 lbui r5, r3, 2;
463 sbi r5, r0, TOPHYS(ex_tmp_data_loc_2); 465 sbi r5, r0, TOPHYS(ex_tmp_data_loc_2);
464 lbui r5, r4, 3; 466 lbui r5, r3, 3;
465 sbi r5, r0, TOPHYS(ex_tmp_data_loc_3); 467 sbi r5, r0, TOPHYS(ex_tmp_data_loc_3);
466 /* Get the destination register value into r3 */ 468 /* Get the destination register value into r4 */
467 lwi r3, r0, TOPHYS(ex_tmp_data_loc_0); 469 lwi r4, r0, TOPHYS(ex_tmp_data_loc_0);
468 bri ex_lw_tail; 470 bri ex_lw_tail;
469ex_lhw: 471ex_lhw:
470 lbui r5, r4, 0; /* Exception address in r4 */ 472 lbui r5, r3, 0; /* Exception address in r3 */
471 /* Load a half-word, byte-by-byte from destination 473 /* Load a half-word, byte-by-byte from destination
472 address and save it in tmp space */ 474 address and save it in tmp space */
473 sbi r5, r0, TOPHYS(ex_tmp_data_loc_0); 475 sbi r5, r0, TOPHYS(ex_tmp_data_loc_0);
474 lbui r5, r4, 1; 476 lbui r5, r3, 1;
475 sbi r5, r0, TOPHYS(ex_tmp_data_loc_1); 477 sbi r5, r0, TOPHYS(ex_tmp_data_loc_1);
476 /* Get the destination register value into r3 */ 478 /* Get the destination register value into r4 */
477 lhui r3, r0, TOPHYS(ex_tmp_data_loc_0); 479 lhui r4, r0, TOPHYS(ex_tmp_data_loc_0);
478ex_lw_tail: 480ex_lw_tail:
479 /* Get the destination register number into r5 */ 481 /* Get the destination register number into r5 */
480 lbui r5, r0, TOPHYS(ex_reg_op); 482 lbui r5, r0, TOPHYS(ex_reg_op);
@@ -502,25 +504,25 @@ ex_sw_tail:
502 andi r6, r6, 0x800; /* Extract ESR[W] */ 504 andi r6, r6, 0x800; /* Extract ESR[W] */
503 beqi r6, ex_shw; 505 beqi r6, ex_shw;
504 /* Get the word - delay slot */ 506 /* Get the word - delay slot */
505 swi r3, r0, TOPHYS(ex_tmp_data_loc_0); 507 swi r4, r0, TOPHYS(ex_tmp_data_loc_0);
506 /* Store the word, byte-by-byte into destination address */ 508 /* Store the word, byte-by-byte into destination address */
507 lbui r3, r0, TOPHYS(ex_tmp_data_loc_0); 509 lbui r4, r0, TOPHYS(ex_tmp_data_loc_0);
508 sbi r3, r4, 0; 510 sbi r4, r3, 0;
509 lbui r3, r0, TOPHYS(ex_tmp_data_loc_1); 511 lbui r4, r0, TOPHYS(ex_tmp_data_loc_1);
510 sbi r3, r4, 1; 512 sbi r4, r3, 1;
511 lbui r3, r0, TOPHYS(ex_tmp_data_loc_2); 513 lbui r4, r0, TOPHYS(ex_tmp_data_loc_2);
512 sbi r3, r4, 2; 514 sbi r4, r3, 2;
513 lbui r3, r0, TOPHYS(ex_tmp_data_loc_3); 515 lbui r4, r0, TOPHYS(ex_tmp_data_loc_3);
514 sbi r3, r4, 3; 516 sbi r4, r3, 3;
515 bri ex_handler_done; 517 bri ex_handler_done;
516 518
517ex_shw: 519ex_shw:
518 /* Store the lower half-word, byte-by-byte into destination address */ 520 /* Store the lower half-word, byte-by-byte into destination address */
519 swi r3, r0, TOPHYS(ex_tmp_data_loc_0); 521 swi r4, r0, TOPHYS(ex_tmp_data_loc_0);
520 lbui r3, r0, TOPHYS(ex_tmp_data_loc_2); 522 lbui r4, r0, TOPHYS(ex_tmp_data_loc_2);
521 sbi r3, r4, 0; 523 sbi r4, r3, 0;
522 lbui r3, r0, TOPHYS(ex_tmp_data_loc_3); 524 lbui r4, r0, TOPHYS(ex_tmp_data_loc_3);
523 sbi r3, r4, 1; 525 sbi r4, r3, 1;
524ex_sw_end: /* Exception handling of store word, ends. */ 526ex_sw_end: /* Exception handling of store word, ends. */
525 527
526ex_handler_done: 528ex_handler_done:
@@ -560,21 +562,16 @@ ex_handler_done:
560 */ 562 */
561 mfs r11, rpid 563 mfs r11, rpid
562 nop 564 nop
563 bri 4
564 mfs r3, rear /* Get faulting address */
565 nop
566 /* If we are faulting a kernel address, we have to use the 565 /* If we are faulting a kernel address, we have to use the
567 * kernel page tables. 566 * kernel page tables.
568 */ 567 */
569 ori r4, r0, CONFIG_KERNEL_START 568 ori r5, r0, CONFIG_KERNEL_START
570 cmpu r4, r3, r4 569 cmpu r5, r3, r5
571 bgti r4, ex3 570 bgti r5, ex3
572 /* First, check if it was a zone fault (which means a user 571 /* First, check if it was a zone fault (which means a user
573 * tried to access a kernel or read-protected page - always 572 * tried to access a kernel or read-protected page - always
574 * a SEGV). All other faults here must be stores, so no 573 * a SEGV). All other faults here must be stores, so no
575 * need to check ESR_S as well. */ 574 * need to check ESR_S as well. */
576 mfs r4, resr
577 nop
578 andi r4, r4, 0x800 /* ESR_Z - zone protection */ 575 andi r4, r4, 0x800 /* ESR_Z - zone protection */
579 bnei r4, ex2 576 bnei r4, ex2
580 577
@@ -589,8 +586,6 @@ ex_handler_done:
589 * tried to access a kernel or read-protected page - always 586 * tried to access a kernel or read-protected page - always
590 * a SEGV). All other faults here must be stores, so no 587 * a SEGV). All other faults here must be stores, so no
591 * need to check ESR_S as well. */ 588 * need to check ESR_S as well. */
592 mfs r4, resr
593 nop
594 andi r4, r4, 0x800 /* ESR_Z */ 589 andi r4, r4, 0x800 /* ESR_Z */
595 bnei r4, ex2 590 bnei r4, ex2
596 /* get current task address */ 591 /* get current task address */
@@ -665,8 +660,6 @@ ex_handler_done:
665 * R3 = ESR 660 * R3 = ESR
666 */ 661 */
667 662
668 mfs r3, rear /* Get faulting address */
669 nop
670 RESTORE_STATE; 663 RESTORE_STATE;
671 bri page_fault_instr_trap 664 bri page_fault_instr_trap
672 665
@@ -677,18 +670,15 @@ ex_handler_done:
677 */ 670 */
678 handle_data_tlb_miss_exception: 671 handle_data_tlb_miss_exception:
679 /* Working registers already saved: R3, R4, R5, R6 672 /* Working registers already saved: R3, R4, R5, R6
680 * R3 = ESR 673 * R3 = EAR, R4 = ESR
681 */ 674 */
682 mfs r11, rpid 675 mfs r11, rpid
683 nop 676 nop
684 bri 4
685 mfs r3, rear /* Get faulting address */
686 nop
687 677
688 /* If we are faulting a kernel address, we have to use the 678 /* If we are faulting a kernel address, we have to use the
689 * kernel page tables. */ 679 * kernel page tables. */
690 ori r4, r0, CONFIG_KERNEL_START 680 ori r6, r0, CONFIG_KERNEL_START
691 cmpu r4, r3, r4 681 cmpu r4, r3, r6
692 bgti r4, ex5 682 bgti r4, ex5
693 ori r4, r0, swapper_pg_dir 683 ori r4, r0, swapper_pg_dir
694 mts rpid, r0 /* TLB will have 0 TID */ 684 mts rpid, r0 /* TLB will have 0 TID */
@@ -731,9 +721,8 @@ ex_handler_done:
731 * Many of these bits are software only. Bits we don't set 721 * Many of these bits are software only. Bits we don't set
732 * here we (properly should) assume have the appropriate value. 722 * here we (properly should) assume have the appropriate value.
733 */ 723 */
724 brid finish_tlb_load
734 andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */ 725 andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
735
736 bri finish_tlb_load
737 ex7: 726 ex7:
738 /* The bailout. Restore registers to pre-exception conditions 727 /* The bailout. Restore registers to pre-exception conditions
739 * and call the heavyweights to help us out. 728 * and call the heavyweights to help us out.
@@ -754,9 +743,6 @@ ex_handler_done:
754 */ 743 */
755 mfs r11, rpid 744 mfs r11, rpid
756 nop 745 nop
757 bri 4
758 mfs r3, rear /* Get faulting address */
759 nop
760 746
761 /* If we are faulting a kernel address, we have to use the 747 /* If we are faulting a kernel address, we have to use the
762 * kernel page tables. 748 * kernel page tables.
@@ -792,7 +778,7 @@ ex_handler_done:
792 lwi r4, r5, 0 /* Get Linux PTE */ 778 lwi r4, r5, 0 /* Get Linux PTE */
793 779
794 andi r6, r4, _PAGE_PRESENT 780 andi r6, r4, _PAGE_PRESENT
795 beqi r6, ex7 781 beqi r6, ex10
796 782
797 ori r4, r4, _PAGE_ACCESSED 783 ori r4, r4, _PAGE_ACCESSED
798 swi r4, r5, 0 784 swi r4, r5, 0
@@ -805,9 +791,8 @@ ex_handler_done:
805 * Many of these bits are software only. Bits we don't set 791 * Many of these bits are software only. Bits we don't set
806 * here we (properly should) assume have the appropriate value. 792 * here we (properly should) assume have the appropriate value.
807 */ 793 */
794 brid finish_tlb_load
808 andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */ 795 andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
809
810 bri finish_tlb_load
811 ex10: 796 ex10:
812 /* The bailout. Restore registers to pre-exception conditions 797 /* The bailout. Restore registers to pre-exception conditions
813 * and call the heavyweights to help us out. 798 * and call the heavyweights to help us out.
@@ -837,9 +822,9 @@ ex_handler_done:
837 andi r5, r5, (MICROBLAZE_TLB_SIZE-1) 822 andi r5, r5, (MICROBLAZE_TLB_SIZE-1)
838 ori r6, r0, 1 823 ori r6, r0, 1
839 cmp r31, r5, r6 824 cmp r31, r5, r6
840 blti r31, sem 825 blti r31, ex12
841 addik r5, r6, 1 826 addik r5, r6, 1
842 sem: 827 ex12:
843 /* MS: save back current TLB index */ 828 /* MS: save back current TLB index */
844 swi r5, r0, TOPHYS(tlb_index) 829 swi r5, r0, TOPHYS(tlb_index)
845 830
@@ -859,7 +844,6 @@ ex_handler_done:
859 nop 844 nop
860 845
861 /* Done...restore registers and get out of here. */ 846 /* Done...restore registers and get out of here. */
862 ex12:
863 mts rpid, r11 847 mts rpid, r11
864 nop 848 nop
865 bri 4 849 bri 4
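Across the hunks above, the MicroBlaze exception entry now hands the handlers the faulting address in r3 (EAR) and the exception status in r4 (ESR), so the field masks move from r3 to r4 and the redundant "mfs rX, rear" reads disappear. As a rough illustration of the decoding the unaligned-access handler performs, here is a minimal C sketch; the masks are the ones used above, while the struct and function names are invented purely for illustration.

/* Illustrative decode of the ESR fields masked out in the handler above. */
struct unaligned_access {
	unsigned int reg;	/* register operand: ESR bits 0x3E0, shifted down */
	int is_store;		/* ESR[S], mask 0x400: store rather than load */
	int is_word;		/* ESR[W], mask 0x800: word rather than half-word */
};

static struct unaligned_access decode_esr(unsigned long esr)
{
	struct unaligned_access a;

	a.reg = (esr & 0x3E0) >> 5;	/* same shift the srl sequence performs */
	a.is_store = (esr & 0x400) != 0;
	a.is_word = (esr & 0x800) != 0;
	return a;
}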
diff --git a/arch/microblaze/kernel/misc.S b/arch/microblaze/kernel/misc.S
index df16c6287a8e..7cf86498326c 100644
--- a/arch/microblaze/kernel/misc.S
+++ b/arch/microblaze/kernel/misc.S
@@ -26,9 +26,10 @@
26 * We avoid flushing the pinned 0, 1 and possibly 2 entries. 26 * We avoid flushing the pinned 0, 1 and possibly 2 entries.
27 */ 27 */
28.globl _tlbia; 28.globl _tlbia;
29.type _tlbia, @function
29.align 4; 30.align 4;
30_tlbia: 31_tlbia:
31 addik r12, r0, 63 /* flush all entries (63 - 3) */ 32 addik r12, r0, MICROBLAZE_TLB_SIZE - 1 /* flush all entries (63 - 3) */
32 /* isync */ 33 /* isync */
33_tlbia_1: 34_tlbia_1:
34 mts rtlbx, r12 35 mts rtlbx, r12
@@ -41,11 +42,13 @@ _tlbia_1:
41 /* sync */ 42 /* sync */
42 rtsd r15, 8 43 rtsd r15, 8
43 nop 44 nop
45 .size _tlbia, . - _tlbia
44 46
45/* 47/*
46 * Flush MMU TLB for a particular address (in r5) 48 * Flush MMU TLB for a particular address (in r5)
47 */ 49 */
48.globl _tlbie; 50.globl _tlbie;
51.type _tlbie, @function
49.align 4; 52.align 4;
50_tlbie: 53_tlbie:
51 mts rtlbsx, r5 /* look up the address in TLB */ 54 mts rtlbsx, r5 /* look up the address in TLB */
@@ -59,17 +62,20 @@ _tlbie_1:
59 rtsd r15, 8 62 rtsd r15, 8
60 nop 63 nop
61 64
65 .size _tlbie, . - _tlbie
66
62/* 67/*
63 * Allocate TLB entry for early console 68 * Allocate TLB entry for early console
64 */ 69 */
65.globl early_console_reg_tlb_alloc; 70.globl early_console_reg_tlb_alloc;
71.type early_console_reg_tlb_alloc, @function
66.align 4; 72.align 4;
67early_console_reg_tlb_alloc: 73early_console_reg_tlb_alloc:
68 /* 74 /*
69 * Load a TLB entry for the UART, so that microblaze_progress() can use 75 * Load a TLB entry for the UART, so that microblaze_progress() can use
70 * the UARTs nice and early. We use a 4k real==virtual mapping. 76 * the UARTs nice and early. We use a 4k real==virtual mapping.
71 */ 77 */
72 ori r4, r0, 63 78 ori r4, r0, MICROBLAZE_TLB_SIZE - 1
73 mts rtlbx, r4 /* TLB slot 2 */ 79 mts rtlbx, r4 /* TLB slot 2 */
74 80
75 or r4,r5,r0 81 or r4,r5,r0
@@ -86,6 +92,8 @@ early_console_reg_tlb_alloc:
86 rtsd r15, 8 92 rtsd r15, 8
87 nop 93 nop
88 94
95 .size early_console_reg_tlb_alloc, . - early_console_reg_tlb_alloc
96
89/* 97/*
90 * Copy a whole page (4096 bytes). 98 * Copy a whole page (4096 bytes).
91 */ 99 */
@@ -104,6 +112,7 @@ early_console_reg_tlb_alloc:
104#define DCACHE_LINE_BYTES (4 * 4) 112#define DCACHE_LINE_BYTES (4 * 4)
105 113
106.globl copy_page; 114.globl copy_page;
115.type copy_page, @function
107.align 4; 116.align 4;
108copy_page: 117copy_page:
109 ori r11, r0, (PAGE_SIZE/DCACHE_LINE_BYTES) - 1 118 ori r11, r0, (PAGE_SIZE/DCACHE_LINE_BYTES) - 1
@@ -118,3 +127,5 @@ _copy_page_loop:
118 addik r11, r11, -1 127 addik r11, r11, -1
119 rtsd r15, 8 128 rtsd r15, 8
120 nop 129 nop
130
131 .size copy_page, . - copy_page
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 812f1bf06c9e..09bed44dfcd3 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -15,6 +15,7 @@
15#include <linux/bitops.h> 15#include <linux/bitops.h>
16#include <asm/system.h> 16#include <asm/system.h>
17#include <asm/pgalloc.h> 17#include <asm/pgalloc.h>
18#include <asm/uaccess.h> /* for USER_DS macros */
18#include <asm/cacheflush.h> 19#include <asm/cacheflush.h>
19 20
20void show_regs(struct pt_regs *regs) 21void show_regs(struct pt_regs *regs)
@@ -74,7 +75,10 @@ __setup("hlt", hlt_setup);
74 75
75void default_idle(void) 76void default_idle(void)
76{ 77{
77 if (!hlt_counter) { 78 if (likely(hlt_counter)) {
79 while (!need_resched())
80 cpu_relax();
81 } else {
78 clear_thread_flag(TIF_POLLING_NRFLAG); 82 clear_thread_flag(TIF_POLLING_NRFLAG);
79 smp_mb__after_clear_bit(); 83 smp_mb__after_clear_bit();
80 local_irq_disable(); 84 local_irq_disable();
@@ -82,9 +86,7 @@ void default_idle(void)
82 cpu_sleep(); 86 cpu_sleep();
83 local_irq_enable(); 87 local_irq_enable();
84 set_thread_flag(TIF_POLLING_NRFLAG); 88 set_thread_flag(TIF_POLLING_NRFLAG);
85 } else 89 }
86 while (!need_resched())
87 cpu_relax();
88} 90}
89 91
90void cpu_idle(void) 92void cpu_idle(void)
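The default_idle() rewrite above flips the test so that the hlt_counter case (pure polling) is marked as the likely() branch and the sleeping path becomes the else arm. A stand-alone sketch of the resulting control flow, with need_resched(), cpu_relax() and cpu_sleep() reduced to stubs purely for illustration:

#include <stdio.h>

/* Stubs standing in for the kernel helpers; the real loops run until the
 * scheduler asks for a reschedule. */
static int need_resched(void) { return 1; }
static void cpu_relax(void) { }
static void cpu_sleep(void) { puts("sleeping"); }

/* Non-zero disables sleeping in idle (managed by the hlt/nohlt handling
 * earlier in this file). */
static int hlt_counter;

static void default_idle_sketch(void)
{
	if (hlt_counter) {		/* likely(): just poll */
		while (!need_resched())
			cpu_relax();
	} else {			/* allowed to put the core to sleep */
		while (!need_resched())
			cpu_sleep();
	}
}

int main(void)
{
	default_idle_sketch();
	return 0;
}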
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index f974ec7aa357..17c98dbcec88 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -92,6 +92,12 @@ inline unsigned get_romfs_len(unsigned *addr)
92} 92}
93#endif /* CONFIG_MTD_UCLINUX_EBSS */ 93#endif /* CONFIG_MTD_UCLINUX_EBSS */
94 94
95#if defined(CONFIG_EARLY_PRINTK) && defined(CONFIG_SERIAL_UARTLITE_CONSOLE)
96#define eprintk early_printk
97#else
98#define eprintk printk
99#endif
100
95void __init machine_early_init(const char *cmdline, unsigned int ram, 101void __init machine_early_init(const char *cmdline, unsigned int ram,
96 unsigned int fdt, unsigned int msr) 102 unsigned int fdt, unsigned int msr)
97{ 103{
@@ -139,32 +145,32 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
139 setup_early_printk(NULL); 145 setup_early_printk(NULL);
140#endif 146#endif
141 147
142 early_printk("Ramdisk addr 0x%08x, ", ram); 148 eprintk("Ramdisk addr 0x%08x, ", ram);
143 if (fdt) 149 if (fdt)
144 early_printk("FDT at 0x%08x\n", fdt); 150 eprintk("FDT at 0x%08x\n", fdt);
145 else 151 else
146 early_printk("Compiled-in FDT at 0x%08x\n", 152 eprintk("Compiled-in FDT at 0x%08x\n",
147 (unsigned int)_fdt_start); 153 (unsigned int)_fdt_start);
148 154
149#ifdef CONFIG_MTD_UCLINUX 155#ifdef CONFIG_MTD_UCLINUX
150 early_printk("Found romfs @ 0x%08x (0x%08x)\n", 156 eprintk("Found romfs @ 0x%08x (0x%08x)\n",
151 romfs_base, romfs_size); 157 romfs_base, romfs_size);
152 early_printk("#### klimit %p ####\n", old_klimit); 158 eprintk("#### klimit %p ####\n", old_klimit);
153 BUG_ON(romfs_size < 0); /* What else can we do? */ 159 BUG_ON(romfs_size < 0); /* What else can we do? */
154 160
155 early_printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n", 161 eprintk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
156 romfs_size, romfs_base, (unsigned)&_ebss); 162 romfs_size, romfs_base, (unsigned)&_ebss);
157 163
158 early_printk("New klimit: 0x%08x\n", (unsigned)klimit); 164 eprintk("New klimit: 0x%08x\n", (unsigned)klimit);
159#endif 165#endif
160 166
161#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR 167#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
162 if (msr) 168 if (msr)
163 early_printk("!!!Your kernel has setup MSR instruction but " 169 eprintk("!!!Your kernel has setup MSR instruction but "
164 "CPU don't have it %d\n", msr); 170 "CPU don't have it %d\n", msr);
165#else 171#else
166 if (!msr) 172 if (!msr)
167 early_printk("!!!Your kernel not setup MSR instruction but " 173 eprintk("!!!Your kernel not setup MSR instruction but "
168 "CPU have it %d\n", msr); 174 "CPU have it %d\n", msr);
169#endif 175#endif
170 176
diff --git a/arch/microblaze/kernel/traps.c b/arch/microblaze/kernel/traps.c
index eaaaf805f31b..5e4570ef515c 100644
--- a/arch/microblaze/kernel/traps.c
+++ b/arch/microblaze/kernel/traps.c
@@ -22,13 +22,11 @@ void trap_init(void)
22 __enable_hw_exceptions(); 22 __enable_hw_exceptions();
23} 23}
24 24
25static int kstack_depth_to_print = 24; 25static unsigned long kstack_depth_to_print = 24;
26 26
27static int __init kstack_setup(char *s) 27static int __init kstack_setup(char *s)
28{ 28{
29 kstack_depth_to_print = strict_strtoul(s, 0, NULL); 29 return !strict_strtoul(s, 0, &kstack_depth_to_print);
30
31 return 1;
32} 30}
33__setup("kstack=", kstack_setup); 31__setup("kstack=", kstack_setup);
34 32
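The kstack_setup() change above leans on the strict_strtoul() convention: it returns 0 on success and writes the parsed value through its third argument, so negating the result yields the 1/0 value a __setup() handler is expected to return. A user-space analogue of the same parse-and-report pattern, using strtoul() since strict_strtoul() is kernel-only:

#include <stdio.h>
#include <stdlib.h>

/* Returns 1 when the whole string parsed as a number (option accepted),
 * 0 otherwise -- the same contract kstack_setup() now honours. */
static int parse_kstack(const char *s, unsigned long *depth)
{
	char *end;

	*depth = strtoul(s, &end, 0);
	return *s != '\0' && *end == '\0';
}

int main(void)
{
	unsigned long depth = 0;

	printf("accepted=%d depth=%lu\n", parse_kstack("24", &depth), depth);
	printf("accepted=%d\n", parse_kstack("24abc", &depth));
	return 0;
}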
diff --git a/arch/microblaze/lib/Makefile b/arch/microblaze/lib/Makefile
index b579db068c06..4dfe47d3cd91 100644
--- a/arch/microblaze/lib/Makefile
+++ b/arch/microblaze/lib/Makefile
@@ -10,5 +10,4 @@ else
10lib-y += memcpy.o memmove.o 10lib-y += memcpy.o memmove.o
11endif 11endif
12 12
13lib-$(CONFIG_NO_MMU) += uaccess.o 13lib-y += uaccess_old.o
14lib-$(CONFIG_MMU) += uaccess_old.o
diff --git a/arch/microblaze/lib/fastcopy.S b/arch/microblaze/lib/fastcopy.S
index 02e3ab4eddf3..fdc48bb065d8 100644
--- a/arch/microblaze/lib/fastcopy.S
+++ b/arch/microblaze/lib/fastcopy.S
@@ -30,8 +30,9 @@
30 */ 30 */
31 31
32#include <linux/linkage.h> 32#include <linux/linkage.h>
33 33 .text
34 .globl memcpy 34 .globl memcpy
35 .type memcpy, @function
35 .ent memcpy 36 .ent memcpy
36 37
37memcpy: 38memcpy:
@@ -345,9 +346,11 @@ a_done:
345 rtsd r15, 8 346 rtsd r15, 8
346 nop 347 nop
347 348
349.size memcpy, . - memcpy
348.end memcpy 350.end memcpy
349/*----------------------------------------------------------------------------*/ 351/*----------------------------------------------------------------------------*/
350 .globl memmove 352 .globl memmove
353 .type memmove, @function
351 .ent memmove 354 .ent memmove
352 355
353memmove: 356memmove:
@@ -659,4 +662,5 @@ d_done:
659 rtsd r15, 8 662 rtsd r15, 8
660 nop 663 nop
661 664
665.size memmove, . - memmove
662.end memmove 666.end memmove
diff --git a/arch/microblaze/lib/memcpy.c b/arch/microblaze/lib/memcpy.c
index cc2108b6b260..014bac92bdff 100644
--- a/arch/microblaze/lib/memcpy.c
+++ b/arch/microblaze/lib/memcpy.c
@@ -53,7 +53,7 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
53 const uint32_t *i_src; 53 const uint32_t *i_src;
54 uint32_t *i_dst; 54 uint32_t *i_dst;
55 55
56 if (c >= 4) { 56 if (likely(c >= 4)) {
57 unsigned value, buf_hold; 57 unsigned value, buf_hold;
58 58
59 /* Align the dstination to a word boundry. */ 59 /* Align the dstination to a word boundry. */
diff --git a/arch/microblaze/lib/memset.c b/arch/microblaze/lib/memset.c
index 4df851d41a29..ecfb663e1fc1 100644
--- a/arch/microblaze/lib/memset.c
+++ b/arch/microblaze/lib/memset.c
@@ -33,22 +33,23 @@
33#ifdef __HAVE_ARCH_MEMSET 33#ifdef __HAVE_ARCH_MEMSET
34void *memset(void *v_src, int c, __kernel_size_t n) 34void *memset(void *v_src, int c, __kernel_size_t n)
35{ 35{
36
37 char *src = v_src; 36 char *src = v_src;
38#ifdef CONFIG_OPT_LIB_FUNCTION 37#ifdef CONFIG_OPT_LIB_FUNCTION
39 uint32_t *i_src; 38 uint32_t *i_src;
40 uint32_t w32; 39 uint32_t w32 = 0;
41#endif 40#endif
42 /* Truncate c to 8 bits */ 41 /* Truncate c to 8 bits */
43 c = (c & 0xFF); 42 c = (c & 0xFF);
44 43
45#ifdef CONFIG_OPT_LIB_FUNCTION 44#ifdef CONFIG_OPT_LIB_FUNCTION
46 /* Make a repeating word out of it */ 45 if (unlikely(c)) {
47 w32 = c; 46 /* Make a repeating word out of it */
48 w32 |= w32 << 8; 47 w32 = c;
49 w32 |= w32 << 16; 48 w32 |= w32 << 8;
49 w32 |= w32 << 16;
50 }
50 51
51 if (n >= 4) { 52 if (likely(n >= 4)) {
52 /* Align the destination to a word boundary */ 53 /* Align the destination to a word boundary */
53 /* This is done in an endian independant manner */ 54 /* This is done in an endian independant manner */
54 switch ((unsigned) src & 3) { 55 switch ((unsigned) src & 3) {
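The memset() hunk above only builds the repeating fill word when c is non-zero, since w32 now starts out zero-initialised; the construction itself is the usual byte-smearing trick. A small stand-alone illustration of the pattern:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int c = 0xAB & 0xFF;	/* fill byte, truncated as in memset() above */
	uint32_t w32 = 0;

	if (c) {		/* the new code skips the shifts for c == 0 */
		w32 = c;
		w32 |= w32 << 8;
		w32 |= w32 << 16;
	}
	printf("fill word: 0x%08x\n", (unsigned int)w32);	/* 0xabababab */
	return 0;
}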
diff --git a/arch/microblaze/lib/uaccess.c b/arch/microblaze/lib/uaccess.c
deleted file mode 100644
index a853fe089c44..000000000000
--- a/arch/microblaze/lib/uaccess.c
+++ /dev/null
@@ -1,48 +0,0 @@
1/*
2 * Copyright (C) 2006 Atmark Techno, Inc.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 */
8
9#include <linux/string.h>
10#include <asm/uaccess.h>
11
12#include <asm/bug.h>
13
14long strnlen_user(const char __user *src, long count)
15{
16 return strlen(src) + 1;
17}
18
19#define __do_strncpy_from_user(dst, src, count, res) \
20 do { \
21 char *tmp; \
22 strncpy(dst, src, count); \
23 for (tmp = dst; *tmp && count > 0; tmp++, count--) \
24 ; \
25 res = (tmp - dst); \
26 } while (0)
27
28long __strncpy_from_user(char *dst, const char __user *src, long count)
29{
30 long res;
31 __do_strncpy_from_user(dst, src, count, res);
32 return res;
33}
34
35long strncpy_from_user(char *dst, const char __user *src, long count)
36{
37 long res = -EFAULT;
38 if (access_ok(VERIFY_READ, src, 1))
39 __do_strncpy_from_user(dst, src, count, res);
40 return res;
41}
42
43unsigned long __copy_tofrom_user(void __user *to,
44 const void __user *from, unsigned long size)
45{
46 memcpy(to, from, size);
47 return 0;
48}
diff --git a/arch/microblaze/lib/uaccess_old.S b/arch/microblaze/lib/uaccess_old.S
index 67f991c14b8a..5810cec54a7a 100644
--- a/arch/microblaze/lib/uaccess_old.S
+++ b/arch/microblaze/lib/uaccess_old.S
@@ -22,6 +22,7 @@
22 22
23 .text 23 .text
24.globl __strncpy_user; 24.globl __strncpy_user;
25.type __strncpy_user, @function
25.align 4; 26.align 4;
26__strncpy_user: 27__strncpy_user:
27 28
@@ -50,7 +51,7 @@ __strncpy_user:
503: 513:
51 rtsd r15,8 52 rtsd r15,8
52 nop 53 nop
53 54 .size __strncpy_user, . - __strncpy_user
54 55
55 .section .fixup, "ax" 56 .section .fixup, "ax"
56 .align 2 57 .align 2
@@ -72,6 +73,7 @@ __strncpy_user:
72 73
73 .text 74 .text
74.globl __strnlen_user; 75.globl __strnlen_user;
76.type __strnlen_user, @function
75.align 4; 77.align 4;
76__strnlen_user: 78__strnlen_user:
77 addik r3,r6,0 79 addik r3,r6,0
@@ -90,7 +92,7 @@ __strnlen_user:
903: 923:
91 rtsd r15,8 93 rtsd r15,8
92 nop 94 nop
93 95 .size __strnlen_user, . - __strnlen_user
94 96
95 .section .fixup,"ax" 97 .section .fixup,"ax"
964: 984:
@@ -108,6 +110,7 @@ __strnlen_user:
108 */ 110 */
109 .text 111 .text
110.globl __copy_tofrom_user; 112.globl __copy_tofrom_user;
113.type __copy_tofrom_user, @function
111.align 4; 114.align 4;
112__copy_tofrom_user: 115__copy_tofrom_user:
113 /* 116 /*
@@ -116,20 +119,34 @@ __copy_tofrom_user:
116 * r7, r3 - count 119 * r7, r3 - count
117 * r4 - tempval 120 * r4 - tempval
118 */ 121 */
119 addik r3,r7,0 122 beqid r7, 3f /* zero size is not likely */
120 beqi r3,3f 123 andi r3, r7, 0x3 /* filter add count */
1211: 124 bneid r3, 4f /* if is odd value then byte copying */
122 lbu r4,r6,r0 125 or r3, r5, r6 /* find if is any to/from unaligned */
123 addik r6,r6,1 126 andi r3, r3, 0x3 /* mask unaligned */
1242: 127 bneid r3, 1f /* it is unaligned -> then jump */
125 sb r4,r5,r0 128 or r3, r0, r0
126 addik r3,r3,-1 129
127 bneid r3,1b 130/* at least one 4 byte copy */
128 addik r5,r5,1 /* delay slot */ 1315: lw r4, r6, r3
1326: sw r4, r5, r3
133 addik r7, r7, -4
134 bneid r7, 5b
135 addik r3, r3, 4
136 addik r3, r7, 0
137 rtsd r15, 8
138 nop
1394: or r3, r0, r0
1401: lbu r4,r6,r3
1412: sb r4,r5,r3
142 addik r7,r7,-1
143 bneid r7,1b
144 addik r3,r3,1 /* delay slot */
1293: 1453:
146 addik r3,r7,0
130 rtsd r15,8 147 rtsd r15,8
131 nop 148 nop
132 149 .size __copy_tofrom_user, . - __copy_tofrom_user
133 150
134 .section __ex_table,"a" 151 .section __ex_table,"a"
135 .word 1b,3b,2b,3b 152 .word 1b,3b,2b,3b,5b,3b,6b,3b
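The new __copy_tofrom_user above copies a word at a time only when the length is a multiple of four and both pointers are word aligned, falling back to the old byte loop otherwise; the extended __ex_table entry covers the new faulting loads and stores. Expressed as C, and omitting the fault fixups that make the real routine return the number of bytes left uncopied, the selection logic is roughly:

#include <stddef.h>
#include <stdint.h>

/* Sketch of the copy-strategy choice only; a real user copy would catch
 * faults and report how many bytes remain. */
static unsigned long copy_tofrom_sketch(void *to, const void *from, unsigned long n)
{
	unsigned char *d = to;
	const unsigned char *s = from;

	if (n == 0)
		return 0;

	if (!(n & 3) && !(((uintptr_t)to | (uintptr_t)from) & 3)) {
		while (n) {				/* aligned word copy */
			*(uint32_t *)(void *)d = *(const uint32_t *)(const void *)s;
			d += 4;
			s += 4;
			n -= 4;
		}
		return 0;
	}

	while (n--)					/* odd size or unaligned: bytes */
		*d++ = *s++;
	return 0;
}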
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index d9d249a66ff2..7af87f4b2c2c 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -106,7 +106,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
106 regs->esr = error_code; 106 regs->esr = error_code;
107 107
108 /* On a kernel SLB miss we can only check for a valid exception entry */ 108 /* On a kernel SLB miss we can only check for a valid exception entry */
109 if (kernel_mode(regs) && (address >= TASK_SIZE)) { 109 if (unlikely(kernel_mode(regs) && (address >= TASK_SIZE))) {
110 printk(KERN_WARNING "kernel task_size exceed"); 110 printk(KERN_WARNING "kernel task_size exceed");
111 _exception(SIGSEGV, regs, code, address); 111 _exception(SIGSEGV, regs, code, address);
112 } 112 }
@@ -122,7 +122,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
122 } 122 }
123#endif /* CONFIG_KGDB */ 123#endif /* CONFIG_KGDB */
124 124
125 if (in_atomic() || !mm) { 125 if (unlikely(in_atomic() || !mm)) {
126 if (kernel_mode(regs)) 126 if (kernel_mode(regs))
127 goto bad_area_nosemaphore; 127 goto bad_area_nosemaphore;
128 128
@@ -150,7 +150,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
150 * source. If this is invalid we can skip the address space check, 150 * source. If this is invalid we can skip the address space check,
151 * thus avoiding the deadlock. 151 * thus avoiding the deadlock.
152 */ 152 */
153 if (!down_read_trylock(&mm->mmap_sem)) { 153 if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
154 if (kernel_mode(regs) && !search_exception_tables(regs->pc)) 154 if (kernel_mode(regs) && !search_exception_tables(regs->pc))
155 goto bad_area_nosemaphore; 155 goto bad_area_nosemaphore;
156 156
@@ -158,16 +158,16 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
158 } 158 }
159 159
160 vma = find_vma(mm, address); 160 vma = find_vma(mm, address);
161 if (!vma) 161 if (unlikely(!vma))
162 goto bad_area; 162 goto bad_area;
163 163
164 if (vma->vm_start <= address) 164 if (vma->vm_start <= address)
165 goto good_area; 165 goto good_area;
166 166
167 if (!(vma->vm_flags & VM_GROWSDOWN)) 167 if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
168 goto bad_area; 168 goto bad_area;
169 169
170 if (!is_write) 170 if (unlikely(!is_write))
171 goto bad_area; 171 goto bad_area;
172 172
173 /* 173 /*
@@ -179,7 +179,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
179 * before setting the user r1. Thus we allow the stack to 179 * before setting the user r1. Thus we allow the stack to
180 * expand to 1MB without further checks. 180 * expand to 1MB without further checks.
181 */ 181 */
182 if (address + 0x100000 < vma->vm_end) { 182 if (unlikely(address + 0x100000 < vma->vm_end)) {
183 183
184 /* get user regs even if this fault is in kernel mode */ 184 /* get user regs even if this fault is in kernel mode */
185 struct pt_regs *uregs = current->thread.regs; 185 struct pt_regs *uregs = current->thread.regs;
@@ -209,15 +209,15 @@ good_area:
209 code = SEGV_ACCERR; 209 code = SEGV_ACCERR;
210 210
211 /* a write */ 211 /* a write */
212 if (is_write) { 212 if (unlikely(is_write)) {
213 if (!(vma->vm_flags & VM_WRITE)) 213 if (unlikely(!(vma->vm_flags & VM_WRITE)))
214 goto bad_area; 214 goto bad_area;
215 /* a read */ 215 /* a read */
216 } else { 216 } else {
217 /* protection fault */ 217 /* protection fault */
218 if (error_code & 0x08000000) 218 if (unlikely(error_code & 0x08000000))
219 goto bad_area; 219 goto bad_area;
220 if (!(vma->vm_flags & (VM_READ | VM_EXEC))) 220 if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC))))
221 goto bad_area; 221 goto bad_area;
222 } 222 }
223 223
@@ -235,7 +235,7 @@ survive:
235 goto do_sigbus; 235 goto do_sigbus;
236 BUG(); 236 BUG();
237 } 237 }
238 if (fault & VM_FAULT_MAJOR) 238 if (unlikely(fault & VM_FAULT_MAJOR))
239 current->maj_flt++; 239 current->maj_flt++;
240 else 240 else
241 current->min_flt++; 241 current->min_flt++;
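Most of the fault.c hunk above simply annotates the error paths with unlikely(), matching the likely()/unlikely() hints added elsewhere in this series; in the kernel these are thin wrappers around __builtin_expect, which biases code layout toward the expected branch. A stand-alone illustration using the same construct:

#include <stdio.h>

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

int main(void)
{
	int in_atomic = 0;

	if (unlikely(in_atomic))	/* hint: this branch is rarely taken */
		puts("slow path");
	else
		puts("fast path");
	return 0;
}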
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 1608e2e1a44a..40bc10ede097 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -165,7 +165,6 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
165 for (addr = begin; addr < end; addr += PAGE_SIZE) { 165 for (addr = begin; addr < end; addr += PAGE_SIZE) {
166 ClearPageReserved(virt_to_page(addr)); 166 ClearPageReserved(virt_to_page(addr));
167 init_page_count(virt_to_page(addr)); 167 init_page_count(virt_to_page(addr));
168 memset((void *)addr, 0xcc, PAGE_SIZE);
169 free_page(addr); 168 free_page(addr);
170 totalram_pages++; 169 totalram_pages++;
171 } 170 }
@@ -208,14 +207,6 @@ void __init mem_init(void)
208} 207}
209 208
210#ifndef CONFIG_MMU 209#ifndef CONFIG_MMU
211/* Check against bounds of physical memory */
212int ___range_ok(unsigned long addr, unsigned long size)
213{
214 return ((addr < memory_start) ||
215 ((addr + size) > memory_end));
216}
217EXPORT_SYMBOL(___range_ok);
218
219int page_is_ram(unsigned long pfn) 210int page_is_ram(unsigned long pfn)
220{ 211{
221 return __range_ok(pfn, 0); 212 return __range_ok(pfn, 0);
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
index 63a6fd07c48f..d31312cde6ea 100644
--- a/arch/microblaze/mm/pgtable.c
+++ b/arch/microblaze/mm/pgtable.c
@@ -154,7 +154,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
154 err = 0; 154 err = 0;
155 set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, 155 set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
156 __pgprot(flags))); 156 __pgprot(flags)));
157 if (mem_init_done) 157 if (unlikely(mem_init_done))
158 flush_HPTE(0, va, pmd_val(*pd)); 158 flush_HPTE(0, va, pmd_val(*pd));
159 /* flush_HPTE(0, va, pg); */ 159 /* flush_HPTE(0, va, pg); */
160 } 160 }
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
index c1b475a941eb..a9b91ed3d4b9 100644
--- a/arch/powerpc/include/asm/asm-compat.h
+++ b/arch/powerpc/include/asm/asm-compat.h
@@ -28,6 +28,7 @@
28#define PPC_LLARX(t, a, b, eh) PPC_LDARX(t, a, b, eh) 28#define PPC_LLARX(t, a, b, eh) PPC_LDARX(t, a, b, eh)
29#define PPC_STLCX stringify_in_c(stdcx.) 29#define PPC_STLCX stringify_in_c(stdcx.)
30#define PPC_CNTLZL stringify_in_c(cntlzd) 30#define PPC_CNTLZL stringify_in_c(cntlzd)
31#define PPC_LR_STKOFF 16
31 32
32/* Move to CR, single-entry optimized version. Only available 33/* Move to CR, single-entry optimized version. Only available
33 * on POWER4 and later. 34 * on POWER4 and later.
@@ -51,6 +52,7 @@
51#define PPC_STLCX stringify_in_c(stwcx.) 52#define PPC_STLCX stringify_in_c(stwcx.)
52#define PPC_CNTLZL stringify_in_c(cntlzw) 53#define PPC_CNTLZL stringify_in_c(cntlzw)
53#define PPC_MTOCRF stringify_in_c(mtcrf) 54#define PPC_MTOCRF stringify_in_c(mtcrf)
55#define PPC_LR_STKOFF 4
54 56
55#endif 57#endif
56 58
diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S
index 2d29752cbe16..b485a87c94e1 100644
--- a/arch/powerpc/kernel/misc.S
+++ b/arch/powerpc/kernel/misc.S
@@ -127,3 +127,31 @@ _GLOBAL(__setup_cpu_power7)
127_GLOBAL(__restore_cpu_power7) 127_GLOBAL(__restore_cpu_power7)
128 /* place holder */ 128 /* place holder */
129 blr 129 blr
130
131#ifdef CONFIG_EVENT_TRACING
132/*
133 * Get a minimal set of registers for our caller's nth caller.
134 * r3 = regs pointer, r5 = n.
135 *
136 * We only get R1 (stack pointer), NIP (next instruction pointer)
137 * and LR (link register). These are all we can get in the
138 * general case without doing complicated stack unwinding, but
139 * fortunately they are enough to do a stack backtrace, which
140 * is all we need them for.
141 */
142_GLOBAL(perf_arch_fetch_caller_regs)
143 mr r6,r1
144 cmpwi r5,0
145 mflr r4
146 ble 2f
147 mtctr r5
1481: PPC_LL r6,0(r6)
149 bdnz 1b
150 PPC_LL r4,PPC_LR_STKOFF(r6)
1512: PPC_LL r7,0(r6)
152 PPC_LL r7,PPC_LR_STKOFF(r7)
153 PPC_STL r6,GPR1-STACK_FRAME_OVERHEAD(r3)
154 PPC_STL r4,_NIP-STACK_FRAME_OVERHEAD(r3)
155 PPC_STL r7,_LINK-STACK_FRAME_OVERHEAD(r3)
156 blr
157#endif /* CONFIG_EVENT_TRACING */
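perf_arch_fetch_caller_regs above walks n stack frames through the PowerPC back chain (each frame's first word points at the previous frame, with the saved LR at PPC_LR_STKOFF) and records the resulting stack pointer, return address and one further link value. A rough C rendering of that walk; the frame-layout constant here is an assumption for illustration, not the kernel's definition:

/* Minimal sketch of a back-chain walk; LR_SLOT is an assumed word index for
 * the saved link register (PPC_LR_STKOFF in the code above). */
#define LR_SLOT 2

struct caller_regs_sketch {
	unsigned long gpr1;	/* stack pointer after skipping n frames */
	unsigned long nip;	/* return address for that frame */
	unsigned long link;	/* saved LR one frame further up */
};

static void fetch_caller_regs_sketch(struct caller_regs_sketch *regs,
				     unsigned long *sp, unsigned long cur_lr, int n)
{
	unsigned long *frame = sp;
	unsigned long nip = cur_lr;
	int i;

	for (i = 0; i < n; i++)			/* follow the back chain n times */
		frame = (unsigned long *)frame[0];
	if (n > 0)
		nip = frame[LR_SLOT];

	regs->gpr1 = (unsigned long)frame;
	regs->nip = nip;
	regs->link = ((unsigned long *)frame[0])[LR_SLOT];
}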
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index a97d69525829..14e0479d3888 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -24,8 +24,8 @@
24/* Symbols defined by linker scripts */ 24/* Symbols defined by linker scripts */
25extern char input_data[]; 25extern char input_data[];
26extern int input_len; 26extern int input_len;
27extern int _text; 27extern char _text, _end;
28extern int _end; 28extern char _bss, _ebss;
29 29
30static void error(char *m); 30static void error(char *m);
31 31
@@ -129,12 +129,12 @@ unsigned long decompress_kernel(void)
129 unsigned long output_addr; 129 unsigned long output_addr;
130 unsigned char *output; 130 unsigned char *output;
131 131
132 check_ipl_parmblock((void *) 0, (unsigned long) output + SZ__bss_start);
133 memset(&_bss, 0, &_ebss - &_bss);
132 free_mem_ptr = (unsigned long)&_end; 134 free_mem_ptr = (unsigned long)&_end;
133 free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; 135 free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
134 output = (unsigned char *) ((free_mem_end_ptr + 4095UL) & -4096UL); 136 output = (unsigned char *) ((free_mem_end_ptr + 4095UL) & -4096UL);
135 137
136 check_ipl_parmblock((void *) 0, (unsigned long) output + SZ__bss_start);
137
138#ifdef CONFIG_BLK_DEV_INITRD 138#ifdef CONFIG_BLK_DEV_INITRD
139 /* 139 /*
140 * Move the initrd right behind the end of the decompressed 140 * Move the initrd right behind the end of the decompressed
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index 67ee6c3c6bb3..1741c1556a4e 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -110,6 +110,7 @@ extern void pfault_fini(void);
110#endif /* CONFIG_PFAULT */ 110#endif /* CONFIG_PFAULT */
111 111
112extern void cmma_init(void); 112extern void cmma_init(void);
113extern int memcpy_real(void *, void *, size_t);
113 114
114#define finish_arch_switch(prev) do { \ 115#define finish_arch_switch(prev) do { \
115 set_fs(current->thread.mm_segment); \ 116 set_fs(current->thread.mm_segment); \
@@ -218,8 +219,8 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
218 " l %0,%2\n" 219 " l %0,%2\n"
219 "0: nr %0,%5\n" 220 "0: nr %0,%5\n"
220 " lr %1,%0\n" 221 " lr %1,%0\n"
221 " or %0,%2\n" 222 " or %0,%3\n"
222 " or %1,%3\n" 223 " or %1,%4\n"
223 " cs %0,%1,%2\n" 224 " cs %0,%1,%2\n"
224 " jnl 1f\n" 225 " jnl 1f\n"
225 " xr %1,%0\n" 226 " xr %1,%0\n"
@@ -239,8 +240,8 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
239 " l %0,%2\n" 240 " l %0,%2\n"
240 "0: nr %0,%5\n" 241 "0: nr %0,%5\n"
241 " lr %1,%0\n" 242 " lr %1,%0\n"
242 " or %0,%2\n" 243 " or %0,%3\n"
243 " or %1,%3\n" 244 " or %1,%4\n"
244 " cs %0,%1,%2\n" 245 " cs %0,%1,%2\n"
245 " jnl 1f\n" 246 " jnl 1f\n"
246 " xr %1,%0\n" 247 " xr %1,%0\n"
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index ca4a62bd862f..9d1f76702d47 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -517,7 +517,10 @@ startup:
517 lhi %r1,2 # mode 2 = esame (dump) 517 lhi %r1,2 # mode 2 = esame (dump)
518 sigp %r1,%r0,0x12 # switch to esame mode 518 sigp %r1,%r0,0x12 # switch to esame mode
519 sam64 # switch to 64 bit mode 519 sam64 # switch to 64 bit mode
520 larl %r13,4f
521 lmh %r0,%r15,0(%r13) # clear high-order half
520 jg startup_continue 522 jg startup_continue
5234: .fill 16,4,0x0
521#else 524#else
522 mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0) 525 mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0)
523 l %r13,4f-.LPG0(%r13) 526 l %r13,4f-.LPG0(%r13)
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 39580e768658..1f70970de0aa 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -21,7 +21,6 @@ startup_continue:
21 larl %r1,sched_clock_base_cc 21 larl %r1,sched_clock_base_cc
22 mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK 22 mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK
23 larl %r13,.LPG1 # get base 23 larl %r13,.LPG1 # get base
24 lmh %r0,%r15,.Lzero64-.LPG1(%r13) # clear high-order half
25 lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers 24 lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
26 lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area 25 lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
27 # move IPL device to lowcore 26 # move IPL device to lowcore
@@ -67,7 +66,6 @@ startup_continue:
67.L4malign:.quad 0xffffffffffc00000 66.L4malign:.quad 0xffffffffffc00000
68.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8 67.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
69.Lnop: .long 0x07000700 68.Lnop: .long 0x07000700
70.Lzero64:.fill 16,4,0x0
71.Lparmaddr: 69.Lparmaddr:
72 .quad PARMAREA 70 .quad PARMAREA
73 .align 64 71 .align 64
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 77a63ae419f0..ba363d99de43 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -401,7 +401,7 @@ setup_lowcore(void)
401 * Setup lowcore for boot cpu 401 * Setup lowcore for boot cpu
402 */ 402 */
403 BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096); 403 BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096);
404 lc = __alloc_bootmem(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0); 404 lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
405 lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; 405 lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
406 lc->restart_psw.addr = 406 lc->restart_psw.addr =
407 PSW_ADDR_AMODE | (unsigned long) restart_int_handler; 407 PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
@@ -433,7 +433,7 @@ setup_lowcore(void)
433#ifndef CONFIG_64BIT 433#ifndef CONFIG_64BIT
434 if (MACHINE_HAS_IEEE) { 434 if (MACHINE_HAS_IEEE) {
435 lc->extended_save_area_addr = (__u32) 435 lc->extended_save_area_addr = (__u32)
436 __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0); 436 __alloc_bootmem_low(PAGE_SIZE, PAGE_SIZE, 0);
437 /* enable extended save area */ 437 /* enable extended save area */
438 __ctl_set_bit(14, 29); 438 __ctl_set_bit(14, 29);
439 } 439 }
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 29f65bce55e1..d7d24fc3d6b7 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -292,9 +292,9 @@ static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
292 zfcpdump_save_areas[cpu] = kmalloc(sizeof(struct save_area), GFP_KERNEL); 292 zfcpdump_save_areas[cpu] = kmalloc(sizeof(struct save_area), GFP_KERNEL);
293 while (raw_sigp(phy_cpu, sigp_stop_and_store_status) == sigp_busy) 293 while (raw_sigp(phy_cpu, sigp_stop_and_store_status) == sigp_busy)
294 cpu_relax(); 294 cpu_relax();
295 memcpy(zfcpdump_save_areas[cpu], 295 memcpy_real(zfcpdump_save_areas[cpu],
296 (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE, 296 (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
297 sizeof(struct save_area)); 297 sizeof(struct save_area));
298} 298}
299 299
300struct save_area *zfcpdump_save_areas[NR_CPUS + 1]; 300struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 81756271dc44..a8c2af8c650f 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -59,3 +59,29 @@ long probe_kernel_write(void *dst, void *src, size_t size)
59 } 59 }
60 return copied < 0 ? -EFAULT : 0; 60 return copied < 0 ? -EFAULT : 0;
61} 61}
62
63int memcpy_real(void *dest, void *src, size_t count)
64{
65 register unsigned long _dest asm("2") = (unsigned long) dest;
66 register unsigned long _len1 asm("3") = (unsigned long) count;
67 register unsigned long _src asm("4") = (unsigned long) src;
68 register unsigned long _len2 asm("5") = (unsigned long) count;
69 unsigned long flags;
70 int rc = -EFAULT;
71
72 if (!count)
73 return 0;
74 flags = __raw_local_irq_stnsm(0xf8UL);
75 asm volatile (
76 "0: mvcle %1,%2,0x0\n"
77 "1: jo 0b\n"
78 " lhi %0,0x0\n"
79 "2:\n"
80 EX_TABLE(1b,2b)
81 : "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
82 "+d" (_len2), "=m" (*((long *) dest))
83 : "m" (*((long *) src))
84 : "cc", "memory");
85 __raw_local_irq_ssm(flags);
86 return rc;
87}
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 39ed8722d11a..6c13b92742e8 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -836,6 +836,8 @@ static void __init sh_eth_init(struct sh_eth_plat_data *pd)
836 pd->mac_addr[i] = mac_read(a, 0x10 + i); 836 pd->mac_addr[i] = mac_read(a, 0x10 + i);
837 msleep(10); 837 msleep(10);
838 } 838 }
839
840 i2c_put_adapter(a);
839} 841}
840#else 842#else
841static void __init sh_eth_init(struct sh_eth_plat_data *pd) 843static void __init sh_eth_init(struct sh_eth_plat_data *pd)
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index 66cdbc3c7af9..ccaa290e9aba 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -52,6 +52,13 @@
52 * and change SW41 to use 720p 52 * and change SW41 to use 720p
53 */ 53 */
54 54
55/*
56 * about sound
57 *
58 * This setup.c supports FSI slave mode.
59 * Please change J20, J21, J22 pin to 1-2 connection.
60 */
61
55/* Heartbeat */ 62/* Heartbeat */
56static struct resource heartbeat_resource = { 63static struct resource heartbeat_resource = {
57 .start = PA_LED, 64 .start = PA_LED,
@@ -276,6 +283,7 @@ static struct clk fsimcka_clk = {
276 .rate = 0, /* unknown */ 283 .rate = 0, /* unknown */
277}; 284};
278 285
286/* change J20, J21, J22 pin to 1-2 connection to use slave mode */
279struct sh_fsi_platform_info fsi_info = { 287struct sh_fsi_platform_info fsi_info = {
280 .porta_flags = SH_FSI_BRS_INV | 288 .porta_flags = SH_FSI_BRS_INV |
281 SH_FSI_OUT_SLAVE_MODE | 289 SH_FSI_OUT_SLAVE_MODE |
diff --git a/arch/sh/include/cpu-sh4/cpu/mmu_context.h b/arch/sh/include/cpu-sh4/cpu/mmu_context.h
index 03ea75c5315d..310ec92f2759 100644
--- a/arch/sh/include/cpu-sh4/cpu/mmu_context.h
+++ b/arch/sh/include/cpu-sh4/cpu/mmu_context.h
@@ -19,6 +19,8 @@
19 19
20#define MMUCR 0xFF000010 /* MMU Control Register */ 20#define MMUCR 0xFF000010 /* MMU Control Register */
21 21
22#define MMU_ITLB_ADDRESS_ARRAY 0xF2000000
23#define MMU_ITLB_ADDRESS_ARRAY2 0xF2800000
22#define MMU_UTLB_ADDRESS_ARRAY 0xF6000000 24#define MMU_UTLB_ADDRESS_ARRAY 0xF6000000
23#define MMU_UTLB_ADDRESS_ARRAY2 0xF6800000 25#define MMU_UTLB_ADDRESS_ARRAY2 0xF6800000
24#define MMU_PAGE_ASSOC_BIT 0x80 26#define MMU_PAGE_ASSOC_BIT 0x80
diff --git a/arch/sh/include/cpu-sh4/cpu/watchdog.h b/arch/sh/include/cpu-sh4/cpu/watchdog.h
index 7672301d0c70..7f62b9380938 100644
--- a/arch/sh/include/cpu-sh4/cpu/watchdog.h
+++ b/arch/sh/include/cpu-sh4/cpu/watchdog.h
@@ -21,6 +21,12 @@
21#define WTCNT 0xffcc0000 /*WDTST*/ 21#define WTCNT 0xffcc0000 /*WDTST*/
22#define WTST WTCNT 22#define WTST WTCNT
23#define WTBST 0xffcc0008 /*WDTBST*/ 23#define WTBST 0xffcc0008 /*WDTBST*/
24/* Register definitions */
25#elif defined(CONFIG_CPU_SUBTYPE_SH7722) || \
26 defined(CONFIG_CPU_SUBTYPE_SH7723) || \
27 defined(CONFIG_CPU_SUBTYPE_SH7724)
28#define WTCNT 0xa4520000
29#define WTCSR 0xa4520004
24#else 30#else
25/* Register definitions */ 31/* Register definitions */
26#define WTCNT 0xffc00008 32#define WTCNT 0xffc00008
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index bd1c497280a6..94739ee7aa74 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -727,7 +727,7 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
727 unsigned char *end, struct module *mod) 727 unsigned char *end, struct module *mod)
728{ 728{
729 struct rb_node **rb_node = &cie_root.rb_node; 729 struct rb_node **rb_node = &cie_root.rb_node;
730 struct rb_node *parent; 730 struct rb_node *parent = *rb_node;
731 struct dwarf_cie *cie; 731 struct dwarf_cie *cie;
732 unsigned long flags; 732 unsigned long flags;
733 int count; 733 int count;
@@ -856,7 +856,7 @@ static int dwarf_parse_fde(void *entry, u32 entry_type,
856 unsigned char *end, struct module *mod) 856 unsigned char *end, struct module *mod)
857{ 857{
858 struct rb_node **rb_node = &fde_root.rb_node; 858 struct rb_node **rb_node = &fde_root.rb_node;
859 struct rb_node *parent; 859 struct rb_node *parent = *rb_node;
860 struct dwarf_fde *fde; 860 struct dwarf_fde *fde;
861 struct dwarf_cie *cie; 861 struct dwarf_cie *cie;
862 unsigned long flags; 862 unsigned long flags;
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 0fd7b41f0a22..273f890b17ae 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -112,7 +112,7 @@ void cpu_idle(void)
112 } 112 }
113} 113}
114 114
115void __cpuinit select_idle_routine(void) 115void __init select_idle_routine(void)
116{ 116{
117 /* 117 /*
118 * If a platform has set its own idle routine, leave it alone. 118 * If a platform has set its own idle routine, leave it alone.
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
index 9f253e9cce01..81b6de41ae5d 100644
--- a/arch/sh/kernel/perf_event.c
+++ b/arch/sh/kernel/perf_event.c
@@ -315,7 +315,7 @@ void hw_perf_disable(void)
315 sh_pmu->disable_all(); 315 sh_pmu->disable_all();
316} 316}
317 317
318int register_sh_pmu(struct sh_pmu *pmu) 318int __cpuinit register_sh_pmu(struct sh_pmu *pmu)
319{ 319{
320 if (sh_pmu) 320 if (sh_pmu)
321 return -EBUSY; 321 return -EBUSY;
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
index c90957a459ac..c0d40f671ecd 100644
--- a/arch/sh/kernel/process_64.c
+++ b/arch/sh/kernel/process_64.c
@@ -504,13 +504,6 @@ out:
504 return error; 504 return error;
505} 505}
506 506
507/*
508 * These bracket the sleeping functions..
509 */
510extern void interruptible_sleep_on(wait_queue_head_t *q);
511
512#define mid_sched ((unsigned long) interruptible_sleep_on)
513
514#ifdef CONFIG_FRAME_POINTER 507#ifdef CONFIG_FRAME_POINTER
515static int in_sh64_switch_to(unsigned long pc) 508static int in_sh64_switch_to(unsigned long pc)
516{ 509{
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index a4662e2782c3..3cc21933063b 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -323,6 +323,7 @@ static void __clear_pmb_entry(struct pmb_entry *pmbe)
323 writel_uncached(data_val & ~PMB_V, data); 323 writel_uncached(data_val & ~PMB_V, data);
324} 324}
325 325
326#ifdef CONFIG_PM
326static void set_pmb_entry(struct pmb_entry *pmbe) 327static void set_pmb_entry(struct pmb_entry *pmbe)
327{ 328{
328 unsigned long flags; 329 unsigned long flags;
@@ -331,6 +332,7 @@ static void set_pmb_entry(struct pmb_entry *pmbe)
331 __set_pmb_entry(pmbe); 332 __set_pmb_entry(pmbe);
332 spin_unlock_irqrestore(&pmbe->lock, flags); 333 spin_unlock_irqrestore(&pmbe->lock, flags);
333} 334}
335#endif /* CONFIG_PM */
334 336
335int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys, 337int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
336 unsigned long size, pgprot_t prot) 338 unsigned long size, pgprot_t prot)
@@ -802,7 +804,7 @@ void __init pmb_init(void)
802 writel_uncached(0, PMB_IRMCR); 804 writel_uncached(0, PMB_IRMCR);
803 805
804 /* Flush out the TLB */ 806 /* Flush out the TLB */
805 __raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR); 807 local_flush_tlb_all();
806 ctrl_barrier(); 808 ctrl_barrier();
807} 809}
808 810
diff --git a/arch/sh/mm/tlb-pteaex.c b/arch/sh/mm/tlb-pteaex.c
index 32dc674c550c..bdd0982b56ee 100644
--- a/arch/sh/mm/tlb-pteaex.c
+++ b/arch/sh/mm/tlb-pteaex.c
@@ -73,5 +73,7 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page)
73 jump_to_uncached(); 73 jump_to_uncached();
74 __raw_writel(page, MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT); 74 __raw_writel(page, MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT);
75 __raw_writel(asid, MMU_UTLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT); 75 __raw_writel(asid, MMU_UTLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);
76 __raw_writel(page, MMU_ITLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT);
77 __raw_writel(asid, MMU_ITLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);
76 back_to_cached(); 78 back_to_cached();
77} 79}
diff --git a/arch/sh/mm/tlbflush_32.c b/arch/sh/mm/tlbflush_32.c
index 004bb3f25b5f..77dc5efa7127 100644
--- a/arch/sh/mm/tlbflush_32.c
+++ b/arch/sh/mm/tlbflush_32.c
@@ -123,18 +123,27 @@ void local_flush_tlb_mm(struct mm_struct *mm)
123void local_flush_tlb_all(void) 123void local_flush_tlb_all(void)
124{ 124{
125 unsigned long flags, status; 125 unsigned long flags, status;
126 int i;
126 127
127 /* 128 /*
128 * Flush all the TLB. 129 * Flush all the TLB.
129 *
130 * Write to the MMU control register's bit:
131 * TF-bit for SH-3, TI-bit for SH-4.
132 * It's same position, bit #2.
133 */ 130 */
134 local_irq_save(flags); 131 local_irq_save(flags);
132 jump_to_uncached();
133
135 status = __raw_readl(MMUCR); 134 status = __raw_readl(MMUCR);
136 status |= 0x04; 135 status = ((status & MMUCR_URB) >> MMUCR_URB_SHIFT);
137 __raw_writel(status, MMUCR); 136
137 if (status == 0)
138 status = MMUCR_URB_NENTRIES;
139
140 for (i = 0; i < status; i++)
141 __raw_writel(0x0, MMU_UTLB_ADDRESS_ARRAY | (i << 8));
142
143 for (i = 0; i < 4; i++)
144 __raw_writel(0x0, MMU_ITLB_ADDRESS_ARRAY | (i << 8));
145
146 back_to_cached();
138 ctrl_barrier(); 147 ctrl_barrier();
139 local_irq_restore(flags); 148 local_irq_restore(flags);
140} 149}
diff --git a/arch/sparc/include/asm/stat.h b/arch/sparc/include/asm/stat.h
index 39327d6a57eb..a232e9e1f4e5 100644
--- a/arch/sparc/include/asm/stat.h
+++ b/arch/sparc/include/asm/stat.h
@@ -53,8 +53,8 @@ struct stat {
53 ino_t st_ino; 53 ino_t st_ino;
54 mode_t st_mode; 54 mode_t st_mode;
55 short st_nlink; 55 short st_nlink;
56 uid16_t st_uid; 56 unsigned short st_uid;
57 gid16_t st_gid; 57 unsigned short st_gid;
58 unsigned short st_rdev; 58 unsigned short st_rdev;
59 off_t st_size; 59 off_t st_size;
60 time_t st_atime; 60 time_t st_atime;
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 68cb9b42088f..e2771939341d 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1337,7 +1337,7 @@ static void perf_callchain_user_32(struct pt_regs *regs,
1337 callchain_store(entry, PERF_CONTEXT_USER); 1337 callchain_store(entry, PERF_CONTEXT_USER);
1338 callchain_store(entry, regs->tpc); 1338 callchain_store(entry, regs->tpc);
1339 1339
1340 ufp = regs->u_regs[UREG_I6]; 1340 ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
1341 do { 1341 do {
1342 struct sparc_stackf32 *usf, sf; 1342 struct sparc_stackf32 *usf, sf;
1343 unsigned long pc; 1343 unsigned long pc;
diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
index ca39c606fe8e..1eb8b00aed75 100644
--- a/arch/sparc/kernel/sysfs.c
+++ b/arch/sparc/kernel/sysfs.c
@@ -107,12 +107,12 @@ static unsigned long run_on_cpu(unsigned long cpu,
107 unsigned long ret; 107 unsigned long ret;
108 108
109 /* should return -EINVAL to userspace */ 109 /* should return -EINVAL to userspace */
110 if (set_cpus_allowed(current, cpumask_of_cpu(cpu))) 110 if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
111 return 0; 111 return 0;
112 112
113 ret = func(arg); 113 ret = func(arg);
114 114
115 set_cpus_allowed(current, old_affinity); 115 set_cpus_allowed_ptr(current, &old_affinity);
116 116
117 return ret; 117 return ret;
118} 118}
diff --git a/arch/sparc/kernel/us2e_cpufreq.c b/arch/sparc/kernel/us2e_cpufreq.c
index 791c15138f3a..8f982b76c712 100644
--- a/arch/sparc/kernel/us2e_cpufreq.c
+++ b/arch/sparc/kernel/us2e_cpufreq.c
@@ -238,12 +238,12 @@ static unsigned int us2e_freq_get(unsigned int cpu)
238 return 0; 238 return 0;
239 239
240 cpus_allowed = current->cpus_allowed; 240 cpus_allowed = current->cpus_allowed;
241 set_cpus_allowed(current, cpumask_of_cpu(cpu)); 241 set_cpus_allowed_ptr(current, cpumask_of(cpu));
242 242
243 clock_tick = sparc64_get_clock_tick(cpu) / 1000; 243 clock_tick = sparc64_get_clock_tick(cpu) / 1000;
244 estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR); 244 estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
245 245
246 set_cpus_allowed(current, cpus_allowed); 246 set_cpus_allowed_ptr(current, &cpus_allowed);
247 247
248 return clock_tick / estar_to_divisor(estar); 248 return clock_tick / estar_to_divisor(estar);
249} 249}
@@ -259,7 +259,7 @@ static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index)
259 return; 259 return;
260 260
261 cpus_allowed = current->cpus_allowed; 261 cpus_allowed = current->cpus_allowed;
262 set_cpus_allowed(current, cpumask_of_cpu(cpu)); 262 set_cpus_allowed_ptr(current, cpumask_of(cpu));
263 263
264 new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000; 264 new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000;
265 new_bits = index_to_estar_mode(index); 265 new_bits = index_to_estar_mode(index);
@@ -281,7 +281,7 @@ static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index)
281 281
282 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 282 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
283 283
284 set_cpus_allowed(current, cpus_allowed); 284 set_cpus_allowed_ptr(current, &cpus_allowed);
285} 285}
286 286
287static int us2e_freq_target(struct cpufreq_policy *policy, 287static int us2e_freq_target(struct cpufreq_policy *policy,
diff --git a/arch/sparc/kernel/us3_cpufreq.c b/arch/sparc/kernel/us3_cpufreq.c
index 365b6464e2ce..f35d1e794548 100644
--- a/arch/sparc/kernel/us3_cpufreq.c
+++ b/arch/sparc/kernel/us3_cpufreq.c
@@ -86,12 +86,12 @@ static unsigned int us3_freq_get(unsigned int cpu)
86 return 0; 86 return 0;
87 87
88 cpus_allowed = current->cpus_allowed; 88 cpus_allowed = current->cpus_allowed;
89 set_cpus_allowed(current, cpumask_of_cpu(cpu)); 89 set_cpus_allowed_ptr(current, cpumask_of(cpu));
90 90
91 reg = read_safari_cfg(); 91 reg = read_safari_cfg();
92 ret = get_current_freq(cpu, reg); 92 ret = get_current_freq(cpu, reg);
93 93
94 set_cpus_allowed(current, cpus_allowed); 94 set_cpus_allowed_ptr(current, &cpus_allowed);
95 95
96 return ret; 96 return ret;
97} 97}
@@ -106,7 +106,7 @@ static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index)
106 return; 106 return;
107 107
108 cpus_allowed = current->cpus_allowed; 108 cpus_allowed = current->cpus_allowed;
109 set_cpus_allowed(current, cpumask_of_cpu(cpu)); 109 set_cpus_allowed_ptr(current, cpumask_of(cpu));
110 110
111 new_freq = sparc64_get_clock_tick(cpu) / 1000; 111 new_freq = sparc64_get_clock_tick(cpu) / 1000;
112 switch (index) { 112 switch (index) {
@@ -140,7 +140,7 @@ static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index)
140 140
141 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 141 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
142 142
143 set_cpus_allowed(current, cpus_allowed); 143 set_cpus_allowed_ptr(current, &cpus_allowed);
144} 144}
145 145
146static int us3_freq_target(struct cpufreq_policy *policy, 146static int us3_freq_target(struct cpufreq_policy *policy,
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 635f03bb4995..d07b44f7d1dc 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -82,6 +82,9 @@ enum fixed_addresses {
82#endif 82#endif
83 FIX_DBGP_BASE, 83 FIX_DBGP_BASE,
84 FIX_EARLYCON_MEM_BASE, 84 FIX_EARLYCON_MEM_BASE,
85#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
86 FIX_OHCI1394_BASE,
87#endif
85#ifdef CONFIG_X86_LOCAL_APIC 88#ifdef CONFIG_X86_LOCAL_APIC
86 FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ 89 FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
87#endif 90#endif
@@ -132,9 +135,6 @@ enum fixed_addresses {
132 (__end_of_permanent_fixed_addresses & (TOTAL_FIX_BTMAPS - 1)) 135 (__end_of_permanent_fixed_addresses & (TOTAL_FIX_BTMAPS - 1))
133 : __end_of_permanent_fixed_addresses, 136 : __end_of_permanent_fixed_addresses,
134 FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1, 137 FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
135#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
136 FIX_OHCI1394_BASE,
137#endif
138#ifdef CONFIG_X86_32 138#ifdef CONFIG_X86_32
139 FIX_WP_TEST, 139 FIX_WP_TEST,
140#endif 140#endif
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index a929c9ede33d..46c0fe05f230 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -133,6 +133,7 @@ extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void);
133 133
134typedef int vector_irq_t[NR_VECTORS]; 134typedef int vector_irq_t[NR_VECTORS];
135DECLARE_PER_CPU(vector_irq_t, vector_irq); 135DECLARE_PER_CPU(vector_irq_t, vector_irq);
136extern void setup_vector_irq(int cpu);
136 137
137#ifdef CONFIG_X86_IO_APIC 138#ifdef CONFIG_X86_IO_APIC
138extern void lock_vector_lock(void); 139extern void lock_vector_lock(void);
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 06e4cf0d3846..bc473acfa7f9 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -108,6 +108,8 @@
108#define MSR_AMD64_PATCH_LEVEL 0x0000008b 108#define MSR_AMD64_PATCH_LEVEL 0x0000008b
109#define MSR_AMD64_NB_CFG 0xc001001f 109#define MSR_AMD64_NB_CFG 0xc001001f
110#define MSR_AMD64_PATCH_LOADER 0xc0010020 110#define MSR_AMD64_PATCH_LOADER 0xc0010020
111#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
112#define MSR_AMD64_OSVW_STATUS 0xc0010141
111#define MSR_AMD64_IBSFETCHCTL 0xc0011030 113#define MSR_AMD64_IBSFETCHCTL 0xc0011030
112#define MSR_AMD64_IBSFETCHLINAD 0xc0011031 114#define MSR_AMD64_IBSFETCHLINAD 0xc0011031
113#define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032 115#define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index e4e0ddcb1546..463de9a858ad 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1268,6 +1268,14 @@ void __setup_vector_irq(int cpu)
1268 /* Mark the inuse vectors */ 1268 /* Mark the inuse vectors */
1269 for_each_irq_desc(irq, desc) { 1269 for_each_irq_desc(irq, desc) {
1270 cfg = desc->chip_data; 1270 cfg = desc->chip_data;
1271
1272 /*
1273 * If it is a legacy IRQ handled by the legacy PIC, this cpu
1274 * will be part of the irq_cfg's domain.
1275 */
1276 if (irq < legacy_pic->nr_legacy_irqs && !IO_APIC_IRQ(irq))
1277 cpumask_set_cpu(cpu, cfg->domain);
1278
1271 if (!cpumask_test_cpu(cpu, cfg->domain)) 1279 if (!cpumask_test_cpu(cpu, cfg->domain))
1272 continue; 1280 continue;
1273 vector = cfg->vector; 1281 vector = cfg->vector;
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 6f66d4a845ff..b53435661813 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -28,6 +28,7 @@
28#include <asm/apic.h> 28#include <asm/apic.h>
29#include <asm/stacktrace.h> 29#include <asm/stacktrace.h>
30#include <asm/nmi.h> 30#include <asm/nmi.h>
31#include <asm/compat.h>
31 32
32#if 0 33#if 0
33#undef wrmsrl 34#undef wrmsrl
@@ -209,7 +210,7 @@ struct x86_pmu {
209 struct event_constraint *event_constraints; 210 struct event_constraint *event_constraints;
210 void (*quirks)(void); 211 void (*quirks)(void);
211 212
212 void (*cpu_prepare)(int cpu); 213 int (*cpu_prepare)(int cpu);
213 void (*cpu_starting)(int cpu); 214 void (*cpu_starting)(int cpu);
214 void (*cpu_dying)(int cpu); 215 void (*cpu_dying)(int cpu);
215 void (*cpu_dead)(int cpu); 216 void (*cpu_dead)(int cpu);
@@ -1330,11 +1331,12 @@ static int __cpuinit
 x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (long)hcpu;
+	int ret = NOTIFY_OK;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_UP_PREPARE:
 		if (x86_pmu.cpu_prepare)
-			x86_pmu.cpu_prepare(cpu);
+			ret = x86_pmu.cpu_prepare(cpu);
 		break;
 
 	case CPU_STARTING:
@@ -1347,6 +1349,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 			x86_pmu.cpu_dying(cpu);
 		break;
 
+	case CPU_UP_CANCELED:
 	case CPU_DEAD:
 		if (x86_pmu.cpu_dead)
 			x86_pmu.cpu_dead(cpu);
@@ -1356,7 +1359,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 		break;
 	}
 
-	return NOTIFY_OK;
+	return ret;
 }
 
 static void __init pmu_check_apic(void)
@@ -1620,14 +1623,42 @@ perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
 	dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
 }
 
-static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
+#ifdef CONFIG_COMPAT
+static inline int
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 {
-	unsigned long bytes;
+	/* 32-bit process in 64-bit kernel. */
+	struct stack_frame_ia32 frame;
+	const void __user *fp;
 
-	bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
+	if (!test_thread_flag(TIF_IA32))
+		return 0;
+
+	fp = compat_ptr(regs->bp);
+	while (entry->nr < PERF_MAX_STACK_DEPTH) {
+		unsigned long bytes;
+		frame.next_frame = 0;
+		frame.return_address = 0;
+
+		bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
+		if (bytes != sizeof(frame))
+			break;
+
+		if (fp < compat_ptr(regs->sp))
+			break;
 
-	return bytes == sizeof(*frame);
+		callchain_store(entry, frame.return_address);
+		fp = compat_ptr(frame.next_frame);
+	}
+	return 1;
 }
+#else
+static inline int
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
+{
+	return 0;
+}
+#endif
 
 static void
 perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
@@ -1643,11 +1674,16 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
 	callchain_store(entry, PERF_CONTEXT_USER);
 	callchain_store(entry, regs->ip);
 
+	if (perf_callchain_user32(regs, entry))
+		return;
+
 	while (entry->nr < PERF_MAX_STACK_DEPTH) {
+		unsigned long bytes;
 		frame.next_frame = NULL;
 		frame.return_address = 0;
 
-		if (!copy_stack_frame(fp, &frame))
+		bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
+		if (bytes != sizeof(frame))
 			break;
 
 		if ((unsigned long)fp < regs->sp)
@@ -1694,7 +1730,6 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 	return entry;
 }
 
-#ifdef CONFIG_EVENT_TRACING
 void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
 {
 	regs->ip = ip;
@@ -1706,4 +1741,3 @@ void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int ski
 	regs->cs = __KERNEL_CS;
 	local_save_flags(regs->flags);
 }
-#endif
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 358a8e3d05f8..285623bc3cc8 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -137,6 +137,13 @@ static inline int amd_is_nb_event(struct hw_perf_event *hwc)
 	return (hwc->config & 0xe0) == 0xe0;
 }
 
+static inline int amd_has_nb(struct cpu_hw_events *cpuc)
+{
+	struct amd_nb *nb = cpuc->amd_nb;
+
+	return nb && nb->nb_id != -1;
+}
+
 static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
 				      struct perf_event *event)
 {
@@ -147,7 +154,7 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
 	/*
 	 * only care about NB events
 	 */
-	if (!(nb && amd_is_nb_event(hwc)))
+	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
 		return;
 
 	/*
@@ -214,7 +221,7 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
 	/*
 	 * if not NB event or no NB, then no constraints
 	 */
-	if (!(nb && amd_is_nb_event(hwc)))
+	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
 		return &unconstrained;
 
 	/*
@@ -293,51 +300,55 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
 	return nb;
 }
 
-static void amd_pmu_cpu_online(int cpu)
+static int amd_pmu_cpu_prepare(int cpu)
+{
+	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+
+	WARN_ON_ONCE(cpuc->amd_nb);
+
+	if (boot_cpu_data.x86_max_cores < 2)
+		return NOTIFY_OK;
+
+	cpuc->amd_nb = amd_alloc_nb(cpu, -1);
+	if (!cpuc->amd_nb)
+		return NOTIFY_BAD;
+
+	return NOTIFY_OK;
+}
+
+static void amd_pmu_cpu_starting(int cpu)
 {
-	struct cpu_hw_events *cpu1, *cpu2;
-	struct amd_nb *nb = NULL;
+	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+	struct amd_nb *nb;
 	int i, nb_id;
 
 	if (boot_cpu_data.x86_max_cores < 2)
 		return;
 
-	/*
-	 * function may be called too early in the
-	 * boot process, in which case nb_id is bogus
-	 */
 	nb_id = amd_get_nb_id(cpu);
-	if (nb_id == BAD_APICID)
-		return;
-
-	cpu1 = &per_cpu(cpu_hw_events, cpu);
-	cpu1->amd_nb = NULL;
+	WARN_ON_ONCE(nb_id == BAD_APICID);
 
 	raw_spin_lock(&amd_nb_lock);
 
 	for_each_online_cpu(i) {
-		cpu2 = &per_cpu(cpu_hw_events, i);
-		nb = cpu2->amd_nb;
-		if (!nb)
+		nb = per_cpu(cpu_hw_events, i).amd_nb;
+		if (WARN_ON_ONCE(!nb))
 			continue;
-		if (nb->nb_id == nb_id)
-			goto found;
-	}
 
-	nb = amd_alloc_nb(cpu, nb_id);
-	if (!nb) {
-		pr_err("perf_events: failed NB allocation for CPU%d\n", cpu);
-		raw_spin_unlock(&amd_nb_lock);
-		return;
+		if (nb->nb_id == nb_id) {
+			kfree(cpuc->amd_nb);
+			cpuc->amd_nb = nb;
+			break;
+		}
 	}
-found:
-	nb->refcnt++;
-	cpu1->amd_nb = nb;
+
+	cpuc->amd_nb->nb_id = nb_id;
+	cpuc->amd_nb->refcnt++;
 
 	raw_spin_unlock(&amd_nb_lock);
 }
 
-static void amd_pmu_cpu_offline(int cpu)
+static void amd_pmu_cpu_dead(int cpu)
 {
 	struct cpu_hw_events *cpuhw;
 
@@ -348,10 +359,14 @@ static void amd_pmu_cpu_offline(int cpu)
 
 	raw_spin_lock(&amd_nb_lock);
 
-	if (--cpuhw->amd_nb->refcnt == 0)
-		kfree(cpuhw->amd_nb);
+	if (cpuhw->amd_nb) {
+		struct amd_nb *nb = cpuhw->amd_nb;
+
+		if (nb->nb_id == -1 || --nb->refcnt == 0)
+			kfree(nb);
 
-	cpuhw->amd_nb = NULL;
+		cpuhw->amd_nb = NULL;
+	}
 
 	raw_spin_unlock(&amd_nb_lock);
 }
@@ -379,8 +394,9 @@ static __initconst struct x86_pmu amd_pmu = {
 	.get_event_constraints	= amd_get_event_constraints,
 	.put_event_constraints	= amd_put_event_constraints,
 
-	.cpu_prepare		= amd_pmu_cpu_online,
-	.cpu_dead		= amd_pmu_cpu_offline,
+	.cpu_prepare		= amd_pmu_cpu_prepare,
+	.cpu_starting		= amd_pmu_cpu_starting,
+	.cpu_dead		= amd_pmu_cpu_dead,
 };
 
 static __init int amd_pmu_init(void)
diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
index 29e5f7c845b2..e39e77168a37 100644
--- a/arch/x86/kernel/dumpstack.h
+++ b/arch/x86/kernel/dumpstack.h
@@ -30,6 +30,11 @@ struct stack_frame {
 	unsigned long return_address;
 };
 
+struct stack_frame_ia32 {
+	u32 next_frame;
+	u32 return_address;
+};
+
 static inline unsigned long rewind_frame_pointer(int n)
 {
 	struct stack_frame *frame;
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index adedeef1dedc..b2e246037392 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -7,6 +7,7 @@
 
 #include <linux/init.h>
 #include <linux/start_kernel.h>
+#include <linux/mm.h>
 
 #include <asm/setup.h>
 #include <asm/sections.h>
@@ -44,9 +45,10 @@ void __init i386_start_kernel(void)
 #ifdef CONFIG_BLK_DEV_INITRD
 	/* Reserve INITRD */
 	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
+		/* Assume only end is not page aligned */
 		u64 ramdisk_image = boot_params.hdr.ramdisk_image;
 		u64 ramdisk_size  = boot_params.hdr.ramdisk_size;
-		u64 ramdisk_end   = ramdisk_image + ramdisk_size;
+		u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
 		reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
 	}
 #endif
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index b5a9896ca1e7..7147143fd614 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -103,9 +103,10 @@ void __init x86_64_start_reservations(char *real_mode_data)
 #ifdef CONFIG_BLK_DEV_INITRD
 	/* Reserve INITRD */
 	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
+		/* Assume only end is not page aligned */
 		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
 		unsigned long ramdisk_size  = boot_params.hdr.ramdisk_size;
-		unsigned long ramdisk_end   = ramdisk_image + ramdisk_size;
+		unsigned long ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
 		reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
 	}
 #endif
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index ef257fc2921b..f01d390f9c5b 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -141,6 +141,28 @@ void __init init_IRQ(void)
 	x86_init.irqs.intr_init();
 }
 
+/*
+ * Setup the vector to irq mappings.
+ */
+void setup_vector_irq(int cpu)
+{
+#ifndef CONFIG_X86_IO_APIC
+	int irq;
+
+	/*
+	 * On most of the platforms, legacy PIC delivers the interrupts on the
+	 * boot cpu. But there are certain platforms where PIC interrupts are
+	 * delivered to multiple cpu's. If the legacy IRQ is handled by the
+	 * legacy PIC, for the new cpu that is coming online, setup the static
+	 * legacy vector to irq mapping:
+	 */
+	for (irq = 0; irq < legacy_pic->nr_legacy_irqs; irq++)
+		per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq;
+#endif
+
+	__setup_vector_irq(cpu);
+}
+
 static void __init smp_intr_init(void)
 {
 #ifdef CONFIG_SMP
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index bfba6019d762..b2258ca91003 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -618,8 +618,8 @@ int kgdb_arch_init(void)
 	 * portion of kgdb because this operation requires mutexs to
 	 * complete.
 	 */
+	hw_breakpoint_init(&attr);
 	attr.bp_addr = (unsigned long)kgdb_arch_init;
-	attr.type = PERF_TYPE_BREAKPOINT;
 	attr.bp_len = HW_BREAKPOINT_LEN_1;
 	attr.bp_type = HW_BREAKPOINT_W;
 	attr.disabled = 1;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 8328009416d7..eccdb57094e3 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -528,21 +528,37 @@ static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
 }
 
 /*
- * Check for AMD CPUs, which have potentially C1E support
+ * Check for AMD CPUs, where APIC timer interrupt does not wake up CPU from C1e.
+ * For more information see
+ * - Erratum #400 for NPT family 0xf and family 0x10 CPUs
+ * - Erratum #365 for family 0x11 (not affected because C1e not in use)
  */
 static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
 {
+	u64 val;
 	if (c->x86_vendor != X86_VENDOR_AMD)
-		return 0;
-
-	if (c->x86 < 0x0F)
-		return 0;
+		goto no_c1e_idle;
 
 	/* Family 0x0f models < rev F do not have C1E */
-	if (c->x86 == 0x0f && c->x86_model < 0x40)
-		return 0;
+	if (c->x86 == 0x0F && c->x86_model >= 0x40)
+		return 1;
 
-	return 1;
+	if (c->x86 == 0x10) {
+		/*
+		 * check OSVW bit for CPUs that are not affected
+		 * by erratum #400
+		 */
+		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val);
+		if (val >= 2) {
+			rdmsrl(MSR_AMD64_OSVW_STATUS, val);
+			if (!(val & BIT(1)))
+				goto no_c1e_idle;
+		}
+		return 1;
+	}
+
+no_c1e_idle:
+	return 0;
 }
 
 static cpumask_var_t c1e_mask;
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 5d7ba1a449bd..d76e18570c60 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -314,16 +314,17 @@ static void __init reserve_brk(void)
 #define MAX_MAP_CHUNK	(NR_FIX_BTMAPS << PAGE_SHIFT)
 static void __init relocate_initrd(void)
 {
-
+	/* Assume only end is not page aligned */
 	u64 ramdisk_image = boot_params.hdr.ramdisk_image;
 	u64 ramdisk_size  = boot_params.hdr.ramdisk_size;
+	u64 area_size     = PAGE_ALIGN(ramdisk_size);
 	u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT;
 	u64 ramdisk_here;
 	unsigned long slop, clen, mapaddr;
 	char *p, *q;
 
 	/* We need to move the initrd down into lowmem */
-	ramdisk_here = find_e820_area(0, end_of_lowmem, ramdisk_size,
+	ramdisk_here = find_e820_area(0, end_of_lowmem, area_size,
 					 PAGE_SIZE);
 
 	if (ramdisk_here == -1ULL)
@@ -332,7 +333,7 @@ static void __init relocate_initrd(void)
 
 	/* Note: this includes all the lowmem currently occupied by
 	   the initrd, we rely on that fact to keep the data intact. */
-	reserve_early(ramdisk_here, ramdisk_here + ramdisk_size,
+	reserve_early(ramdisk_here, ramdisk_here + area_size,
 			 "NEW RAMDISK");
 	initrd_start = ramdisk_here + PAGE_OFFSET;
 	initrd_end   = initrd_start + ramdisk_size;
@@ -376,9 +377,10 @@ static void __init relocate_initrd(void)
 
 static void __init reserve_initrd(void)
 {
+	/* Assume only end is not page aligned */
 	u64 ramdisk_image = boot_params.hdr.ramdisk_image;
 	u64 ramdisk_size  = boot_params.hdr.ramdisk_size;
-	u64 ramdisk_end   = ramdisk_image + ramdisk_size;
+	u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
 	u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT;
 
 	if (!boot_params.hdr.type_of_loader ||
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index a02e80c3c54b..6808b934d6c0 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -242,12 +242,10 @@ static void __cpuinit smp_callin(void)
 	end_local_APIC_setup();
 	map_cpu_to_logical_apicid();
 
-	notify_cpu_starting(cpuid);
-
 	/*
 	 * Need to setup vector mappings before we enable interrupts.
 	 */
-	__setup_vector_irq(smp_processor_id());
+	setup_vector_irq(smp_processor_id());
 	/*
 	 * Get our bogomips.
 	 *
@@ -264,6 +262,8 @@ static void __cpuinit smp_callin(void)
 	 */
 	smp_store_cpu_info(cpuid);
 
+	notify_cpu_starting(cpuid);
+
 	/*
 	 * Allow the master to continue.
 	 */
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 44879df55696..2cc249718c46 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -291,8 +291,8 @@ SECTIONS
 	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
 		__smp_locks = .;
 		*(.smp_locks)
-		__smp_locks_end = .;
 		. = ALIGN(PAGE_SIZE);
+		__smp_locks_end = .;
 	}
 
 #ifdef CONFIG_X86_64
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index e71c5cbc8f35..452ee5b8f309 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -331,11 +331,23 @@ int devmem_is_allowed(unsigned long pagenr)
 
 void free_init_pages(char *what, unsigned long begin, unsigned long end)
 {
-	unsigned long addr = begin;
+	unsigned long addr;
+	unsigned long begin_aligned, end_aligned;
 
-	if (addr >= end)
+	/* Make sure boundaries are page aligned */
+	begin_aligned = PAGE_ALIGN(begin);
+	end_aligned   = end & PAGE_MASK;
+
+	if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
+		begin = begin_aligned;
+		end   = end_aligned;
+	}
+
+	if (begin >= end)
 		return;
 
+	addr = begin;
+
 	/*
 	 * If debugging page accesses then do not free this memory but
 	 * mark them not present - any buggy init-section access will
@@ -343,7 +355,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 	 */
 #ifdef CONFIG_DEBUG_PAGEALLOC
 	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
-		begin, PAGE_ALIGN(end));
+		begin, end);
 	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
 #else
 	/*
@@ -358,8 +370,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 	for (; addr < end; addr += PAGE_SIZE) {
 		ClearPageReserved(virt_to_page(addr));
 		init_page_count(virt_to_page(addr));
-		memset((void *)(addr & ~(PAGE_SIZE-1)),
-			POISON_FREE_INITMEM, PAGE_SIZE);
+		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
 		free_page(addr);
 		totalram_pages++;
 	}
@@ -376,6 +387,15 @@ void free_initmem(void)
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-	free_init_pages("initrd memory", start, end);
+	/*
+	 * end could be not aligned, and We can not align that,
+	 * decompresser could be confused by aligned initrd_end
+	 * We already reserve the end partial page before in
+	 *   - i386_start_kernel()
+	 *   - x86_64_start_kernel()
+	 *   - relocate_initrd()
+	 * So here We can do PAGE_ALIGN() safely to get partial page to be freed
+	 */
+	free_init_pages("initrd memory", start, PAGE_ALIGN(end));
 }
 #endif
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 6e22454bfaa6..e31160216efb 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -122,8 +122,8 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
 	struct acpi_resource_address64 addr;
 	acpi_status status;
 	unsigned long flags;
-	struct resource *root;
-	u64 start, end;
+	struct resource *root, *conflict;
+	u64 start, end, max_len;
 
 	status = resource_to_addr(acpi_res, &addr);
 	if (!ACPI_SUCCESS(status))
@@ -140,6 +140,17 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
 	} else
 		return AE_OK;
 
+	max_len = addr.maximum - addr.minimum + 1;
+	if (addr.address_length > max_len) {
+		dev_printk(KERN_DEBUG, &info->bridge->dev,
+			   "host bridge window length %#llx doesn't fit in "
+			   "%#llx-%#llx, trimming\n",
+			   (unsigned long long) addr.address_length,
+			   (unsigned long long) addr.minimum,
+			   (unsigned long long) addr.maximum);
+		addr.address_length = max_len;
+	}
+
 	start = addr.minimum + addr.translation_offset;
 	end = start + addr.address_length - 1;
 
@@ -157,9 +168,12 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
 		return AE_OK;
 	}
 
-	if (insert_resource(root, res)) {
+	conflict = insert_resource_conflict(root, res);
+	if (conflict) {
 		dev_err(&info->bridge->dev,
-			"can't allocate host bridge window %pR\n", res);
+			"address space collision: host bridge window %pR "
+			"conflicts with %s %pR\n",
+			res, conflict->name, conflict);
 	} else {
 		pci_bus_add_resource(info->bus, res, 0);
 		info->res_num++;
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index dece3eb9c906..46fd43f79103 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -127,9 +127,6 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
 			continue;
 		if (!r->start ||
 		    pci_claim_resource(dev, idx) < 0) {
-			dev_info(&dev->dev,
-				 "can't reserve window %pR\n",
-				 r);
 			/*
 			 * Something is wrong with the region.
 			 * Invalidate the resource to prevent
@@ -181,8 +178,6 @@ static void __init pcibios_allocate_resources(int pass)
 			"BAR %d: reserving %pr (d=%d, p=%d)\n",
 			idx, r, disabled, pass);
 		if (pci_claim_resource(dev, idx) < 0) {
-			dev_info(&dev->dev,
-				 "can't reserve %pR\n", r);
 			/* We'll assign a new address later */
 			r->end -= r->start;
 			r->start = 0;