Diffstat (limited to 'arch')
58 files changed, 347 insertions, 191 deletions
diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile
index 9bb7b858ed23..7a6d908bb865 100644
--- a/arch/alpha/kernel/Makefile
+++ b/arch/alpha/kernel/Makefile
@@ -4,7 +4,7 @@
 
 extra-y := head.o vmlinux.lds
 asflags-y := $(KBUILD_CFLAGS)
-ccflags-y := -Werror -Wno-sign-compare
+ccflags-y := -Wno-sign-compare
 
 obj-y := entry.o traps.o process.o init_task.o osf_sys.o irq.o \
 irq_alpha.o signal.o setup.o ptrace.o time.o \
diff --git a/arch/alpha/kernel/core_mcpcia.c b/arch/alpha/kernel/core_mcpcia.c
index 381fec0af52e..da7bcc372f16 100644
--- a/arch/alpha/kernel/core_mcpcia.c
+++ b/arch/alpha/kernel/core_mcpcia.c
@@ -88,7 +88,7 @@ conf_read(unsigned long addr, unsigned char type1,
 {
 unsigned long flags;
 unsigned long mid = MCPCIA_HOSE2MID(hose->index);
-unsigned int stat0, value, temp, cpu;
+unsigned int stat0, value, cpu;
 
 cpu = smp_processor_id();
 
@@ -101,7 +101,7 @@ conf_read(unsigned long addr, unsigned char type1,
 stat0 = *(vuip)MCPCIA_CAP_ERR(mid);
 *(vuip)MCPCIA_CAP_ERR(mid) = stat0;
 mb();
-temp = *(vuip)MCPCIA_CAP_ERR(mid);
+*(vuip)MCPCIA_CAP_ERR(mid);
 DBG_CFG(("conf_read: MCPCIA_CAP_ERR(%d) was 0x%x\n", mid, stat0));
 
 mb();
@@ -136,7 +136,7 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1,
 {
 unsigned long flags;
 unsigned long mid = MCPCIA_HOSE2MID(hose->index);
-unsigned int stat0, temp, cpu;
+unsigned int stat0, cpu;
 
 cpu = smp_processor_id();
 
@@ -145,7 +145,7 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1,
 /* Reset status register to avoid losing errors. */
 stat0 = *(vuip)MCPCIA_CAP_ERR(mid);
 *(vuip)MCPCIA_CAP_ERR(mid) = stat0; mb();
-temp = *(vuip)MCPCIA_CAP_ERR(mid);
+*(vuip)MCPCIA_CAP_ERR(mid);
 DBG_CFG(("conf_write: MCPCIA CAP_ERR(%d) was 0x%x\n", mid, stat0));
 
 draina();
@@ -157,7 +157,7 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1,
 *((vuip)addr) = value;
 mb();
 mb(); /* magic */
-temp = *(vuip)MCPCIA_CAP_ERR(mid); /* read to force the write */
+*(vuip)MCPCIA_CAP_ERR(mid); /* read to force the write */
 mcheck_expected(cpu) = 0;
 mb();
 
@@ -572,12 +572,10 @@ mcpcia_print_system_area(unsigned long la_ptr)
 void
 mcpcia_machine_check(unsigned long vector, unsigned long la_ptr)
 {
-struct el_common *mchk_header;
 struct el_MCPCIA_uncorrected_frame_mcheck *mchk_logout;
 unsigned int cpu = smp_processor_id();
 int expected;
 
-mchk_header = (struct el_common *)la_ptr;
 mchk_logout = (struct el_MCPCIA_uncorrected_frame_mcheck *)la_ptr;
 expected = mcheck_expected(cpu);
 
diff --git a/arch/alpha/kernel/err_titan.c b/arch/alpha/kernel/err_titan.c
index c3b3781a03de..14b26c466c89 100644
--- a/arch/alpha/kernel/err_titan.c
+++ b/arch/alpha/kernel/err_titan.c
@@ -533,8 +533,6 @@ static struct el_subpacket_annotation el_titan_annotations[] = {
 static struct el_subpacket *
 el_process_regatta_subpacket(struct el_subpacket *header)
 {
-int status;
-
 if (header->class != EL_CLASS__REGATTA_FAMILY) {
 printk("%s ** Unexpected header CLASS %d TYPE %d, aborting\n",
 err_print_prefix,
@@ -551,7 +549,7 @@ el_process_regatta_subpacket(struct el_subpacket *header)
 printk("%s ** Occurred on CPU %d:\n",
 err_print_prefix,
 (int)header->by_type.regatta_frame.cpuid);
-status = privateer_process_logout_frame((struct el_common *)
+privateer_process_logout_frame((struct el_common *)
 header->by_type.regatta_frame.data_start, 1);
 break;
 default:
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
index 1479dc6ebd97..51b7fbd9e4c1 100644
--- a/arch/alpha/kernel/irq_alpha.c
+++ b/arch/alpha/kernel/irq_alpha.c
@@ -228,7 +228,7 @@ struct irqaction timer_irqaction = {
 void __init
 init_rtc_irq(void)
 {
-irq_set_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
+irq_set_chip_and_handler_name(RTC_IRQ, &dummy_irq_chip,
 handle_simple_irq, "RTC");
 setup_irq(RTC_IRQ, &timer_irqaction);
 }
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index d2634e4476b4..edbddcbd5bc6 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -1404,8 +1404,6 @@ determine_cpu_caches (unsigned int cpu_type)
 case PCA56_CPU:
 case PCA57_CPU:
 {
-unsigned long cbox_config, size;
-
 if (cpu_type == PCA56_CPU) {
 L1I = CSHAPE(16*1024, 6, 1);
 L1D = CSHAPE(8*1024, 5, 1);
@@ -1415,10 +1413,12 @@ determine_cpu_caches (unsigned int cpu_type)
 }
 L3 = -1;
 
+#if 0
+unsigned long cbox_config, size;
+
 cbox_config = *(vulp) phys_to_virt (0xfffff00008UL);
 size = 512*1024 * (1 << ((cbox_config >> 12) & 3));
 
-#if 0
 L2 = ((cbox_config >> 31) & 1 ? CSHAPE (size, 6, 1) : -1);
 #else
 L2 = external_cache_probe(512*1024, 6);
diff --git a/arch/alpha/kernel/smc37c93x.c b/arch/alpha/kernel/smc37c93x.c
index 3e6a2893af9f..6886b834f487 100644
--- a/arch/alpha/kernel/smc37c93x.c
+++ b/arch/alpha/kernel/smc37c93x.c
@@ -79,7 +79,6 @@
 static unsigned long __init SMCConfigState(unsigned long baseAddr)
 {
 unsigned char devId;
-unsigned char devRev;
 
 unsigned long configPort;
 unsigned long indexPort;
@@ -100,7 +99,7 @@ static unsigned long __init SMCConfigState(unsigned long baseAddr)
 devId = inb(dataPort);
 if (devId == VALID_DEVICE_ID) {
 outb(DEVICE_REV, indexPort);
-devRev = inb(dataPort);
+/* unsigned char devRev = */ inb(dataPort);
 break;
 }
 else
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c
index d3cb28bb8eb0..d92cdc715c65 100644
--- a/arch/alpha/kernel/sys_wildfire.c
+++ b/arch/alpha/kernel/sys_wildfire.c
@@ -156,7 +156,6 @@ static void __init
 wildfire_init_irq_per_pca(int qbbno, int pcano)
 {
 int i, irq_bias;
-unsigned long io_bias;
 static struct irqaction isa_enable = {
 .handler = no_action,
 .name = "isa_enable",
@@ -165,10 +164,12 @@ wildfire_init_irq_per_pca(int qbbno, int pcano)
 irq_bias = qbbno * (WILDFIRE_PCA_PER_QBB * WILDFIRE_IRQ_PER_PCA)
 + pcano * WILDFIRE_IRQ_PER_PCA;
 
+#if 0
+unsigned long io_bias;
+
 /* Only need the following for first PCI bus per PCA. */
 io_bias = WILDFIRE_IO(qbbno, pcano<<1) - WILDFIRE_IO_BIAS;
 
-#if 0
 outb(0, DMA1_RESET_REG + io_bias);
 outb(0, DMA2_RESET_REG + io_bias);
 outb(DMA_MODE_CASCADE, DMA2_MODE_REG + io_bias);
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index a58e84f1a63b..918e8e0b72ff 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -153,6 +153,7 @@ void read_persistent_clock(struct timespec *ts)
 year += 100;
 
 ts->tv_sec = mktime(year, mon, day, hour, min, sec);
+ts->tv_nsec = 0;
 }
 
 
diff --git a/arch/arm/mach-msm/board-qsd8x50.c b/arch/arm/mach-msm/board-qsd8x50.c
index 7f568611547e..6a96911b0ad5 100644
--- a/arch/arm/mach-msm/board-qsd8x50.c
+++ b/arch/arm/mach-msm/board-qsd8x50.c
@@ -160,10 +160,7 @@ static struct msm_mmc_platform_data qsd8x50_sdc1_data = {
 
 static void __init qsd8x50_init_mmc(void)
 {
-if (machine_is_qsd8x50_ffa() || machine_is_qsd8x50a_ffa())
-vreg_mmc = vreg_get(NULL, "gp6");
-else
-vreg_mmc = vreg_get(NULL, "gp5");
+vreg_mmc = vreg_get(NULL, "gp5");
 
 if (IS_ERR(vreg_mmc)) {
 pr_err("vreg get for vreg_mmc failed (%ld)\n",
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index 56f920c55b6a..38b95e949d13 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -269,7 +269,7 @@ int __cpuinit local_timer_setup(struct clock_event_device *evt)
 
 /* Use existing clock_event for cpu 0 */
 if (!smp_processor_id())
-return;
+return 0;
 
 writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);
 
diff --git a/arch/arm/mach-tegra/gpio.c b/arch/arm/mach-tegra/gpio.c
index 76a3f654220f..65a1aba6823d 100644
--- a/arch/arm/mach-tegra/gpio.c
+++ b/arch/arm/mach-tegra/gpio.c
@@ -257,7 +257,8 @@ static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
 void tegra_gpio_resume(void)
 {
 unsigned long flags;
-int b, p, i;
+int b;
+int p;
 
 local_irq_save(flags);
 
@@ -280,7 +281,8 @@ void tegra_gpio_resume(void)
 void tegra_gpio_suspend(void)
 {
 unsigned long flags;
-int b, p, i;
+int b;
+int p;
 
 local_irq_save(flags);
 for (b = 0; b < ARRAY_SIZE(tegra_gpio_banks); b++) {
diff --git a/arch/arm/mach-tegra/tegra2_clocks.c b/arch/arm/mach-tegra/tegra2_clocks.c
index 6d7c4eea4dcb..4459470c052d 100644
--- a/arch/arm/mach-tegra/tegra2_clocks.c
+++ b/arch/arm/mach-tegra/tegra2_clocks.c
@@ -1362,14 +1362,15 @@ static int tegra_clk_shared_bus_set_rate(struct clk *c, unsigned long rate)
 {
 unsigned long flags;
 int ret;
+long new_rate = rate;
 
-rate = clk_round_rate(c->parent, rate);
-if (rate < 0)
-return rate;
+new_rate = clk_round_rate(c->parent, new_rate);
+if (new_rate < 0)
+return new_rate;
 
 spin_lock_irqsave(&c->parent->spinlock, flags);
 
-c->u.shared_bus_user.rate = rate;
+c->u.shared_bus_user.rate = new_rate;
 ret = tegra_clk_shared_bus_update(c->parent);
 
 spin_unlock_irqrestore(&c->parent->spinlock, flags);
diff --git a/arch/arm/plat-s5p/pm.c b/arch/arm/plat-s5p/pm.c
index d592b6304b48..d15dc47b0e3d 100644
--- a/arch/arm/plat-s5p/pm.c
+++ b/arch/arm/plat-s5p/pm.c
@@ -19,17 +19,6 @@
 
 #define PFX "s5p pm: "
 
-/* s3c_pm_check_resume_pin
-*
-* check to see if the pin is configured correctly for sleep mode, and
-* make any necessary adjustments if it is not
-*/
-
-static void s3c_pm_check_resume_pin(unsigned int pin, unsigned int irqoffs)
-{
-/* nothing here yet */
-}
-
 /* s3c_pm_configure_extint
 *
 * configure all external interrupt pins
diff --git a/arch/arm/plat-samsung/pm-check.c b/arch/arm/plat-samsung/pm-check.c
index e4baf76f374a..6b733fafe7cd 100644
--- a/arch/arm/plat-samsung/pm-check.c
+++ b/arch/arm/plat-samsung/pm-check.c
@@ -164,7 +164,6 @@ static inline int in_region(void *ptr, int size, void *what, size_t whatsz)
 */
 static u32 *s3c_pm_runcheck(struct resource *res, u32 *val)
 {
-void *save_at = phys_to_virt(s3c_sleep_save_phys);
 unsigned long addr;
 unsigned long left;
 void *stkpage;
@@ -192,11 +191,6 @@ static u32 *s3c_pm_runcheck(struct resource *res, u32 *val)
 goto skip_check;
 }
 
-if (in_region(ptr, left, save_at, 32*4 )) {
-S3C_PMDBG("skipping %08lx, has save block in\n", addr);
-goto skip_check;
-}
-
 /* calculate and check the checksum */
 
 calc = crc32_le(~0, ptr, left);
diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c
index d5b58d31903c..5c0a440d6e16 100644
--- a/arch/arm/plat-samsung/pm.c
+++ b/arch/arm/plat-samsung/pm.c
@@ -214,8 +214,9 @@ void s3c_pm_do_restore_core(struct sleep_save *ptr, int count)
 *
 * print any IRQs asserted at resume time (ie, we woke from)
 */
-static void s3c_pm_show_resume_irqs(int start, unsigned long which,
-unsigned long mask)
+static void __maybe_unused s3c_pm_show_resume_irqs(int start,
+unsigned long which,
+unsigned long mask)
 {
 int i;
 
diff --git a/arch/avr32/include/asm/setup.h b/arch/avr32/include/asm/setup.h
index ff5b7cf6be4d..160543dbec7e 100644
--- a/arch/avr32/include/asm/setup.h
+++ b/arch/avr32/include/asm/setup.h
@@ -94,6 +94,13 @@ struct tag_ethernet {
 
 #define ETH_INVALID_PHY 0xff
 
+/* board information */
+#define ATAG_BOARDINFO 0x54410008
+
+struct tag_boardinfo {
+u32 board_number;
+};
+
 struct tag {
 struct tag_header hdr;
 union {
@@ -102,6 +109,7 @@ struct tag {
 struct tag_cmdline cmdline;
 struct tag_clock clock;
 struct tag_ethernet ethernet;
+struct tag_boardinfo boardinfo;
 } u;
 };
 
@@ -128,6 +136,7 @@ extern struct tag *bootloader_tags;
 
 extern resource_size_t fbmem_start;
 extern resource_size_t fbmem_size;
+extern u32 board_number;
 
 void setup_processor(void);
 
diff --git a/arch/avr32/kernel/setup.c b/arch/avr32/kernel/setup.c
index 5c7083916c33..bb0974cce4ac 100644
--- a/arch/avr32/kernel/setup.c
+++ b/arch/avr32/kernel/setup.c
@@ -391,6 +391,21 @@ static int __init parse_tag_clock(struct tag *tag)
 __tagtable(ATAG_CLOCK, parse_tag_clock);
 
 /*
+* The board_number correspond to the bd->bi_board_number in U-Boot. This
+* parameter is only available during initialisation and can be used in some
+* kind of board identification.
+*/
+u32 __initdata board_number;
+
+static int __init parse_tag_boardinfo(struct tag *tag)
+{
+board_number = tag->u.boardinfo.board_number;
+
+return 0;
+}
+__tagtable(ATAG_BOARDINFO, parse_tag_boardinfo);
+
+/*
 * Scan the tag table for this tag, and call its parse function. The
 * tag table is built by the linker from all the __tagtable
 * declarations.
diff --git a/arch/avr32/kernel/traps.c b/arch/avr32/kernel/traps.c
index b91b2044af9c..7aa25756412f 100644
--- a/arch/avr32/kernel/traps.c
+++ b/arch/avr32/kernel/traps.c
@@ -95,28 +95,6 @@ void _exception(long signr, struct pt_regs *regs, int code,
 info.si_code = code;
 info.si_addr = (void __user *)addr;
 force_sig_info(signr, &info, current);
-
-/*
-* Init gets no signals that it doesn't have a handler for.
-* That's all very well, but if it has caused a synchronous
-* exception and we ignore the resulting signal, it will just
-* generate the same exception over and over again and we get
-* nowhere. Better to kill it and let the kernel panic.
-*/
-if (is_global_init(current)) {
-__sighandler_t handler;
-
-spin_lock_irq(&current->sighand->siglock);
-handler = current->sighand->action[signr-1].sa.sa_handler;
-spin_unlock_irq(&current->sighand->siglock);
-if (handler == SIG_DFL) {
-/* init has generated a synchronous exception
-and it doesn't have a handler for the signal */
-printk(KERN_CRIT "init has generated signal %ld "
-"but has no handler for it\n", signr);
-do_exit(signr);
-}
-}
 }
 
 asmlinkage void do_nmi(unsigned long ecr, struct pt_regs *regs)
diff --git a/arch/avr32/mach-at32ap/clock.c b/arch/avr32/mach-at32ap/clock.c
index 442f08c5e641..86925fd6ea5b 100644
--- a/arch/avr32/mach-at32ap/clock.c
+++ b/arch/avr32/mach-at32ap/clock.c
@@ -35,22 +35,30 @@ void at32_clk_register(struct clk *clk)
 spin_unlock(&clk_list_lock);
 }
 
-struct clk *clk_get(struct device *dev, const char *id)
+static struct clk *__clk_get(struct device *dev, const char *id)
 {
 struct clk *clk;
 
-spin_lock(&clk_list_lock);
-
 list_for_each_entry(clk, &at32_clock_list, list) {
 if (clk->dev == dev && strcmp(id, clk->name) == 0) {
-spin_unlock(&clk_list_lock);
 return clk;
 }
 }
 
-spin_unlock(&clk_list_lock);
 return ERR_PTR(-ENOENT);
 }
+
+struct clk *clk_get(struct device *dev, const char *id)
+{
+struct clk *clk;
+
+spin_lock(&clk_list_lock);
+clk = __clk_get(dev, id);
+spin_unlock(&clk_list_lock);
+
+return clk;
+}
+
 EXPORT_SYMBOL(clk_get);
 
 void clk_put(struct clk *clk)
@@ -257,15 +265,15 @@ static int clk_show(struct seq_file *s, void *unused)
 spin_lock(&clk_list_lock);
 
 /* show clock tree as derived from the three oscillators */
-clk = clk_get(NULL, "osc32k");
+clk = __clk_get(NULL, "osc32k");
 dump_clock(clk, &r);
 clk_put(clk);
 
-clk = clk_get(NULL, "osc0");
+clk = __clk_get(NULL, "osc0");
 dump_clock(clk, &r);
 clk_put(clk);
 
-clk = clk_get(NULL, "osc1");
+clk = __clk_get(NULL, "osc1");
 dump_clock(clk, &r);
 clk_put(clk);
 
diff --git a/arch/avr32/mach-at32ap/extint.c b/arch/avr32/mach-at32ap/extint.c
index 47ba4b9b6db1..fbc2aeaebddb 100644
--- a/arch/avr32/mach-at32ap/extint.c
+++ b/arch/avr32/mach-at32ap/extint.c
@@ -61,34 +61,34 @@ struct eic {
 static struct eic *nmi_eic;
 static bool nmi_enabled;
 
-static void eic_ack_irq(struct irq_chip *d)
+static void eic_ack_irq(struct irq_data *d)
 {
-struct eic *eic = irq_data_get_irq_chip_data(data);
+struct eic *eic = irq_data_get_irq_chip_data(d);
 eic_writel(eic, ICR, 1 << (d->irq - eic->first_irq));
 }
 
-static void eic_mask_irq(struct irq_chip *d)
+static void eic_mask_irq(struct irq_data *d)
 {
-struct eic *eic = irq_data_get_irq_chip_data(data);
+struct eic *eic = irq_data_get_irq_chip_data(d);
 eic_writel(eic, IDR, 1 << (d->irq - eic->first_irq));
 }
 
-static void eic_mask_ack_irq(struct irq_chip *d)
+static void eic_mask_ack_irq(struct irq_data *d)
 {
-struct eic *eic = irq_data_get_irq_chip_data(data);
+struct eic *eic = irq_data_get_irq_chip_data(d);
 eic_writel(eic, ICR, 1 << (d->irq - eic->first_irq));
 eic_writel(eic, IDR, 1 << (d->irq - eic->first_irq));
 }
 
-static void eic_unmask_irq(struct irq_chip *d)
+static void eic_unmask_irq(struct irq_data *d)
 {
-struct eic *eic = irq_data_get_irq_chip_data(data);
+struct eic *eic = irq_data_get_irq_chip_data(d);
 eic_writel(eic, IER, 1 << (d->irq - eic->first_irq));
 }
 
-static int eic_set_irq_type(struct irq_chip *d, unsigned int flow_type)
+static int eic_set_irq_type(struct irq_data *d, unsigned int flow_type)
 {
-struct eic *eic = irq_data_get_irq_chip_data(data);
+struct eic *eic = irq_data_get_irq_chip_data(d);
 unsigned int irq = d->irq;
 unsigned int i = irq - eic->first_irq;
 u32 mode, edge, level;
@@ -191,7 +191,7 @@ static int __init eic_probe(struct platform_device *pdev)
 
 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 int_irq = platform_get_irq(pdev, 0);
-if (!regs || !int_irq) {
+if (!regs || (int)int_irq <= 0) {
 dev_dbg(&pdev->dev, "missing regs and/or irq resource\n");
 return -ENXIO;
 }
diff --git a/arch/avr32/mach-at32ap/pio.c b/arch/avr32/mach-at32ap/pio.c
index f308e1ddc629..2e0aa853a4bc 100644
--- a/arch/avr32/mach-at32ap/pio.c
+++ b/arch/avr32/mach-at32ap/pio.c
@@ -257,7 +257,7 @@ static void gpio_irq_mask(struct irq_data *d)
 pio_writel(pio, IDR, 1 << (gpio & 0x1f));
 }
 
-static void gpio_irq_unmask(struct irq_data *d))
+static void gpio_irq_unmask(struct irq_data *d)
 {
 unsigned gpio = irq_to_gpio(d->irq);
 struct pio_device *pio = &pio_dev[gpio >> 5];
diff --git a/arch/avr32/mach-at32ap/pm-at32ap700x.S b/arch/avr32/mach-at32ap/pm-at32ap700x.S
index 17503b0ed6c9..f868f4ce761b 100644
--- a/arch/avr32/mach-at32ap/pm-at32ap700x.S
+++ b/arch/avr32/mach-at32ap/pm-at32ap700x.S
@@ -53,7 +53,7 @@ cpu_enter_idle:
 st.w r8[TI_flags], r9
 unmask_interrupts
 sleep CPU_SLEEP_IDLE
-.size cpu_idle_sleep, . - cpu_idle_sleep
+.size cpu_enter_idle, . - cpu_enter_idle
 
 /*
 * Common return path for PM functions that don't run from
diff --git a/arch/blackfin/include/asm/system.h b/arch/blackfin/include/asm/system.h
index 19e2c7c3e63a..44bd0cced725 100644
--- a/arch/blackfin/include/asm/system.h
+++ b/arch/blackfin/include/asm/system.h
@@ -19,11 +19,11 @@
 * Force strict CPU ordering.
 */
 #define nop() __asm__ __volatile__ ("nop;\n\t" : : )
-#define mb() __asm__ __volatile__ ("" : : : "memory")
-#define rmb() __asm__ __volatile__ ("" : : : "memory")
-#define wmb() __asm__ __volatile__ ("" : : : "memory")
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-#define read_barrier_depends() do { } while(0)
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define smp_read_barrier_depends() read_barrier_depends()
 
 #ifdef CONFIG_SMP
 asmlinkage unsigned long __raw_xchg_1_asm(volatile void *ptr, unsigned long value);
@@ -37,16 +37,16 @@ asmlinkage unsigned long __raw_cmpxchg_4_asm(volatile void *ptr,
 unsigned long new, unsigned long old);
 
 #ifdef __ARCH_SYNC_CORE_DCACHE
-# define smp_mb() do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
-# define smp_rmb() do { barrier(); smp_check_barrier(); } while (0)
-# define smp_wmb() do { barrier(); smp_mark_barrier(); } while (0)
-#define smp_read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0)
-
+/* Force Core data cache coherence */
+# define mb() do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
+# define rmb() do { barrier(); smp_check_barrier(); } while (0)
+# define wmb() do { barrier(); smp_mark_barrier(); } while (0)
+# define read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0)
 #else
-# define smp_mb() barrier()
-# define smp_rmb() barrier()
-# define smp_wmb() barrier()
-#define smp_read_barrier_depends() barrier()
+# define mb() barrier()
+# define rmb() barrier()
+# define wmb() barrier()
+# define read_barrier_depends() do { } while (0)
 #endif
 
 static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
@@ -99,10 +99,10 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 
 #else /* !CONFIG_SMP */
 
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while(0)
+#define mb() barrier()
+#define rmb() barrier()
+#define wmb() barrier()
+#define read_barrier_depends() do { } while (0)
 
 struct __xchg_dummy {
 unsigned long a[100];
diff --git a/arch/blackfin/kernel/gptimers.c b/arch/blackfin/kernel/gptimers.c
index cdbe075de1dc..8b81dc04488a 100644
--- a/arch/blackfin/kernel/gptimers.c
+++ b/arch/blackfin/kernel/gptimers.c
@@ -268,7 +268,7 @@ void disable_gptimers(uint16_t mask)
 _disable_gptimers(mask);
 for (i = 0; i < MAX_BLACKFIN_GPTIMERS; ++i)
 if (mask & (1 << i))
-group_regs[BFIN_TIMER_OCTET(i)]->status |= trun_mask[i];
+group_regs[BFIN_TIMER_OCTET(i)]->status = trun_mask[i];
 SSYNC();
 }
 EXPORT_SYMBOL(disable_gptimers);
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c
index 8c9a43daf80f..cdb4beb6bc8f 100644
--- a/arch/blackfin/kernel/time-ts.c
+++ b/arch/blackfin/kernel/time-ts.c
@@ -206,8 +206,14 @@ irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id)
 {
 struct clock_event_device *evt = dev_id;
 smp_mb();
-evt->event_handler(evt);
+/*
+* We want to ACK before we handle so that we can handle smaller timer
+* intervals. This way if the timer expires again while we're handling
+* things, we're more likely to see that 2nd int rather than swallowing
+* it by ACKing the int at the end of this handler.
+*/
 bfin_gptmr0_ack();
+evt->event_handler(evt);
 return IRQ_HANDLED;
 }
 
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 6e17a265c4d3..8bce5ed031e4 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -109,10 +109,23 @@ static void ipi_flush_icache(void *info)
 struct blackfin_flush_data *fdata = info;
 
 /* Invalidate the memory holding the bounds of the flushed region. */
-invalidate_dcache_range((unsigned long)fdata,
+blackfin_dcache_invalidate_range((unsigned long)fdata,
 (unsigned long)fdata + sizeof(*fdata));
+
+/* Make sure all write buffers in the data side of the core
+* are flushed before trying to invalidate the icache. This
+* needs to be after the data flush and before the icache
+* flush so that the SSYNC does the right thing in preventing
+* the instruction prefetcher from hitting things in cached
+* memory at the wrong time -- it runs much further ahead than
+* the pipeline.
+*/
+SSYNC();
 
-flush_icache_range(fdata->start, fdata->end);
+/* ipi_flaush_icache is invoked by generic flush_icache_range,
+* so call blackfin arch icache flush directly here.
+*/
+blackfin_icache_flush_range(fdata->start, fdata->end);
 }
 
 static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 26d851d385bb..29e17907d9f2 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -343,10 +343,14 @@
 #define __NR_fanotify_init 337
 #define __NR_fanotify_mark 338
 #define __NR_prlimit64 339
+#define __NR_name_to_handle_at 340
+#define __NR_open_by_handle_at 341
+#define __NR_clock_adjtime 342
+#define __NR_syncfs 343
 
 #ifdef __KERNEL__
 
-#define NR_syscalls 340
+#define NR_syscalls 344
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
diff --git a/arch/m68k/kernel/entry_mm.S b/arch/m68k/kernel/entry_mm.S
index 1559dea36e55..1359ee659574 100644
--- a/arch/m68k/kernel/entry_mm.S
+++ b/arch/m68k/kernel/entry_mm.S
@@ -750,4 +750,8 @@ sys_call_table:
 .long sys_fanotify_init
 .long sys_fanotify_mark
 .long sys_prlimit64
+.long sys_name_to_handle_at /* 340 */
+.long sys_open_by_handle_at
+.long sys_clock_adjtime
+.long sys_syncfs
 
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 79b1ed198c07..9b8393d8adb8 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -358,6 +358,10 @@ ENTRY(sys_call_table)
 .long sys_fanotify_init
 .long sys_fanotify_mark
 .long sys_prlimit64
+.long sys_name_to_handle_at /* 340 */
+.long sys_open_by_handle_at
+.long sys_clock_adjtime
+.long sys_syncfs
 
 .rept NR_syscalls-(.-sys_call_table)/4
 .long sys_ni_syscall
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 851b3bf6e962..eccdefe70d4e 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -6,7 +6,6 @@ config MICROBLAZE
 select HAVE_FUNCTION_GRAPH_TRACER
 select HAVE_DYNAMIC_FTRACE
 select HAVE_FTRACE_MCOUNT_RECORD
-select USB_ARCH_HAS_EHCI
 select ARCH_WANT_OPTIONAL_GPIOLIB
 select HAVE_OPROFILE
 select HAVE_ARCH_KGDB
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index b6ff882f695b..8f4d50b0adfa 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -209,7 +209,7 @@ config ARCH_HIBERNATION_POSSIBLE
 config ARCH_SUSPEND_POSSIBLE
 def_bool y
 depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \
-PPC_85xx || PPC_86xx || PPC_PSERIES || 44x || 40x
+(PPC_85xx && !SMP) || PPC_86xx || PPC_PSERIES || 44x || 40x
 
 config PPC_DCR_NATIVE
 bool
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index be3cdf9134ce..1833d1a07e79 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -382,10 +382,12 @@ extern const char *powerpc_base_platform;
 #define CPU_FTRS_E500_2 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
 CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \
 CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
-#define CPU_FTRS_E500MC (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
-CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
+#define CPU_FTRS_E500MC (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
 CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
 CPU_FTR_DBELL)
+#define CPU_FTRS_E5500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
+CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
+CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD)
 #define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
 
 /* 64-bit CPUs */
@@ -435,11 +437,15 @@ extern const char *powerpc_base_platform;
 #define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2)
 
 #ifdef __powerpc64__
+#ifdef CONFIG_PPC_BOOK3E
+#define CPU_FTRS_POSSIBLE (CPU_FTRS_E5500)
+#else
 #define CPU_FTRS_POSSIBLE \
 (CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | \
 CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 | \
 CPU_FTRS_POWER7 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \
 CPU_FTR_1T_SEGMENT | CPU_FTR_VSX)
+#endif
 #else
 enum {
 CPU_FTRS_POSSIBLE =
@@ -473,16 +479,21 @@ enum {
 #endif
 #ifdef CONFIG_E500
 CPU_FTRS_E500 | CPU_FTRS_E500_2 | CPU_FTRS_E500MC |
+CPU_FTRS_E5500 |
 #endif
 0,
 };
 #endif /* __powerpc64__ */
 
 #ifdef __powerpc64__
+#ifdef CONFIG_PPC_BOOK3E
+#define CPU_FTRS_ALWAYS (CPU_FTRS_E5500)
+#else
 #define CPU_FTRS_ALWAYS \
 (CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 & \
 CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_POWER6 & \
 CPU_FTRS_POWER7 & CPU_FTRS_CELL & CPU_FTRS_PA6T & CPU_FTRS_POSSIBLE)
+#endif
 #else
 enum {
 CPU_FTRS_ALWAYS =
@@ -513,6 +524,7 @@ enum {
 #endif
 #ifdef CONFIG_E500
 CPU_FTRS_E500 & CPU_FTRS_E500_2 & CPU_FTRS_E500MC &
+CPU_FTRS_E5500 &
 #endif
 CPU_FTRS_POSSIBLE,
 };
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
index 811f04ac3660..8d1569c29042 100644
--- a/arch/powerpc/include/asm/pte-common.h
+++ b/arch/powerpc/include/asm/pte-common.h
@@ -162,7 +162,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
 * on platforms where such control is possible.
 */
 #if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
-defined(CONFIG_KPROBES)
+defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
 #define PAGE_KERNEL_TEXT PAGE_KERNEL_X
 #else
 #define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index c9b68d07ac4f..b9602ee06deb 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1973,7 +1973,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 .pvr_mask = 0xffff0000,
 .pvr_value = 0x80240000,
 .cpu_name = "e5500",
-.cpu_features = CPU_FTRS_E500MC,
+.cpu_features = CPU_FTRS_E5500,
 .cpu_user_features = COMMON_USER_BOOKE,
 .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
 MMU_FTR_USE_TLBILX,
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 3d3d416339dd..5b5e1f002a8e 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -163,7 +163,7 @@ static void crash_kexec_prepare_cpus(int cpu)
 }
 
 /* wait for all the CPUs to hit real mode but timeout if they don't come in */
-#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP)
+#ifdef CONFIG_PPC_STD_MMU_64
 static void crash_kexec_wait_realmode(int cpu)
 {
 unsigned int msecs;
@@ -188,9 +188,7 @@ static void crash_kexec_wait_realmode(int cpu)
 }
 mb();
 }
-#else
-static inline void crash_kexec_wait_realmode(int cpu) {}
-#endif
+#endif /* CONFIG_PPC_STD_MMU_64 */
 
 /*
 * This function will be called by secondary cpus or by kexec cpu
@@ -235,7 +233,9 @@ void crash_kexec_secondary(struct pt_regs *regs)
 crash_ipi_callback(regs);
 }
 
-#else
+#else /* ! CONFIG_SMP */
+static inline void crash_kexec_wait_realmode(int cpu) {}
+
 static void crash_kexec_prepare_cpus(int cpu)
 {
 /*
@@ -255,7 +255,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
 {
 cpus_in_sr = CPU_MASK_NONE;
 }
-#endif
+#endif /* CONFIG_SMP */
 
 /*
 * Register a function to be called on shutdown. Only use this if you
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index c00d4ca1ee15..28581f1ad2c0 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -527,7 +527,7 @@ static int ibmebus_bus_pm_resume_noirq(struct device *dev)
 
 #endif /* !CONFIG_SUSPEND */
 
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
 
 static int ibmebus_bus_pm_freeze(struct device *dev)
 {
@@ -665,7 +665,7 @@ static int ibmebus_bus_pm_restore_noirq(struct device *dev)
 return ret;
 }
 
-#else /* !CONFIG_HIBERNATION */
+#else /* !CONFIG_HIBERNATE_CALLBACKS */
 
 #define ibmebus_bus_pm_freeze NULL
 #define ibmebus_bus_pm_thaw NULL
@@ -676,7 +676,7 @@ static int ibmebus_bus_pm_restore_noirq(struct device *dev)
 #define ibmebus_bus_pm_poweroff_noirq NULL
 #define ibmebus_bus_pm_restore_noirq NULL
 
-#endif /* !CONFIG_HIBERNATION */
+#endif /* !CONFIG_HIBERNATE_CALLBACKS */
 
 static struct dev_pm_ops ibmebus_bus_dev_pm_ops = {
 .prepare = ibmebus_bus_pm_prepare,
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index c834757bebc0..2b97b80d6d7d 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -330,9 +330,11 @@ void __init find_legacy_serial_ports(void)
 if (!parent)
 continue;
 if (of_match_node(legacy_serial_parents, parent) != NULL) {
-index = add_legacy_soc_port(np, np);
-if (index >= 0 && np == stdout)
-legacy_serial_console = index;
+if (of_device_is_available(np)) {
+index = add_legacy_soc_port(np, np);
+if (index >= 0 && np == stdout)
+legacy_serial_console = index;
+}
 }
 of_node_put(parent);
 }
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index c4063b7f49a0..822f63008ae1 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -398,6 +398,25 @@ static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
 return 0;
 }
 
+static u64 check_and_compute_delta(u64 prev, u64 val)
+{
+u64 delta = (val - prev) & 0xfffffffful;
+
+/*
+* POWER7 can roll back counter values, if the new value is smaller
+* than the previous value it will cause the delta and the counter to
+* have bogus values unless we rolled a counter over. If a coutner is
+* rolled back, it will be smaller, but within 256, which is the maximum
+* number of events to rollback at once. If we dectect a rollback
+* return 0. This can lead to a small lack of precision in the
+* counters.
+*/
+if (prev > val && (prev - val) < 256)
+delta = 0;
+
+return delta;
+}
+
 static void power_pmu_read(struct perf_event *event)
 {
 s64 val, delta, prev;
@@ -416,10 +435,11 @@ static void power_pmu_read(struct perf_event *event)
 prev = local64_read(&event->hw.prev_count);
 barrier();
 val = read_pmc(event->hw.idx);
+delta = check_and_compute_delta(prev, val);
+if (!delta)
+return;
 } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
 
-/* The counters are only 32 bits wide */
-delta = (val - prev) & 0xfffffffful;
 local64_add(delta, &event->count);
 local64_sub(delta, &event->hw.period_left);
 }
@@ -449,8 +469,9 @@ static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
 val = (event->hw.idx == 5) ? pmc5 : pmc6;
 prev = local64_read(&event->hw.prev_count);
 event->hw.idx = 0;
-delta = (val - prev) & 0xfffffffful;
-local64_add(delta, &event->count);
+delta = check_and_compute_delta(prev, val);
+if (delta)
+local64_add(delta, &event->count);
 }
 }
 
@@ -458,14 +479,16 @@ static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
 unsigned long pmc5, unsigned long pmc6)
 {
 struct perf_event *event;
-u64 val;
+u64 val, prev;
 int i;
 
 for (i = 0; i < cpuhw->n_limited; ++i) {
 event = cpuhw->limited_counter[i];
 event->hw.idx = cpuhw->limited_hwidx[i];
 val = (event->hw.idx == 5) ? pmc5 : pmc6;
-local64_set(&event->hw.prev_count, val);
+prev = local64_read(&event->hw.prev_count);
+if (check_and_compute_delta(prev, val))
+local64_set(&event->hw.prev_count, val);
 perf_event_update_userpage(event);
 }
 }
@@ -1197,7 +1220,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 
 /* we don't have to worry about interrupts here */
 prev = local64_read(&event->hw.prev_count);
-delta = (val - prev) & 0xfffffffful;
+delta = check_and_compute_delta(prev, val);
 local64_add(delta, &event->count);
 
 /*
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 375480c56eb9..f33acfd872ad 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -229,6 +229,9 @@ static u64 scan_dispatch_log(u64 stop_tb)
 u64 stolen = 0;
 u64 dtb;
 
+if (!dtl)
+return 0;
+
 if (i == vpa->dtl_idx)
 return 0;
 while (i < vpa->dtl_idx) {
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index a830c5e80657..bc5f0dc6ae1e 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -842,6 +842,7 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr)
 mpic_setup_this_cpu();
 }
 
+#ifdef CONFIG_PPC64
 #ifdef CONFIG_HOTPLUG_CPU
 static int smp_core99_cpu_notify(struct notifier_block *self,
 unsigned long action, void *hcpu)
@@ -879,7 +880,6 @@ static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
 
 static void __init smp_core99_bringup_done(void)
 {
-#ifdef CONFIG_PPC64
 extern void g5_phy_disable_cpu1(void);
 
 /* Close i2c bus if it was used for tb sync */
@@ -894,14 +894,14 @@ static void __init smp_core99_bringup_done(void)
 set_cpu_present(1, false);
 g5_phy_disable_cpu1();
 }
-#endif /* CONFIG_PPC64 */
-
 #ifdef CONFIG_HOTPLUG_CPU
 register_cpu_notifier(&smp_core99_cpu_nb);
 #endif
+
 if (ppc_md.progress)
 ppc_md.progress("smp_core99_bringup_done", 0x349);
 }
+#endif /* CONFIG_PPC64 */
 
 #ifdef CONFIG_HOTPLUG_CPU
 
@@ -975,7 +975,9 @@ static void pmac_cpu_die(void)
 struct smp_ops_t core99_smp_ops = {
 .message_pass = smp_mpic_message_pass,
 .probe = smp_core99_probe,
+#ifdef CONFIG_PPC64
 .bringup_done = smp_core99_bringup_done,
+#endif
 .kick_cpu = smp_core99_kick_cpu,
 .setup_cpu = smp_core99_setup_cpu,
 .give_timebase = smp_core99_give_timebase,
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 000724149089..6c42cfde8415 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c | |||
@@ -287,14 +287,22 @@ static int alloc_dispatch_logs(void) | |||
287 | int cpu, ret; | 287 | int cpu, ret; |
288 | struct paca_struct *pp; | 288 | struct paca_struct *pp; |
289 | struct dtl_entry *dtl; | 289 | struct dtl_entry *dtl; |
290 | struct kmem_cache *dtl_cache; | ||
290 | 291 | ||
291 | if (!firmware_has_feature(FW_FEATURE_SPLPAR)) | 292 | if (!firmware_has_feature(FW_FEATURE_SPLPAR)) |
292 | return 0; | 293 | return 0; |
293 | 294 | ||
295 | dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES, | ||
296 | DISPATCH_LOG_BYTES, 0, NULL); | ||
297 | if (!dtl_cache) { | ||
298 | pr_warn("Failed to create dispatch trace log buffer cache\n"); | ||
299 | pr_warn("Stolen time statistics will be unreliable\n"); | ||
300 | return 0; | ||
301 | } | ||
302 | |||
294 | for_each_possible_cpu(cpu) { | 303 | for_each_possible_cpu(cpu) { |
295 | pp = &paca[cpu]; | 304 | pp = &paca[cpu]; |
296 | dtl = kmalloc_node(DISPATCH_LOG_BYTES, GFP_KERNEL, | 305 | dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL); |
297 | cpu_to_node(cpu)); | ||
298 | if (!dtl) { | 306 | if (!dtl) { |
299 | pr_warn("Failed to allocate dispatch trace log for cpu %d\n", | 307 | pr_warn("Failed to allocate dispatch trace log for cpu %d\n", |
300 | cpu); | 308 | cpu); |
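The pseries hunk above moves the dispatch trace log buffers from kmalloc_node() to a slab cache whose alignment equals the object size. A hedged sketch of that pattern (dtl_cache, DISPATCH_LOG_BYTES and struct dtl_entry are taken from the diff; dtl_alloc() is a hypothetical wrapper, and the rationale -- that kmalloc() does not guarantee this natural alignment under every slab/debug configuration -- is an assumption, not stated in the hunk):

    #include <linux/slab.h>

    static struct kmem_cache *dtl_cache;

    /* The third argument of kmem_cache_create() is the required alignment;
     * passing the object size keeps every log buffer naturally aligned. */
    static struct dtl_entry *dtl_alloc(void)
    {
            if (!dtl_cache)
                    dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
                                                  DISPATCH_LOG_BYTES, 0, NULL);
            if (!dtl_cache)
                    return NULL;
            return kmem_cache_alloc(dtl_cache, GFP_KERNEL);
    }

This also ties in with the scan_dispatch_log() guard above: when the cache (or a per-cpu buffer) cannot be allocated, the dtl pointer stays NULL and stolen-time accounting simply reports zero.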
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c index f8f7f28c6343..68ca9290df94 100644 --- a/arch/powerpc/sysdev/fsl_pci.c +++ b/arch/powerpc/sysdev/fsl_pci.c | |||
@@ -324,6 +324,11 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary) | |||
324 | struct resource rsrc; | 324 | struct resource rsrc; |
325 | const int *bus_range; | 325 | const int *bus_range; |
326 | 326 | ||
327 | if (!of_device_is_available(dev)) { | ||
328 | pr_warning("%s: disabled\n", dev->full_name); | ||
329 | return -ENODEV; | ||
330 | } | ||
331 | |||
327 | pr_debug("Adding PCI host bridge %s\n", dev->full_name); | 332 | pr_debug("Adding PCI host bridge %s\n", dev->full_name); |
328 | 333 | ||
329 | /* Fetch host bridge registers address */ | 334 | /* Fetch host bridge registers address */ |
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c index 14232d57369c..49798532b477 100644 --- a/arch/powerpc/sysdev/fsl_rio.c +++ b/arch/powerpc/sysdev/fsl_rio.c | |||
@@ -1457,7 +1457,6 @@ int fsl_rio_setup(struct platform_device *dev) | |||
1457 | port->ops = ops; | 1457 | port->ops = ops; |
1458 | port->priv = priv; | 1458 | port->priv = priv; |
1459 | port->phys_efptr = 0x100; | 1459 | port->phys_efptr = 0x100; |
1460 | rio_register_mport(port); | ||
1461 | 1460 | ||
1462 | priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1); | 1461 | priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1); |
1463 | rio_regs_win = priv->regs_win; | 1462 | rio_regs_win = priv->regs_win; |
@@ -1504,6 +1503,9 @@ int fsl_rio_setup(struct platform_device *dev) | |||
1504 | dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n", | 1503 | dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n", |
1505 | port->sys_size ? 65536 : 256); | 1504 | port->sys_size ? 65536 : 256); |
1506 | 1505 | ||
1506 | if (rio_register_mport(port)) | ||
1507 | goto err; | ||
1508 | |||
1507 | if (port->host_deviceid >= 0) | 1509 | if (port->host_deviceid >= 0) |
1508 | out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST | | 1510 | out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST | |
1509 | RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED); | 1511 | RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED); |
diff --git a/arch/um/Kconfig.x86 b/arch/um/Kconfig.x86 index 02fb017fed47..a9da516a5274 100644 --- a/arch/um/Kconfig.x86 +++ b/arch/um/Kconfig.x86 | |||
@@ -4,6 +4,10 @@ menu "UML-specific options" | |||
4 | 4 | ||
5 | menu "Host processor type and features" | 5 | menu "Host processor type and features" |
6 | 6 | ||
7 | config CMPXCHG_LOCAL | ||
8 | bool | ||
9 | default n | ||
10 | |||
7 | source "arch/x86/Kconfig.cpu" | 11 | source "arch/x86/Kconfig.cpu" |
8 | 12 | ||
9 | endmenu | 13 | endmenu |
diff --git a/arch/um/include/asm/bug.h b/arch/um/include/asm/bug.h new file mode 100644 index 000000000000..9e33b864c359 --- /dev/null +++ b/arch/um/include/asm/bug.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef __UM_BUG_H | ||
2 | #define __UM_BUG_H | ||
3 | |||
4 | #include <asm-generic/bug.h> | ||
5 | |||
6 | #endif | ||
diff --git a/arch/x86/include/asm/gart.h b/arch/x86/include/asm/gart.h index 43085bfc99c3..156cd5d18d2a 100644 --- a/arch/x86/include/asm/gart.h +++ b/arch/x86/include/asm/gart.h | |||
@@ -66,7 +66,7 @@ static inline void gart_set_size_and_enable(struct pci_dev *dev, u32 order) | |||
66 | * Don't enable translation but enable GART IO and CPU accesses. | 66 | * Don't enable translation but enable GART IO and CPU accesses. |
67 | * Also, set DISTLBWALKPRB since GART tables memory is UC. | 67 | * Also, set DISTLBWALKPRB since GART tables memory is UC. |
68 | */ | 68 | */ |
69 | ctl = DISTLBWALKPRB | order << 1; | 69 | ctl = order << 1; |
70 | 70 | ||
71 | pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl); | 71 | pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl); |
72 | } | 72 | } |
@@ -75,17 +75,17 @@ static inline void enable_gart_translation(struct pci_dev *dev, u64 addr) | |||
75 | { | 75 | { |
76 | u32 tmp, ctl; | 76 | u32 tmp, ctl; |
77 | 77 | ||
78 | /* address of the mappings table */ | 78 | /* address of the mappings table */ |
79 | addr >>= 12; | 79 | addr >>= 12; |
80 | tmp = (u32) addr<<4; | 80 | tmp = (u32) addr<<4; |
81 | tmp &= ~0xf; | 81 | tmp &= ~0xf; |
82 | pci_write_config_dword(dev, AMD64_GARTTABLEBASE, tmp); | 82 | pci_write_config_dword(dev, AMD64_GARTTABLEBASE, tmp); |
83 | 83 | ||
84 | /* Enable GART translation for this hammer. */ | 84 | /* Enable GART translation for this hammer. */ |
85 | pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl); | 85 | pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl); |
86 | ctl |= GARTEN; | 86 | ctl |= GARTEN | DISTLBWALKPRB; |
87 | ctl &= ~(DISGARTCPU | DISGARTIO); | 87 | ctl &= ~(DISGARTCPU | DISGARTIO); |
88 | pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl); | 88 | pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl); |
89 | } | 89 | } |
90 | 90 | ||
91 | static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size) | 91 | static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size) |
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index fd5a1f365c95..3cce71413d0b 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h | |||
@@ -96,11 +96,15 @@ | |||
96 | #define MSR_IA32_MC0_ADDR 0x00000402 | 96 | #define MSR_IA32_MC0_ADDR 0x00000402 |
97 | #define MSR_IA32_MC0_MISC 0x00000403 | 97 | #define MSR_IA32_MC0_MISC 0x00000403 |
98 | 98 | ||
99 | #define MSR_AMD64_MC0_MASK 0xc0010044 | ||
100 | |||
99 | #define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x)) | 101 | #define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x)) |
100 | #define MSR_IA32_MCx_STATUS(x) (MSR_IA32_MC0_STATUS + 4*(x)) | 102 | #define MSR_IA32_MCx_STATUS(x) (MSR_IA32_MC0_STATUS + 4*(x)) |
101 | #define MSR_IA32_MCx_ADDR(x) (MSR_IA32_MC0_ADDR + 4*(x)) | 103 | #define MSR_IA32_MCx_ADDR(x) (MSR_IA32_MC0_ADDR + 4*(x)) |
102 | #define MSR_IA32_MCx_MISC(x) (MSR_IA32_MC0_MISC + 4*(x)) | 104 | #define MSR_IA32_MCx_MISC(x) (MSR_IA32_MC0_MISC + 4*(x)) |
103 | 105 | ||
106 | #define MSR_AMD64_MCx_MASK(x) (MSR_AMD64_MC0_MASK + (x)) | ||
107 | |||
104 | /* These are consecutive and not in the normal 4er MCE bank block */ | 108 | /* These are consecutive and not in the normal 4er MCE bank block */ |
105 | #define MSR_IA32_MC0_CTL2 0x00000280 | 109 | #define MSR_IA32_MC0_CTL2 0x00000280 |
106 | #define MSR_IA32_MCx_CTL2(x) (MSR_IA32_MC0_CTL2 + (x)) | 110 | #define MSR_IA32_MCx_CTL2(x) (MSR_IA32_MC0_CTL2 + (x)) |
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index 86d1ad4962a7..73fb469908c6 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c | |||
@@ -499,7 +499,7 @@ out: | |||
499 | * Don't enable translation yet but enable GART IO and CPU | 499 | * Don't enable translation yet but enable GART IO and CPU |
500 | * accesses and set DISTLBWALKPRB since GART table memory is UC. | 500 | * accesses and set DISTLBWALKPRB since GART table memory is UC. |
501 | */ | 501 | */ |
502 | u32 ctl = DISTLBWALKPRB | aper_order << 1; | 502 | u32 ctl = aper_order << 1; |
503 | 503 | ||
504 | bus = amd_nb_bus_dev_ranges[i].bus; | 504 | bus = amd_nb_bus_dev_ranges[i].bus; |
505 | dev_base = amd_nb_bus_dev_ranges[i].dev_base; | 505 | dev_base = amd_nb_bus_dev_ranges[i].dev_base; |
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 3ecece0217ef..3532d3bf8105 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -615,6 +615,25 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
615 | /* As a rule processors have APIC timer running in deep C states */ | 615 | /* As a rule processors have APIC timer running in deep C states */ |
616 | if (c->x86 >= 0xf && !cpu_has_amd_erratum(amd_erratum_400)) | 616 | if (c->x86 >= 0xf && !cpu_has_amd_erratum(amd_erratum_400)) |
617 | set_cpu_cap(c, X86_FEATURE_ARAT); | 617 | set_cpu_cap(c, X86_FEATURE_ARAT); |
618 | |||
619 | /* | ||
620 | * Disable GART TLB Walk Errors on Fam10h. We do this here | ||
621 | * because this is always needed when GART is enabled, even in a | ||
622 | * kernel which has no MCE support built in. | ||
623 | */ | ||
624 | if (c->x86 == 0x10) { | ||
625 | /* | ||
626 | * The BIOS should disable GartTlbWlk errors itself; if it | ||
627 | * doesn't, do it here, as suggested by the BKDG. | ||
628 | * | ||
629 | * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012 | ||
630 | */ | ||
631 | u64 mask; | ||
632 | |||
633 | rdmsrl(MSR_AMD64_MCx_MASK(4), mask); | ||
634 | mask |= (1 << 10); | ||
635 | wrmsrl(MSR_AMD64_MCx_MASK(4), mask); | ||
636 | } | ||
618 | } | 637 | } |
619 | 638 | ||
620 | #ifdef CONFIG_X86_32 | 639 | #ifdef CONFIG_X86_32 |
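The new Fam10h block relies on bit 10 of the bank-4 (northbridge) MCA mask MSR introduced above in msr-index.h. A small named-constant sketch, assuming from the surrounding comment that this bit gates GART TLB walk error reporting (the macro name is illustrative, not taken from the headers):

    /* Assumed meaning of the bit set in the hunk above. */
    #define MC4_MASK_GART_TLB_WALK_ERR      (1ULL << 10)

    static void sketch_mask_gart_tlb_walk_errors(void)
    {
            u64 mask;

            rdmsrl(MSR_AMD64_MCx_MASK(4), mask);
            mask |= MC4_MASK_GART_TLB_WALK_ERR;
            wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
    }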
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index 461f62bbd774..cf4e369cea67 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c | |||
@@ -8,7 +8,7 @@ static __initconst const u64 amd_hw_cache_event_ids | |||
8 | [ C(L1D) ] = { | 8 | [ C(L1D) ] = { |
9 | [ C(OP_READ) ] = { | 9 | [ C(OP_READ) ] = { |
10 | [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */ | 10 | [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */ |
11 | [ C(RESULT_MISS) ] = 0x0041, /* Data Cache Misses */ | 11 | [ C(RESULT_MISS) ] = 0x0141, /* Data Cache Misses */ |
12 | }, | 12 | }, |
13 | [ C(OP_WRITE) ] = { | 13 | [ C(OP_WRITE) ] = { |
14 | [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */ | 14 | [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */ |
@@ -427,7 +427,9 @@ static __initconst const struct x86_pmu amd_pmu = { | |||
427 | * | 427 | * |
428 | * Exceptions: | 428 | * Exceptions: |
429 | * | 429 | * |
430 | * 0x000 FP PERF_CTL[3], PERF_CTL[5:3] (*) | ||
430 | * 0x003 FP PERF_CTL[3] | 431 | * 0x003 FP PERF_CTL[3] |
432 | * 0x004 FP PERF_CTL[3], PERF_CTL[5:3] (*) | ||
431 | * 0x00B FP PERF_CTL[3] | 433 | * 0x00B FP PERF_CTL[3] |
432 | * 0x00D FP PERF_CTL[3] | 434 | * 0x00D FP PERF_CTL[3] |
433 | * 0x023 DE PERF_CTL[2:0] | 435 | * 0x023 DE PERF_CTL[2:0] |
@@ -448,6 +450,8 @@ static __initconst const struct x86_pmu amd_pmu = { | |||
448 | * 0x0DF LS PERF_CTL[5:0] | 450 | * 0x0DF LS PERF_CTL[5:0] |
449 | * 0x1D6 EX PERF_CTL[5:0] | 451 | * 0x1D6 EX PERF_CTL[5:0] |
450 | * 0x1D8 EX PERF_CTL[5:0] | 452 | * 0x1D8 EX PERF_CTL[5:0] |
453 | * | ||
454 | * (*) depending on the umask, all FPU counters may be used | ||
451 | */ | 455 | */ |
452 | 456 | ||
453 | static struct event_constraint amd_f15_PMC0 = EVENT_CONSTRAINT(0, 0x01, 0); | 457 | static struct event_constraint amd_f15_PMC0 = EVENT_CONSTRAINT(0, 0x01, 0); |
@@ -460,18 +464,28 @@ static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0); | |||
460 | static struct event_constraint * | 464 | static struct event_constraint * |
461 | amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event) | 465 | amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event) |
462 | { | 466 | { |
463 | unsigned int event_code = amd_get_event_code(&event->hw); | 467 | struct hw_perf_event *hwc = &event->hw; |
468 | unsigned int event_code = amd_get_event_code(hwc); | ||
464 | 469 | ||
465 | switch (event_code & AMD_EVENT_TYPE_MASK) { | 470 | switch (event_code & AMD_EVENT_TYPE_MASK) { |
466 | case AMD_EVENT_FP: | 471 | case AMD_EVENT_FP: |
467 | switch (event_code) { | 472 | switch (event_code) { |
473 | case 0x000: | ||
474 | if (!(hwc->config & 0x0000F000ULL)) | ||
475 | break; | ||
476 | if (!(hwc->config & 0x00000F00ULL)) | ||
477 | break; | ||
478 | return &amd_f15_PMC3; | ||
479 | case 0x004: | ||
480 | if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1) | ||
481 | break; | ||
482 | return &amd_f15_PMC3; | ||
468 | case 0x003: | 483 | case 0x003: |
469 | case 0x00B: | 484 | case 0x00B: |
470 | case 0x00D: | 485 | case 0x00D: |
471 | return &amd_f15_PMC3; | 486 | return &amd_f15_PMC3; |
472 | default: | ||
473 | return &amd_f15_PMC53; | ||
474 | } | 487 | } |
488 | return &amd_f15_PMC53; | ||
475 | case AMD_EVENT_LS: | 489 | case AMD_EVENT_LS: |
476 | case AMD_EVENT_DC: | 490 | case AMD_EVENT_DC: |
477 | case AMD_EVENT_EX_LS: | 491 | case AMD_EVENT_EX_LS: |
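The new FP cases key off amd_get_event_code() and the unit-mask bits of hwc->config; the helper itself is not in this hunk. A plausible sketch, assuming the usual AMD layout in which the 12-bit event select is split between bits 7:0 and 35:32 of the event-select value:

    /*
     * Sketch: rebuild the 12-bit event code from a raw config value.
     * Bits 35:32 carry EventSelect[11:8]; shifting them down by 24 places
     * that nibble directly above the low byte (EventSelect[7:0]).
     */
    static unsigned int amd_get_event_code(struct hw_perf_event *hwc)
    {
            return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
    }

Under that reading, the 0x000 and 0x004 cases inspect additional config bits (sub-event and unit-mask fields) to decide whether the event is restricted to PERF_CTL[3] or may use any FPU counter, matching the updated table in the comment block.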
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index 82ada01625b9..b117efd24f71 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c | |||
@@ -81,6 +81,9 @@ static u32 gart_unmapped_entry; | |||
81 | #define AGPEXTERN | 81 | #define AGPEXTERN |
82 | #endif | 82 | #endif |
83 | 83 | ||
84 | /* GART can only remap to physical addresses < 1TB */ | ||
85 | #define GART_MAX_PHYS_ADDR (1ULL << 40) | ||
86 | |||
84 | /* backdoor interface to AGP driver */ | 87 | /* backdoor interface to AGP driver */ |
85 | AGPEXTERN int agp_memory_reserved; | 88 | AGPEXTERN int agp_memory_reserved; |
86 | AGPEXTERN __u32 *agp_gatt_table; | 89 | AGPEXTERN __u32 *agp_gatt_table; |
@@ -212,9 +215,13 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem, | |||
212 | size_t size, int dir, unsigned long align_mask) | 215 | size_t size, int dir, unsigned long align_mask) |
213 | { | 216 | { |
214 | unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE); | 217 | unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE); |
215 | unsigned long iommu_page = alloc_iommu(dev, npages, align_mask); | 218 | unsigned long iommu_page; |
216 | int i; | 219 | int i; |
217 | 220 | ||
221 | if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR)) | ||
222 | return bad_dma_addr; | ||
223 | |||
224 | iommu_page = alloc_iommu(dev, npages, align_mask); | ||
218 | if (iommu_page == -1) { | 225 | if (iommu_page == -1) { |
219 | if (!nonforced_iommu(dev, phys_mem, size)) | 226 | if (!nonforced_iommu(dev, phys_mem, size)) |
220 | return phys_mem; | 227 | return phys_mem; |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index c2871d3c71b6..8ed8908cc9f7 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -312,6 +312,26 @@ void __cpuinit smp_store_cpu_info(int id) | |||
312 | identify_secondary_cpu(c); | 312 | identify_secondary_cpu(c); |
313 | } | 313 | } |
314 | 314 | ||
315 | static void __cpuinit check_cpu_siblings_on_same_node(int cpu1, int cpu2) | ||
316 | { | ||
317 | int node1 = early_cpu_to_node(cpu1); | ||
318 | int node2 = early_cpu_to_node(cpu2); | ||
319 | |||
320 | /* | ||
321 | * Our CPU scheduler assumes all logical CPUs in the same physical CPU | ||
322 | * share the same node, but buggy ACPI or NUMA emulation might assign | ||
323 | * them to different nodes. Fix that up here. | ||
324 | */ | ||
325 | if (node1 != node2) { | ||
326 | pr_warning("CPU %d in node %d and CPU %d in node %d are in the same physical CPU; forcing them onto node %d\n", | ||
327 | cpu1, node1, cpu2, node2, node2); | ||
328 | |||
329 | numa_remove_cpu(cpu1); | ||
330 | numa_set_node(cpu1, node2); | ||
331 | numa_add_cpu(cpu1); | ||
332 | } | ||
333 | } | ||
334 | |||
315 | static void __cpuinit link_thread_siblings(int cpu1, int cpu2) | 335 | static void __cpuinit link_thread_siblings(int cpu1, int cpu2) |
316 | { | 336 | { |
317 | cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2)); | 337 | cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2)); |
@@ -320,6 +340,7 @@ static void __cpuinit link_thread_siblings(int cpu1, int cpu2) | |||
320 | cpumask_set_cpu(cpu2, cpu_core_mask(cpu1)); | 340 | cpumask_set_cpu(cpu2, cpu_core_mask(cpu1)); |
321 | cpumask_set_cpu(cpu1, cpu_llc_shared_mask(cpu2)); | 341 | cpumask_set_cpu(cpu1, cpu_llc_shared_mask(cpu2)); |
322 | cpumask_set_cpu(cpu2, cpu_llc_shared_mask(cpu1)); | 342 | cpumask_set_cpu(cpu2, cpu_llc_shared_mask(cpu1)); |
343 | check_cpu_siblings_on_same_node(cpu1, cpu2); | ||
323 | } | 344 | } |
324 | 345 | ||
325 | 346 | ||
@@ -361,10 +382,12 @@ void __cpuinit set_cpu_sibling_map(int cpu) | |||
361 | per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { | 382 | per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { |
362 | cpumask_set_cpu(i, cpu_llc_shared_mask(cpu)); | 383 | cpumask_set_cpu(i, cpu_llc_shared_mask(cpu)); |
363 | cpumask_set_cpu(cpu, cpu_llc_shared_mask(i)); | 384 | cpumask_set_cpu(cpu, cpu_llc_shared_mask(i)); |
385 | check_cpu_siblings_on_same_node(cpu, i); | ||
364 | } | 386 | } |
365 | if (c->phys_proc_id == cpu_data(i).phys_proc_id) { | 387 | if (c->phys_proc_id == cpu_data(i).phys_proc_id) { |
366 | cpumask_set_cpu(i, cpu_core_mask(cpu)); | 388 | cpumask_set_cpu(i, cpu_core_mask(cpu)); |
367 | cpumask_set_cpu(cpu, cpu_core_mask(i)); | 389 | cpumask_set_cpu(cpu, cpu_core_mask(i)); |
390 | check_cpu_siblings_on_same_node(cpu, i); | ||
368 | /* | 391 | /* |
369 | * Does this new cpu bringup a new core? | 392 | * Does this new cpu bringup a new core? |
370 | */ | 393 | */ |
diff --git a/arch/x86/platform/ce4100/falconfalls.dts b/arch/x86/platform/ce4100/falconfalls.dts index dc701ea58546..2d6d226f2b10 100644 --- a/arch/x86/platform/ce4100/falconfalls.dts +++ b/arch/x86/platform/ce4100/falconfalls.dts | |||
@@ -74,6 +74,7 @@ | |||
74 | compatible = "intel,ce4100-pci", "pci"; | 74 | compatible = "intel,ce4100-pci", "pci"; |
75 | device_type = "pci"; | 75 | device_type = "pci"; |
76 | bus-range = <1 1>; | 76 | bus-range = <1 1>; |
77 | reg = <0x0800 0x0 0x0 0x0 0x0>; | ||
77 | ranges = <0x2000000 0 0xdffe0000 0x2000000 0 0xdffe0000 0 0x1000>; | 78 | ranges = <0x2000000 0 0xdffe0000 0x2000000 0 0xdffe0000 0 0x1000>; |
78 | 79 | ||
79 | interrupt-parent = <&ioapic2>; | 80 | interrupt-parent = <&ioapic2>; |
@@ -412,6 +413,7 @@ | |||
412 | #address-cells = <2>; | 413 | #address-cells = <2>; |
413 | #size-cells = <1>; | 414 | #size-cells = <1>; |
414 | compatible = "isa"; | 415 | compatible = "isa"; |
416 | reg = <0xf800 0x0 0x0 0x0 0x0>; | ||
415 | ranges = <1 0 0 0 0 0x100>; | 417 | ranges = <1 0 0 0 0 0x100>; |
416 | 418 | ||
417 | rtc@70 { | 419 | rtc@70 { |
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c index 5c0207bf959b..275dbc19e2cf 100644 --- a/arch/x86/platform/mrst/mrst.c +++ b/arch/x86/platform/mrst/mrst.c | |||
@@ -97,11 +97,11 @@ static int __init sfi_parse_mtmr(struct sfi_table_header *table) | |||
97 | pentry->freq_hz, pentry->irq); | 97 | pentry->freq_hz, pentry->irq); |
98 | if (!pentry->irq) | 98 | if (!pentry->irq) |
99 | continue; | 99 | continue; |
100 | mp_irq.type = MP_IOAPIC; | 100 | mp_irq.type = MP_INTSRC; |
101 | mp_irq.irqtype = mp_INT; | 101 | mp_irq.irqtype = mp_INT; |
102 | /* trigger mode: edge (bits 2-3); polarity: active high (bits 0-1) */ | 102 | /* trigger mode: edge (bits 2-3); polarity: active high (bits 0-1) */ |
103 | mp_irq.irqflag = 5; | 103 | mp_irq.irqflag = 5; |
104 | mp_irq.srcbus = 0; | 104 | mp_irq.srcbus = MP_BUS_ISA; |
105 | mp_irq.srcbusirq = pentry->irq; /* IRQ */ | 105 | mp_irq.srcbusirq = pentry->irq; /* IRQ */ |
106 | mp_irq.dstapic = MP_APIC_ALL; | 106 | mp_irq.dstapic = MP_APIC_ALL; |
107 | mp_irq.dstirq = pentry->irq; | 107 | mp_irq.dstirq = pentry->irq; |
@@ -168,10 +168,10 @@ int __init sfi_parse_mrtc(struct sfi_table_header *table) | |||
168 | for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) { | 168 | for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) { |
169 | pr_debug("RTC[%d]: paddr = 0x%08x, irq = %d\n", | 169 | pr_debug("RTC[%d]: paddr = 0x%08x, irq = %d\n", |
170 | totallen, (u32)pentry->phys_addr, pentry->irq); | 170 | totallen, (u32)pentry->phys_addr, pentry->irq); |
171 | mp_irq.type = MP_IOAPIC; | 171 | mp_irq.type = MP_INTSRC; |
172 | mp_irq.irqtype = mp_INT; | 172 | mp_irq.irqtype = mp_INT; |
173 | mp_irq.irqflag = 0xf; /* level trigger and active low */ | 173 | mp_irq.irqflag = 0xf; /* level trigger and active low */ |
174 | mp_irq.srcbus = 0; | 174 | mp_irq.srcbus = MP_BUS_ISA; |
175 | mp_irq.srcbusirq = pentry->irq; /* IRQ */ | 175 | mp_irq.srcbusirq = pentry->irq; /* IRQ */ |
176 | mp_irq.dstapic = MP_APIC_ALL; | 176 | mp_irq.dstapic = MP_APIC_ALL; |
177 | mp_irq.dstirq = pentry->irq; | 177 | mp_irq.dstirq = pentry->irq; |
@@ -282,7 +282,7 @@ void __init x86_mrst_early_setup(void) | |||
282 | /* Avoid searching for BIOS MP tables */ | 282 | /* Avoid searching for BIOS MP tables */ |
283 | x86_init.mpparse.find_smp_config = x86_init_noop; | 283 | x86_init.mpparse.find_smp_config = x86_init_noop; |
284 | x86_init.mpparse.get_smp_config = x86_init_uint_noop; | 284 | x86_init.mpparse.get_smp_config = x86_init_uint_noop; |
285 | 285 | set_bit(MP_BUS_ISA, mp_bus_not_pci); | |
286 | } | 286 | } |
287 | 287 | ||
288 | /* | 288 | /* |
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig index 1c7121ba18ff..5cc821cb2e09 100644 --- a/arch/x86/xen/Kconfig +++ b/arch/x86/xen/Kconfig | |||
@@ -39,6 +39,7 @@ config XEN_MAX_DOMAIN_MEMORY | |||
39 | config XEN_SAVE_RESTORE | 39 | config XEN_SAVE_RESTORE |
40 | bool | 40 | bool |
41 | depends on XEN | 41 | depends on XEN |
42 | select HIBERNATE_CALLBACKS | ||
42 | default y | 43 | default y |
43 | 44 | ||
44 | config XEN_DEBUG_FS | 45 | config XEN_DEBUG_FS |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 49dbd78ec3cb..e3c6a06cf725 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -238,6 +238,7 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx, | |||
238 | static __init void xen_init_cpuid_mask(void) | 238 | static __init void xen_init_cpuid_mask(void) |
239 | { | 239 | { |
240 | unsigned int ax, bx, cx, dx; | 240 | unsigned int ax, bx, cx, dx; |
241 | unsigned int xsave_mask; | ||
241 | 242 | ||
242 | cpuid_leaf1_edx_mask = | 243 | cpuid_leaf1_edx_mask = |
243 | ~((1 << X86_FEATURE_MCE) | /* disable MCE */ | 244 | ~((1 << X86_FEATURE_MCE) | /* disable MCE */ |
@@ -249,24 +250,16 @@ static __init void xen_init_cpuid_mask(void) | |||
249 | cpuid_leaf1_edx_mask &= | 250 | cpuid_leaf1_edx_mask &= |
250 | ~((1 << X86_FEATURE_APIC) | /* disable local APIC */ | 251 | ~((1 << X86_FEATURE_APIC) | /* disable local APIC */ |
251 | (1 << X86_FEATURE_ACPI)); /* disable ACPI */ | 252 | (1 << X86_FEATURE_ACPI)); /* disable ACPI */ |
252 | |||
253 | ax = 1; | 253 | ax = 1; |
254 | cx = 0; | ||
255 | xen_cpuid(&ax, &bx, &cx, &dx); | 254 | xen_cpuid(&ax, &bx, &cx, &dx); |
256 | 255 | ||
257 | /* cpuid claims we support xsave; try enabling it to see what happens */ | 256 | xsave_mask = |
258 | if (cx & (1 << (X86_FEATURE_XSAVE % 32))) { | 257 | (1 << (X86_FEATURE_XSAVE % 32)) | |
259 | unsigned long cr4; | 258 | (1 << (X86_FEATURE_OSXSAVE % 32)); |
260 | |||
261 | set_in_cr4(X86_CR4_OSXSAVE); | ||
262 | |||
263 | cr4 = read_cr4(); | ||
264 | 259 | ||
265 | if ((cr4 & X86_CR4_OSXSAVE) == 0) | 260 | /* Xen will set CR4.OSXSAVE if supported and not disabled by force */ |
266 | cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_XSAVE % 32)); | 261 | if ((cx & xsave_mask) != xsave_mask) |
267 | 262 | cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */ | |
268 | clear_in_cr4(X86_CR4_OSXSAVE); | ||
269 | } | ||
270 | } | 263 | } |
271 | 264 | ||
272 | static void xen_set_debugreg(int reg, unsigned long val) | 265 | static void xen_set_debugreg(int reg, unsigned long val) |
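The xsave_mask above is built from X86_FEATURE_* constants reduced modulo 32. A small illustration, assuming the usual encoding of 32 * cpuid-word + bit, with CPUID leaf 1 ECX being word 4 and XSAVE/OSXSAVE at bits 26 and 27 (the names below are illustrative; the real definitions live in <asm/cpufeature.h>):

    #define SKETCH_X86_FEATURE_XSAVE        (4 * 32 + 26)   /* CPUID.1:ECX bit 26 */
    #define SKETCH_X86_FEATURE_OSXSAVE      (4 * 32 + 27)   /* CPUID.1:ECX bit 27 */

    /* Same value as the xsave_mask computed in the hunk above. */
    #define SKETCH_XSAVE_MASK \
            ((1u << (SKETCH_X86_FEATURE_XSAVE % 32)) | \
             (1u << (SKETCH_X86_FEATURE_OSXSAVE % 32)))

If either bit is missing from the CPUID leaf returned by xen_cpuid(), both are cleared from cpuid_leaf1_ecx_mask, so the guest neither uses XSAVE nor advertises OSXSAVE.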
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index c82df6c9c0f0..aef7af92b28b 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -565,13 +565,13 @@ pte_t xen_make_pte_debug(pteval_t pte) | |||
565 | if (io_page && | 565 | if (io_page && |
566 | (xen_initial_domain() || addr >= ISA_END_ADDRESS)) { | 566 | (xen_initial_domain() || addr >= ISA_END_ADDRESS)) { |
567 | other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT; | 567 | other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT; |
568 | WARN(addr != other_addr, | 568 | WARN_ONCE(addr != other_addr, |
569 | "0x%lx is using VM_IO, but it is 0x%lx!\n", | 569 | "0x%lx is using VM_IO, but it is 0x%lx!\n", |
570 | (unsigned long)addr, (unsigned long)other_addr); | 570 | (unsigned long)addr, (unsigned long)other_addr); |
571 | } else { | 571 | } else { |
572 | pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP; | 572 | pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP; |
573 | other_addr = (_pte.pte & PTE_PFN_MASK); | 573 | other_addr = (_pte.pte & PTE_PFN_MASK); |
574 | WARN((addr == other_addr) && (!io_page) && (!iomap_set), | 574 | WARN_ONCE((addr == other_addr) && (!io_page) && (!iomap_set), |
575 | "0x%lx is missing VM_IO (and wasn't fixed)!\n", | 575 | "0x%lx is missing VM_IO (and wasn't fixed)!\n", |
576 | (unsigned long)addr); | 576 | (unsigned long)addr); |
577 | } | 577 | } |
@@ -1473,16 +1473,20 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) | |||
1473 | #endif | 1473 | #endif |
1474 | } | 1474 | } |
1475 | 1475 | ||
1476 | #ifdef CONFIG_X86_32 | ||
1476 | static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) | 1477 | static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) |
1477 | { | 1478 | { |
1478 | unsigned long pfn = pte_pfn(pte); | ||
1479 | |||
1480 | #ifdef CONFIG_X86_32 | ||
1481 | /* If there's an existing pte, then don't allow _PAGE_RW to be set */ | 1479 | /* If there's an existing pte, then don't allow _PAGE_RW to be set */ |
1482 | if (pte_val_ma(*ptep) & _PAGE_PRESENT) | 1480 | if (pte_val_ma(*ptep) & _PAGE_PRESENT) |
1483 | pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) & | 1481 | pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) & |
1484 | pte_val_ma(pte)); | 1482 | pte_val_ma(pte)); |
1485 | #endif | 1483 | |
1484 | return pte; | ||
1485 | } | ||
1486 | #else /* CONFIG_X86_64 */ | ||
1487 | static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) | ||
1488 | { | ||
1489 | unsigned long pfn = pte_pfn(pte); | ||
1486 | 1490 | ||
1487 | /* | 1491 | /* |
1488 | * If the new pfn is within the range of the newly allocated | 1492 | * If the new pfn is within the range of the newly allocated |
@@ -1497,6 +1501,7 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) | |||
1497 | 1501 | ||
1498 | return pte; | 1502 | return pte; |
1499 | } | 1503 | } |
1504 | #endif /* CONFIG_X86_64 */ | ||
1500 | 1505 | ||
1501 | /* Init-time set_pte while constructing initial pagetables, which | 1506 | /* Init-time set_pte while constructing initial pagetables, which |
1502 | doesn't allow RO pagetable pages to be remapped RW */ | 1507 | doesn't allow RO pagetable pages to be remapped RW */ |
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index fa0269a99377..90bac0aac3a5 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c | |||
@@ -227,7 +227,7 @@ char * __init xen_memory_setup(void) | |||
227 | 227 | ||
228 | memcpy(map_raw, map, sizeof(map)); | 228 | memcpy(map_raw, map, sizeof(map)); |
229 | e820.nr_map = 0; | 229 | e820.nr_map = 0; |
230 | xen_extra_mem_start = mem_end; | 230 | xen_extra_mem_start = max((1ULL << 32), mem_end); |
231 | for (i = 0; i < memmap.nr_entries; i++) { | 231 | for (i = 0; i < memmap.nr_entries; i++) { |
232 | unsigned long long end; | 232 | unsigned long long end; |
233 | 233 | ||