author    Ivan Kokshaysky <ink@jurassic.park.msu.ru>  2010-10-26 17:22:17 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-10-26 19:52:12 -0400
commit    98c532ecbe582586e204688c6cde7e27580cc43f (patch)
tree      6659f979076730695af09d9e3653074a6d9643c3 /arch/alpha
parent    947272dd3e959c69ff0fc54e62e44163b729b796 (diff)
alpha: use single HAE window on T2 core logic (gamma, sable)
T2 is the only alpha SMP core logic that does HAE switching at runtime, which
is fundamentally racy on SMP.  This patch limits MMIO space on T2 to HAE0
only, like we did on MCPCIA (rawhide) long ago.  This leaves us with only
112 Mb of PCI MMIO (the 128 Mb HAE aperture minus 16 Mb reserved for EISA),
but since Linux PCI allocations are reasonably tight, it should be enough
for sane hardware configurations.

Also, fix a typo in the MCPCIA_FROB_MMIO macro, which shouldn't call
set_hae() if MCPCIA_ONE_HAE_WINDOW is defined.  This is more for
correctness, as set_hae() is a no-op anyway in that case.

Signed-off-by: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Richard Henderson <rth@twiddle.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
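The "fundamentally racy" part is the classic lost-update pattern for a shared,
switchable window register: one CPU programs the HAE and another reprograms it
before the first CPU's dependent MMIO access has happened.  A rough userspace
illustration of that failure mode, with pthreads standing in for CPUs and
made-up names (window_reg, fake_bus) rather than kernel code, might look like
this intentionally unsynchronized sketch:

/* Illustration only: a userspace model of a racy window register, not kernel code. */
#include <pthread.h>
#include <stdio.h>

static unsigned long window_reg;                 /* stands in for the single HAE register */
static const unsigned long fake_bus[2] = { 0x11111111UL, 0x22222222UL };

/* Each "CPU" selects its window, then performs the access that depends on it.
   With no common lock, the other thread may retarget window_reg in between. */
static void *cpu(void *arg)
{
        unsigned long want = (unsigned long) arg;

        for (int i = 0; i < 100000; i++) {
                window_reg = want;                         /* "set_hae(want)"        */
                unsigned long got = fake_bus[window_reg];  /* dependent MMIO access  */
                if (got != fake_bus[want])
                        fprintf(stderr, "lost update: wanted window %lu\n", want);
        }
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, cpu, (void *) 0UL);
        pthread_create(&b, NULL, cpu, (void *) 1UL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
}

With T2_ONE_HAE_WINDOW the window is never switched at runtime, so the
readX/writeX fast paths need no serialization at all, which is exactly what the
core_t2.h hunks below remove.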
Diffstat (limited to 'arch/alpha')
-rw-r--r--  arch/alpha/include/asm/core_mcpcia.h |  2
-rw-r--r--  arch/alpha/include/asm/core_t2.h     | 54
-rw-r--r--  arch/alpha/kernel/core_t2.c          | 11
-rw-r--r--  arch/alpha/kernel/machvec_impl.h     |  3
4 files changed, 30 insertions, 40 deletions
diff --git a/arch/alpha/include/asm/core_mcpcia.h b/arch/alpha/include/asm/core_mcpcia.h
index 21ac53383b37..9f67a056b461 100644
--- a/arch/alpha/include/asm/core_mcpcia.h
+++ b/arch/alpha/include/asm/core_mcpcia.h
@@ -247,7 +247,7 @@ struct el_MCPCIA_uncorrected_frame_mcheck {
 #define vip volatile int __force *
 #define vuip volatile unsigned int __force *
 
-#ifdef MCPCIA_ONE_HAE_WINDOW
+#ifndef MCPCIA_ONE_HAE_WINDOW
 #define MCPCIA_FROB_MMIO \
         if (__mcpcia_is_mmio(hose)) { \
                 set_hae(hose & 0xffffffff); \
diff --git a/arch/alpha/include/asm/core_t2.h b/arch/alpha/include/asm/core_t2.h
index 471c07292e0b..91b46801b290 100644
--- a/arch/alpha/include/asm/core_t2.h
+++ b/arch/alpha/include/asm/core_t2.h
@@ -1,6 +1,9 @@
 #ifndef __ALPHA_T2__H__
 #define __ALPHA_T2__H__
 
+/* Fit everything into one 128MB HAE window. */
+#define T2_ONE_HAE_WINDOW 1
+
 #include <linux/types.h>
 #include <linux/spinlock.h>
 #include <asm/compiler.h>
@@ -19,7 +22,7 @@
  *
  */
 
-#define T2_MEM_R1_MASK 0x07ffffff /* Mem sparse region 1 mask is 26 bits */
+#define T2_MEM_R1_MASK 0x07ffffff /* Mem sparse region 1 mask is 27 bits */
 
 /* GAMMA-SABLE is a SABLE with EV5-based CPUs */
 /* All LYNX machines, EV4 or EV5, use the GAMMA bias also */
@@ -85,7 +88,9 @@
 #define T2_DIR (IDENT_ADDR + GAMMA_BIAS + 0x38e0004a0UL)
 #define T2_ICE (IDENT_ADDR + GAMMA_BIAS + 0x38e0004c0UL)
 
+#ifndef T2_ONE_HAE_WINDOW
 #define T2_HAE_ADDRESS T2_HAE_1
+#endif
 
 /* T2 CSRs are in the non-cachable primary IO space from 3.8000.0000 to
    3.8fff.ffff
@@ -429,13 +434,15 @@ extern inline void t2_outl(u32 b, unsigned long addr)
  *
  */
 
+#ifdef T2_ONE_HAE_WINDOW
+#define t2_set_hae
+#else
 #define t2_set_hae { \
-        msb = addr >> 27; \
+        unsigned long msb = addr >> 27; \
         addr &= T2_MEM_R1_MASK; \
         set_hae(msb); \
 }
-
-extern raw_spinlock_t t2_hae_lock;
+#endif
 
 /*
  * NOTE: take T2_DENSE_MEM off in each readX/writeX routine, since
@@ -446,28 +453,22 @@ extern raw_spinlock_t t2_hae_lock;
 __EXTERN_INLINE u8 t2_readb(const volatile void __iomem *xaddr)
 {
         unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
-        unsigned long result, msb;
-        unsigned long flags;
-        raw_spin_lock_irqsave(&t2_hae_lock, flags);
+        unsigned long result;
 
         t2_set_hae;
 
         result = *(vip) ((addr << 5) + T2_SPARSE_MEM + 0x00);
-        raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
         return __kernel_extbl(result, addr & 3);
 }
 
 __EXTERN_INLINE u16 t2_readw(const volatile void __iomem *xaddr)
 {
         unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
-        unsigned long result, msb;
-        unsigned long flags;
-        raw_spin_lock_irqsave(&t2_hae_lock, flags);
+        unsigned long result;
 
         t2_set_hae;
 
         result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08);
-        raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
         return __kernel_extwl(result, addr & 3);
 }
 
@@ -478,59 +479,47 @@ __EXTERN_INLINE u16 t2_readw(const volatile void __iomem *xaddr)
 __EXTERN_INLINE u32 t2_readl(const volatile void __iomem *xaddr)
 {
         unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
-        unsigned long result, msb;
-        unsigned long flags;
-        raw_spin_lock_irqsave(&t2_hae_lock, flags);
+        unsigned long result;
 
         t2_set_hae;
 
         result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18);
-        raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
         return result & 0xffffffffUL;
 }
 
 __EXTERN_INLINE u64 t2_readq(const volatile void __iomem *xaddr)
 {
         unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
-        unsigned long r0, r1, work, msb;
-        unsigned long flags;
-        raw_spin_lock_irqsave(&t2_hae_lock, flags);
+        unsigned long r0, r1, work;
 
         t2_set_hae;
 
         work = (addr << 5) + T2_SPARSE_MEM + 0x18;
         r0 = *(vuip)(work);
         r1 = *(vuip)(work + (4 << 5));
-        raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
         return r1 << 32 | r0;
 }
 
 __EXTERN_INLINE void t2_writeb(u8 b, volatile void __iomem *xaddr)
 {
         unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
-        unsigned long msb, w;
-        unsigned long flags;
-        raw_spin_lock_irqsave(&t2_hae_lock, flags);
+        unsigned long w;
 
         t2_set_hae;
 
         w = __kernel_insbl(b, addr & 3);
         *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00) = w;
-        raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 }
 
 __EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr)
 {
         unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
-        unsigned long msb, w;
-        unsigned long flags;
-        raw_spin_lock_irqsave(&t2_hae_lock, flags);
+        unsigned long w;
 
         t2_set_hae;
 
         w = __kernel_inswl(b, addr & 3);
         *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08) = w;
-        raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 }
 
 /*
@@ -540,29 +529,22 @@ __EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr)
 __EXTERN_INLINE void t2_writel(u32 b, volatile void __iomem *xaddr)
 {
         unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
-        unsigned long msb;
-        unsigned long flags;
-        raw_spin_lock_irqsave(&t2_hae_lock, flags);
 
         t2_set_hae;
 
         *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18) = b;
-        raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 }
 
 __EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr)
 {
         unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
-        unsigned long msb, work;
-        unsigned long flags;
-        raw_spin_lock_irqsave(&t2_hae_lock, flags);
+        unsigned long work;
 
         t2_set_hae;
 
         work = (addr << 5) + T2_SPARSE_MEM + 0x18;
         *(vuip)work = b;
         *(vuip)(work + (4 << 5)) = b >> 32;
-        raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 }
 
 __EXTERN_INLINE void __iomem *t2_ioportmap(unsigned long addr)
diff --git a/arch/alpha/kernel/core_t2.c b/arch/alpha/kernel/core_t2.c
index e6d90568b65d..2f770e994289 100644
--- a/arch/alpha/kernel/core_t2.c
+++ b/arch/alpha/kernel/core_t2.c
@@ -74,8 +74,6 @@
 # define DBG(args)
 #endif
 
-DEFINE_RAW_SPINLOCK(t2_hae_lock);
-
 static volatile unsigned int t2_mcheck_any_expected;
 static volatile unsigned int t2_mcheck_last_taken;
 
@@ -406,6 +404,7 @@ void __init
 t2_init_arch(void)
 {
         struct pci_controller *hose;
+        struct resource *hae_mem;
         unsigned long temp;
         unsigned int i;
 
@@ -433,7 +432,13 @@ t2_init_arch(void)
          */
         pci_isa_hose = hose = alloc_pci_controller();
         hose->io_space = &ioport_resource;
-        hose->mem_space = &iomem_resource;
+        hae_mem = alloc_resource();
+        hae_mem->start = 0;
+        hae_mem->end = T2_MEM_R1_MASK;
+        hae_mem->name = pci_hae0_name;
+        if (request_resource(&iomem_resource, hae_mem) < 0)
+                printk(KERN_ERR "Failed to request HAE_MEM\n");
+        hose->mem_space = hae_mem;
         hose->index = 0;
 
         hose->sparse_mem_base = T2_SPARSE_MEM - IDENT_ADDR;
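The resource set up in this hunk spans 0 through T2_MEM_R1_MASK, i.e. the
single low HAE window.  A quick userspace check of the changelog's arithmetic
(EISA_HOLE is an illustrative name for the 16 Mb reservation, not a kernel
symbol):

#include <stdio.h>

#define T2_MEM_R1_MASK 0x07ffffffUL     /* value from core_t2.h above */
#define EISA_HOLE      (16UL << 20)     /* 16 Mb reserved for EISA, per the changelog */

int main(void)
{
        unsigned long aperture = T2_MEM_R1_MASK + 1;    /* resource spans 0..mask */

        printf("HAE0 aperture:   %lu Mb\n", aperture >> 20);               /* 128 */
        printf("usable PCI MMIO: %lu Mb\n", (aperture - EISA_HOLE) >> 20); /* 112 */
        return 0;
}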
diff --git a/arch/alpha/kernel/machvec_impl.h b/arch/alpha/kernel/machvec_impl.h
index 512685f78097..7fa62488bd16 100644
--- a/arch/alpha/kernel/machvec_impl.h
+++ b/arch/alpha/kernel/machvec_impl.h
@@ -25,6 +25,9 @@
 #ifdef MCPCIA_ONE_HAE_WINDOW
 #define MCPCIA_HAE_ADDRESS (&alpha_mv.hae_cache)
 #endif
+#ifdef T2_ONE_HAE_WINDOW
+#define T2_HAE_ADDRESS (&alpha_mv.hae_cache)
+#endif
 
 /* Only a few systems don't define IACK_SC, handling all interrupts through
    the SRM console. But splitting out that one case from IO() below
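The machvec_impl.h addition is what makes the remaining set_hae() calls
harmless.  Roughly, set_hae() on alpha records the value in alpha_mv.hae_cache
and writes it through alpha_mv.hae_register, and (as with the MCPCIA case
directly above) the platform's *_HAE_ADDRESS is what that register pointer
ends up aimed at.  Redefining T2_HAE_ADDRESS as &alpha_mv.hae_cache therefore
turns the "hardware" write into an ordinary store to the cache variable.  A
stripped-down userspace model of that trick, with simplified types and a toy
set_hae() rather than the kernel's real one:

/* Simplified model of the hae_cache trick; names mirror alpha_mv, but the
   struct and set_hae() here are illustrative, not the kernel's definitions. */
#include <stdio.h>

struct machine_vector {
        unsigned long hae_cache;        /* software copy of the current HAE   */
        unsigned long *hae_register;    /* where set_hae() writes "to hardware" */
};

static struct machine_vector alpha_mv;

static void set_hae(unsigned long new_hae)
{
        alpha_mv.hae_cache = new_hae;       /* remember what was programmed     */
        *alpha_mv.hae_register = new_hae;   /* normally a chipset register write */
}

int main(void)
{
        /* One-window mode: point the "register" at the cache variable itself,
           as T2_HAE_ADDRESS (&alpha_mv.hae_cache) does after this patch.  The
           write in set_hae() then never leaves ordinary memory, so stray
           set_hae() calls cannot move the hardware window. */
        alpha_mv.hae_register = &alpha_mv.hae_cache;

        set_hae(0x12345678);
        printf("hae_cache = %#lx (hardware untouched)\n", alpha_mv.hae_cache);
        return 0;
}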