author      Chris Metcalf <cmetcalf@tilera.com>   2011-02-28 16:37:34 -0500
committer   Chris Metcalf <cmetcalf@tilera.com>   2011-03-10 13:17:53 -0500
commit      76c567fbba50c3da2f4d40e2e551bab26cfd4381 (patch)
tree        6e3c92a266d0ec255e1930adf5ba5268cd71dee9 /arch/tile/kernel/intvec_32.S
parent      09c17eab075ceeafb53935d858c575b6776394d1 (diff)
arch/tile: support 4KB page size as well as 64KB
The Tilera architecture traditionally supports 64KB page sizes,
which improve TLB utilization and performance when the hardware
is used primarily to run a single application.
For more generic server scenarios, it can be beneficial to run
with 4KB page sizes, so this commit allows that to be specified
(by modifying the arch/tile/include/hv/pagesize.h header).
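A toy sketch of that single point of configuration (the macro names and values below are placeholders for this sketch, not the contents of the real hv/pagesize.h header):

    #include <stdio.h>

    /* Pick the log2 page size in one place and derive the rest from it:
     * 16 gives the traditional 64KB pages, 12 gives 4KB pages.
     * (Names and layout are placeholders, not the real header.) */
    #define LOG2_PAGE_SIZE 16

    #define PAGE_SHIFT LOG2_PAGE_SIZE
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
        printf("PAGE_SIZE = %lu bytes\n", PAGE_SIZE);
        return 0;
    }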
As part of this change, we also re-worked the PTE management
slightly so that PTE writes all go through a __set_pte() function
where we can do some additional validation. The set_pte_order()
function was eliminated since the "order" argument wasn't being used.
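A minimal freestanding sketch of funneling all PTE writes through one checked helper (the 64-bit PTE layout, the bit name, the little-endian word order, and the validation shown are assumptions for illustration, not the kernel's actual __set_pte()):

    #include <assert.h>
    #include <stdint.h>

    /* Illustrative 64-bit PTE stored by a 32-bit core in two word writes. */
    typedef struct { uint64_t val; } pte_t;

    #define _PAGE_PRESENT (1ull << 0)            /* assumed bit position */
    #define barrier() __asm__ __volatile__("" ::: "memory")

    /* Central store helper: every PTE write goes through here, so extra
     * validation can live in one place.  Write the word without the
     * present bit first, then the word carrying it, so a concurrent
     * walker never sees a half-installed entry (little-endian word
     * layout assumed for this sketch). */
    static void __set_pte(pte_t *ptep, pte_t pte)
    {
        uint32_t *w = (uint32_t *)ptep;

        assert(((uintptr_t)ptep & (sizeof(pte_t) - 1)) == 0);

        w[1] = (uint32_t)(pte.val >> 32);        /* high word first */
        barrier();
        w[0] = (uint32_t)pte.val;                /* present bit last */
    }

    int main(void)
    {
        pte_t slot = { 0 };
        pte_t entry = { (0x1234ull << 32) | _PAGE_PRESENT };

        __set_pte(&slot, entry);
        return slot.val == entry.val ? 0 : 1;
    }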
One bug uncovered was in the PCI DMA code, which wasn't properly
flushing the specified range. This was benign with 64KB pages,
but with 4KB pages we were getting some larger flushes wrong.
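A sketch of correct page-granular range flushing, assuming a hypothetical flush_one_page() primitive and a 4KB page size (both stand-ins, not the kernel's PCI DMA code): the range is rounded to page boundaries and walked one page at a time, since covering only part of the range stays benign when a single 64KB page holds the whole buffer but fails once a buffer spans several 4KB pages.

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Illustrative page size and flush primitive; both are stand-ins. */
    #define PAGE_SIZE 0x1000u
    #define PAGE_MASK (~(uintptr_t)(PAGE_SIZE - 1))

    static void flush_one_page(uintptr_t page_va)
    {
        printf("flush page at %#lx\n", (unsigned long)page_va);
    }

    /* Flush every page touched by [va, va + size): round the start down
     * and the end up to page boundaries, then walk the range one page
     * at a time. */
    static void flush_dma_range(uintptr_t va, size_t size)
    {
        uintptr_t page = va & PAGE_MASK;
        uintptr_t end  = (va + size + PAGE_SIZE - 1) & PAGE_MASK;

        while (page < end) {
            flush_one_page(page);
            page += PAGE_SIZE;
        }
    }

    int main(void)
    {
        flush_dma_range(0x10ff0, 0x40);   /* crosses a 4KB page boundary */
        return 0;
    }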
The per-cpu memory reservation code also needed updating to
conform with the newer generic percpu allocator; before, it always
chose 64KB, and that was always sufficient, but with 4KB granularity
we now have to pay closer attention and reserve the amount of memory
that the percpu code will actually request when it starts allocating.
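As an illustration of the kind of calculation involved (every size below is a made-up placeholder, and the function name is hypothetical; the real numbers come from the linker script and the percpu allocator's configuration):

    #include <stdio.h>
    #include <stddef.h>

    /* Placeholder sizes; illustrative only. */
    #define STATIC_PERCPU_BYTES   0x9000u  /* static per-cpu data image */
    #define RESERVED_PERCPU_BYTES 0x1000u  /* reserve for module per-cpu data */
    #define DYNAMIC_PERCPU_BYTES  0x1000u  /* first-chunk dynamic area */

    static size_t round_up(size_t n, size_t align)
    {
        return (n + align - 1) & ~(align - 1);
    }

    /* With 64KB pages, a fixed 64KB per-cpu reservation happened to cover
     * everything the percpu allocator would later request.  With 4KB
     * granularity, the reservation has to be computed from the actual
     * request and rounded up to the page size in use. */
    static size_t percpu_reservation_per_cpu(size_t page_size)
    {
        size_t need = STATIC_PERCPU_BYTES + RESERVED_PERCPU_BYTES +
                      DYNAMIC_PERCPU_BYTES;
        return round_up(need, page_size);
    }

    int main(void)
    {
        printf("64KB pages: %#zx bytes per cpu\n",
               percpu_reservation_per_cpu(0x10000));
        printf(" 4KB pages: %#zx bytes per cpu\n",
               percpu_reservation_per_cpu(0x1000));
        return 0;
    }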
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Diffstat (limited to 'arch/tile/kernel/intvec_32.S')
-rw-r--r--   arch/tile/kernel/intvec_32.S   16
1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index eabf1ef02cb2..fffcfa6b3a62 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -1556,7 +1556,10 @@ STD_ENTRY(_sys_clone)
         .align 64
         /* Align much later jump on the start of a cache line. */
 #if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
-        nop; nop
+        nop
+#if PAGE_SIZE >= 0x10000
+        nop
+#endif
 #endif
 ENTRY(sys_cmpxchg)
 
@@ -1587,6 +1590,10 @@ ENTRY(sys_cmpxchg)
          * NOTE: this must match __atomic_hashed_lock() in lib/atomic_32.c.
          */
 
+#if (PAGE_OFFSET & 0xffff) != 0
+# error Code here assumes PAGE_OFFSET can be loaded with just hi16()
+#endif
+
 #if ATOMIC_LOCKS_FOUND_VIA_TABLE()
         {
                 /* Check for unaligned input. */
@@ -1679,11 +1686,14 @@ ENTRY(sys_cmpxchg)
                 lw      r26, r0
         }
         {
-                /* atomic_locks is page aligned so this suffices to get its addr. */
-                auli    r21, zero, hi16(atomic_locks)
+                auli    r21, zero, ha16(atomic_locks)
 
                 bbns    r23, .Lcmpxchg_badaddr
         }
+#if PAGE_SIZE < 0x10000
+        /* atomic_locks is page-aligned so for big pages we don't need this. */
+        addli   r21, r21, lo16(atomic_locks)
+#endif
         {
                 /*
                  * Insert the hash bits into the page-aligned pointer.
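For context on what the assembly above computes, here is a small user-space sketch of hashing an address to one lock in a page-aligned lock array (the array size, hash function, and names are assumptions for illustration; the kernel's real counterpart is __atomic_hashed_lock() in lib/atomic_32.c). Because the array start is page aligned, its low bits are zero and the scaled hash can be OR-ed straight into the base pointer; and once the page size drops below 64KB, hi16() of the base alone no longer reaches the exact address, which is why the diff switches to ha16() and adds the addli of lo16(atomic_locks).

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative parameters only; the kernel's lock array and hash differ. */
    #define SMALL_PAGE_SIZE 0x1000u
    #define N_ATOMIC_LOCKS  (SMALL_PAGE_SIZE / sizeof(uint32_t))

    static uint32_t atomic_locks[N_ATOMIC_LOCKS]
            __attribute__((aligned(SMALL_PAGE_SIZE)));

    /* Hash the target address to one lock.  The array base is page
     * aligned, so its low bits are zero and the scaled hash can simply
     * be inserted (OR-ed) into the low bits of the base pointer, which
     * is what the assembly builds up in r21.  The hash used here (low
     * word-address bits) is only a stand-in. */
    static uint32_t *hashed_lock(uintptr_t user_addr)
    {
        uintptr_t base = (uintptr_t)atomic_locks;
        uintptr_t hash = (user_addr >> 2) & (N_ATOMIC_LOCKS - 1);

        return (uint32_t *)(base | (hash * sizeof(uint32_t)));
    }

    int main(void)
    {
        printf("0x1234 hashes to lock index %zu\n",
               (size_t)(hashed_lock(0x1234) - atomic_locks));
        return 0;
    }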