author	Russell King <rmk+kernel@arm.linux.org.uk>	2011-03-16 19:35:25 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2011-03-16 19:35:25 -0400
commit	1f0090a1eaa1b750a2fc5c99c91b790d5322a1fd (patch)
tree	c685060f260410e6704c9dfd457ed8c347141f1d /arch/arm/mm
parent	2472f3c8d8fc18b25b2cf1574c036e238187c0ff (diff)
parent	10a8c3839810ac9af1aec836d61b92e7a879f5fa (diff)
Merge branch 'misc' into devel

Conflicts:
	arch/arm/Kconfig
Diffstat (limited to 'arch/arm/mm')
 -rw-r--r--  arch/arm/mm/Kconfig       |  5
 -rw-r--r--  arch/arm/mm/cache-l2x0.c  | 32
 -rw-r--r--  arch/arm/mm/mmu.c         | 10
 -rw-r--r--  arch/arm/mm/vmregion.c    | 17
 4 files changed, 31 insertions(+), 33 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index e4509bae8fc4..05b26a03c209 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -845,6 +845,11 @@ config CACHE_XSC3L2
 	help
 	  This option enables the L2 cache on XScale3.
 
+config ARM_L1_CACHE_SHIFT_6
+	bool
+	help
+	  Setting ARM L1 cache line size to 64 Bytes.
+
 config ARM_L1_CACHE_SHIFT
 	int
 	default 6 if ARM_L1_CACHE_SHIFT_6
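
The new ARM_L1_CACHE_SHIFT_6 symbol carries no value of its own; boards select it, and ARM_L1_CACHE_SHIFT turns it into the line size the cache header uses. A minimal sketch of that plumbing, paraphrased from arch/arm/include/asm/cache.h of this era (shown for context, not part of this diff):

/* arch/arm/include/asm/cache.h (abridged sketch, not part of this diff) */
#define L1_CACHE_SHIFT		CONFIG_ARM_L1_CACHE_SHIFT
#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

/*
 * A platform that selects ARM_L1_CACHE_SHIFT_6 gets L1_CACHE_SHIFT == 6,
 * i.e. 64-byte cache lines, which in turn sizes ____cacheline_aligned
 * data and the alignment the DMA code assumes.
 */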
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index f2ce38e085d2..ef59099a5463 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -73,18 +73,24 @@ static inline void l2x0_inv_line(unsigned long addr)
 	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
 }
 
-#ifdef CONFIG_PL310_ERRATA_588369
-static void debug_writel(unsigned long val)
-{
-	extern void omap_smc1(u32 fn, u32 arg);
+#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
+
+#define debug_writel(val)	outer_cache.set_debug(val)
+
+static void l2x0_set_debug(unsigned long val)
+{
+	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
+}
+#else
+/* Optimised out for non-errata case */
+static inline void debug_writel(unsigned long val)
+{
+}
 
-	/*
-	 * Texas Instrument secure monitor api to modify the
-	 * PL310 Debug Control Register.
-	 */
-	omap_smc1(0x100, val);
-}
+#define l2x0_set_debug	NULL
+#endif
 
+#ifdef CONFIG_PL310_ERRATA_588369
 static inline void l2x0_flush_line(unsigned long addr)
 {
 	void __iomem *base = l2x0_base;
@@ -97,11 +103,6 @@ static inline void l2x0_flush_line(unsigned long addr)
 }
 #else
 
-/* Optimised out for non-errata case */
-static inline void debug_writel(unsigned long val)
-{
-}
-
 static inline void l2x0_flush_line(unsigned long addr)
 {
 	void __iomem *base = l2x0_base;
@@ -125,9 +126,11 @@ static void l2x0_flush_all(void)
 
 	/* clean all ways */
 	spin_lock_irqsave(&l2x0_lock, flags);
+	debug_writel(0x03);
 	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
 	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
 	cache_sync();
+	debug_writel(0x00);
 	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
@@ -335,6 +338,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 	outer_cache.flush_all = l2x0_flush_all;
 	outer_cache.inv_all = l2x0_inv_all;
 	outer_cache.disable = l2x0_disable;
+	outer_cache.set_debug = l2x0_set_debug;
 
 	printk(KERN_INFO "%s cache controller enabled\n", type);
 	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
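
Taken together, these hunks turn debug_writel() into a call through the new outer_cache.set_debug hook. The default implementation, l2x0_set_debug(), writes L2X0_DEBUG_CTRL directly, and l2x0_flush_all() now brackets its background clean+invalidate by way with debug_writel(0x03)/debug_writel(0x00), as the 727915 erratum workaround requires. Platforms where the PL310 Debug Control Register is writable only from the secure side can install their own hook after l2x0_init(). A hypothetical sketch of such an override, reusing the omap_smc1() secure monitor call that the removed hard-coded debug_writel() used (the function and initcall names are illustrative, not from this diff):

/* Hypothetical platform override; names are illustrative. */
extern void omap_smc1(u32 fn, u32 arg);

static void omap4_l2x0_set_debug(unsigned long val)
{
	/*
	 * The Debug Control Register is secure-only on this SoC, so ask
	 * the secure monitor to write it; 0x100 is the service ID the
	 * removed code used.
	 */
	omap_smc1(0x100, val);
}

static int __init omap4_l2x0_fixup(void)
{
	/* Replace the default MMIO hook installed by l2x0_init(). */
	outer_cache.set_debug = omap4_l2x0_set_debug;
	return 0;
}
arch_initcall(omap4_l2x0_fixup);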
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 3c67e92f7d59..ff7b43b5885a 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -827,16 +827,6 @@ static void __init sanity_check_meminfo(void)
 		 * rather difficult.
 		 */
 		reason = "with VIPT aliasing cache";
-	} else if (is_smp() && tlb_ops_need_broadcast()) {
-		/*
-		 * kmap_high needs to occasionally flush TLB entries,
-		 * however, if the TLB entries need to be broadcast
-		 * we may deadlock:
-		 *  kmap_high(irqs off)->flush_all_zero_pkmaps->
-		 *   flush_tlb_kernel_range->smp_call_function_many
-		 *   (must not be called with irqs off)
-		 */
-		reason = "without hardware TLB ops broadcasting";
 	}
 	if (reason) {
 		printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c
index 935993e1b1ef..036fdbfdd62f 100644
--- a/arch/arm/mm/vmregion.c
+++ b/arch/arm/mm/vmregion.c
@@ -38,7 +38,7 @@ struct arm_vmregion *
 arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
 		   size_t size, gfp_t gfp)
 {
-	unsigned long addr = head->vm_start, end = head->vm_end - size;
+	unsigned long start = head->vm_start, addr = head->vm_end;
 	unsigned long flags;
 	struct arm_vmregion *c, *new;
 
@@ -54,21 +54,20 @@ arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
 
 	spin_lock_irqsave(&head->vm_lock, flags);
 
-	list_for_each_entry(c, &head->vm_list, vm_list) {
-		if ((addr + size) < addr)
-			goto nospc;
-		if ((addr + size) <= c->vm_start)
+	addr = rounddown(addr - size, align);
+	list_for_each_entry_reverse(c, &head->vm_list, vm_list) {
+		if (addr >= c->vm_end)
 			goto found;
-		addr = ALIGN(c->vm_end, align);
-		if (addr > end)
+		addr = rounddown(c->vm_start - size, align);
+		if (addr < start)
 			goto nospc;
 	}
 
  found:
 	/*
-	 * Insert this entry _before_ the one we found.
+	 * Insert this entry after the one we found.
 	 */
-	list_add_tail(&new->vm_list, &c->vm_list);
+	list_add(&new->vm_list, &c->vm_list);
 	new->vm_start = addr;
 	new->vm_end = addr + size;
 	new->vm_active = 1;
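
The rewritten allocator walks the region list top-down: the first candidate is the aligned slot just below vm_end, and each subsequent candidate sits just below the next-lower existing region, until one clears that region's vm_end or falls below vm_start. A standalone sketch of the same scan, written as plain userspace C over a sorted array instead of the kernel list (all names here are illustrative):

/* Standalone sketch of the top-down scan above; not kernel code. */
#include <stdio.h>

#define ROUNDDOWN(x, a)	((x) & ~((a) - 1))	/* a must be a power of 2 */

struct region { unsigned long start, end; };

static unsigned long alloc_topdown(unsigned long vm_start,
				   unsigned long vm_end,
				   const struct region *busy, int n,
				   unsigned long size, unsigned long align)
{
	/* Start from the highest possible slot... */
	unsigned long addr = ROUNDDOWN(vm_end - size, align);
	int i;

	/* ...and walk the busy regions from the top down. */
	for (i = n - 1; i >= 0; i--) {
		if (addr >= busy[i].end)
			return addr;		/* fits above this region */
		addr = ROUNDDOWN(busy[i].start - size, align);
		if (addr < vm_start)
			return 0;		/* no space */
	}
	return addr;				/* fits below the lowest region */
}

int main(void)
{
	struct region busy[] = { { 0x1000, 0x3000 }, { 0x8000, 0x9000 } };

	printf("%#lx\n", alloc_topdown(0x1000, 0x10000, busy, 2,
				       0x2000, 0x1000));	/* prints 0xe000 */
	return 0;
}

Compiled and run, this prints 0xe000: the 0x2000-byte request lands in the highest free gap, immediately below vm_end, where the old bottom-up scan would have chosen the lowest gap (0x3000 here).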