path: root/arch/arm
author	Catalin Marinas <catalin.marinas@arm.com>	2007-07-20 06:42:40 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2007-07-20 16:29:44 -0400
commit	0762097625711e829a008b64f42dc0ec74abb284 (patch)
tree	3300d8960eb6c2ca326bc5f2b4ad60b8b0b1f9a8 /arch/arm
parent	13a63ab289627e977a045864b36792cf0b61364a (diff)
[ARM] 4500/1: Add locking around the background L2x0 cache operations
The background operations of the L2x0 cache controllers are aborted if another operation is issued on the same or different core. This patch protects the maintenance operation issuing/polling with a spinlock.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm')
-rw-r--r--	arch/arm/mm/cache-l2x0.c | 6
1 file changed, 6 insertions(+), 0 deletions(-)
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 08a36f1b35d2..b4e9b734e0bd 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -17,6 +17,7 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 #include <linux/init.h>
+#include <linux/spinlock.h>
 
 #include <asm/cacheflush.h>
 #include <asm/io.h>
@@ -25,14 +26,19 @@
 #define CACHE_LINE_SIZE	32
 
 static void __iomem *l2x0_base;
+static DEFINE_SPINLOCK(l2x0_lock);
 
 static inline void sync_writel(unsigned long val, unsigned long reg,
 			       unsigned long complete_mask)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&l2x0_lock, flags);
 	writel(val, l2x0_base + reg);
 	/* wait for the operation to complete */
 	while (readl(l2x0_base + reg) & complete_mask)
 		;
+	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static inline void cache_sync(void)
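
For context, the issuing/polling sequence that the new l2x0_lock serialises is the one used by the background maintenance operations elsewhere in cache-l2x0.c. Below is a minimal sketch of that usage, not part of the hunk above, assuming the L2X0_CACHE_SYNC and L2X0_INV_WAY register offsets from <asm/hardware/cache-l2x0.h> and the file layout of this era:

static inline void cache_sync(void)
{
	/* drain the L2 write buffer; issue + poll now run under l2x0_lock */
	sync_writel(0, L2X0_CACHE_SYNC, 1);
}

static inline void l2x0_inv_all(void)
{
	/*
	 * Invalidate all ways: a background operation that would be
	 * aborted if another core issued a maintenance operation while
	 * it is in progress, hence the locking inside sync_writel().
	 */
	sync_writel(0xff, L2X0_INV_WAY, 0xff);
	cache_sync();
}

With the lock taken around each write-and-poll pair, concurrent callers on other cores spin on the lock rather than issuing a second operation that would abort the one in flight.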