From 68d9102f76de7a923fb81c8b6de4764f8f50ed17 Mon Sep 17 00:00:00 2001
From: Nicolas Pitre <nico@cam.org>
Date: Thu, 1 Sep 2005 12:37:13 +0100
Subject: [ARM] 2865/2: fix fadvise64_64 syscall argument passing

Patch from Nicolas Pitre

The prototype for sys_fadvise64_64() is:
    long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
The argument list is therefore as follows on legacy ABI:
	fd: type int (r0)
	offset: type long long (r1-r2)
	len: type long long (r3-sp[0])
	advice: type int (sp[4])
With EABI this becomes:
	fd: type int (r0)
	offset: type long long (r2-r3)
	len: type long long (sp[0]-sp[4])
	advice: type int (sp[8])
Not only do we have ABI differences here, but the EABI version requires
one additional word on the syscall stack.
To avoid the ABI mismatch and the extra stack space required with EABI,
this syscall is now defined on ARM with a different argument ordering,
as follows:
    long sys_arm_fadvise64_64(int fd, int advice, loff_t offset, loff_t len)
This gives us the following ABI-independent argument distribution:
	fd: type int (r0)
	advice: type int (r1)
	offset: type long long (r2-r3)
	len: type long long (sp[0]-sp[4])
Now, since the syscall entry code by default takes care of only 5
registers, including the store of r4 to the stack, we need a wrapper to
store r5 to the stack as well.  Because that wrapper was missing yet
always required, sys_fadvise64_64 never worked on ARM, so we can safely
reuse its syscall number for the new sys_arm_fadvise64_64 interface.
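
For illustration only (not part of this patch), a userspace shim on top
of the new ordering might look roughly like the sketch below.  It
assumes the number is exported to userspace as __NR_arm_fadvise64_64
and that syscall(2) is available; a real C library must take more care
with how 64-bit values are split across registers on each ABI:

    #include <unistd.h>
    #include <sys/syscall.h>

    /* Hypothetical wrapper: keeps the usual fadvise64_64 argument order
     * for callers, but passes advice before the two 64-bit values so
     * the call matches the kernel's ARM-specific ordering shown above. */
    static long my_fadvise64_64(int fd, long long offset, long long len,
                                int advice)
    {
            return syscall(__NR_arm_fadvise64_64, fd, advice, offset, len);
    }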

Signed-off-by: Nicolas Pitre <nico@cam.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 arch/arm/kernel/calls.S        |  2 +-
 arch/arm/kernel/entry-common.S |  4 ++++
 arch/arm/kernel/sys_arm.c      | 10 ++++++++++
 3 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 2b6b4c786e..db07ce42b3 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -284,7 +284,7 @@ __syscall_start:
 		.long	sys_fstatfs64
 		.long	sys_tgkill
 		.long	sys_utimes
-/* 270 */	.long	sys_fadvise64_64
+/* 270 */	.long	sys_arm_fadvise64_64_wrapper
 		.long	sys_pciconfig_iobase
 		.long	sys_pciconfig_read
 		.long	sys_pciconfig_write
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 3f8d0e3aef..6281d488ac 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -265,6 +265,10 @@ sys_futex_wrapper:
 		str	r5, [sp, #4]		@ push sixth arg
 		b	sys_futex
 
+sys_arm_fadvise64_64_wrapper:
+		str	r5, [sp, #4]		@ push r5 to stack
+		b	sys_arm_fadvise64_64
+
 /*
  * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
  * offset, we return EINVAL.
diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c
index f897ce2ccf..42629ff84f 100644
--- a/arch/arm/kernel/sys_arm.c
+++ b/arch/arm/kernel/sys_arm.c
@@ -311,3 +311,13 @@ long execve(const char *filename, char **argv, char **envp)
 	return ret;
 }
 EXPORT_SYMBOL(execve);
+
+/*
+ * Since loff_t is a 64-bit type we avoid a lot of ABI hassle
+ * with a different argument ordering.
+ */
+asmlinkage long sys_arm_fadvise64_64(int fd, int advice,
+				     loff_t offset, loff_t len)
+{
+	return sys_fadvise64_64(fd, offset, len, advice);
+}
-- 
cgit v1.2.2


From 5c53ff088cb76dca52bdf2ef36ea63147763f9ea Mon Sep 17 00:00:00 2001
From: Nicolas Pitre <nico@cam.org>
Date: Thu, 1 Sep 2005 12:48:40 +0100
Subject: [ARM] 2862/1: VST aka CONFIG_NO_IDLE_HZ support for PXA2xx

Patch from Nicolas Pitre

Signed-off-by: Nicolas Pitre <nico@cam.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 arch/arm/Kconfig         |  4 ++--
 arch/arm/mach-pxa/time.c | 51 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 53 insertions(+), 2 deletions(-)

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 4bf0e8737e..dc0fafc7f9 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -365,8 +365,8 @@ config NO_IDLE_HZ
 
 	  Please note that dynamic tick may affect the accuracy of
 	  timekeeping on some platforms depending on the implementation.
-	  Currently at least OMAP platform is known to have accurate
-	  timekeeping with dynamic tick.
+	  Currently at least OMAP and PXA2xx platforms are known to have
+	  accurate timekeeping with dynamic tick.
 
 config ARCH_DISCONTIGMEM_ENABLE
 	bool
diff --git a/arch/arm/mach-pxa/time.c b/arch/arm/mach-pxa/time.c
index 6e5202154f..72b15e9a37 100644
--- a/arch/arm/mach-pxa/time.c
+++ b/arch/arm/mach-pxa/time.c
@@ -70,6 +70,11 @@ static unsigned long pxa_gettimeoffset (void)
 	return usec;
 }
 
+#ifdef CONFIG_NO_IDLE_HZ
+static unsigned long initial_match;
+static int match_posponed;
+#endif
+
 static irqreturn_t
 pxa_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 {
@@ -77,6 +82,13 @@ pxa_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 
 	write_seqlock(&xtime_lock);
 
+#ifdef CONFIG_NO_IDLE_HZ
+	if (match_posponed) {
+		match_posponed = 0;
+		OSMR0 = initial_match;
+	}
+#endif
+
 	/* Loop until we get ahead of the free running timer.
 	 * This ensures an exact clock tick count and time accuracy.
 	 * IRQs are disabled inside the loop to ensure coherence between
@@ -126,6 +138,42 @@ static void __init pxa_timer_init(void)
 	OSCR = 0;		/* initialize free-running timer, force first match */
 }
 
+#ifdef CONFIG_NO_IDLE_HZ
+static int pxa_dyn_tick_enable_disable(void)
+{
+	/* nothing to do */
+	return 0;
+}
+
+static void pxa_dyn_tick_reprogram(unsigned long ticks)
+{
+	if (ticks > 1) {
+		initial_match = OSMR0;
+		OSMR0 = initial_match + ticks * LATCH;
+		match_posponed = 1;
+	}
+}
+
+static irqreturn_t
+pxa_dyn_tick_handler(int irq, void *dev_id, struct pt_regs *regs)
+{
+	if (match_posponed) {
+		match_posponed = 0;
+		OSMR0 = initial_match;
+		if ( (signed long)(initial_match - OSCR) <= 8 )
+			return pxa_timer_interrupt(irq, dev_id, regs);
+	}
+	return IRQ_NONE;
+}
+
+static struct dyn_tick_timer pxa_dyn_tick = {
+	.enable		= pxa_dyn_tick_enable_disable,
+	.disable	= pxa_dyn_tick_enable_disable,
+	.reprogram	= pxa_dyn_tick_reprogram,
+	.handler	= pxa_dyn_tick_handler,
+};
+#endif
+
 #ifdef CONFIG_PM
 static unsigned long osmr[4], oier;
 
@@ -161,4 +209,7 @@ struct sys_timer pxa_timer = {
 	.suspend	= pxa_timer_suspend,
 	.resume		= pxa_timer_resume,
 	.offset		= pxa_gettimeoffset,
+#ifdef CONFIG_NO_IDLE_HZ
+	.dyn_tick	= &pxa_dyn_tick,
+#endif
 };
-- 
cgit v1.2.2


From 20e912680842504ab4633deaa644c2b855ad3d44 Mon Sep 17 00:00:00 2001
From: Nicolas Pitre <nico@cam.org>
Date: Thu, 1 Sep 2005 12:48:47 +0100
Subject: [ARM] 2863/1: clarify comment in PXA2xx and SA1x00 timer code

Patch from Nicolas Pitre

Signed-off-by: Nicolas Pitre <nico@cam.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 arch/arm/mach-pxa/time.c    |  7 ++++---
 arch/arm/mach-sa1100/time.c | 17 ++++++++---------
 2 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/arch/arm/mach-pxa/time.c b/arch/arm/mach-pxa/time.c
index 72b15e9a37..7dad3f1465 100644
--- a/arch/arm/mach-pxa/time.c
+++ b/arch/arm/mach-pxa/time.c
@@ -91,9 +91,10 @@ pxa_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 
 	/* Loop until we get ahead of the free running timer.
 	 * This ensures an exact clock tick count and time accuracy.
-	 * IRQs are disabled inside the loop to ensure coherence between
-	 * lost_ticks (updated in do_timer()) and the match reg value, so we
-	 * can use do_gettimeofday() from interrupt handlers.
+	 * Since IRQs are disabled at this point, coherence between
+	 * lost_ticks(updated in do_timer()) and the match reg value is
+	 * ensured, hence we can use do_gettimeofday() from interrupt
+	 * handlers.
 	 *
 	 * HACK ALERT: it seems that the PXA timer regs aren't updated right
 	 * away in all cases when a write occurs.  We therefore compare with
diff --git a/arch/arm/mach-sa1100/time.c b/arch/arm/mach-sa1100/time.c
index 0eeb3616ff..a084b38698 100644
--- a/arch/arm/mach-sa1100/time.c
+++ b/arch/arm/mach-sa1100/time.c
@@ -70,15 +70,6 @@ static unsigned long sa1100_gettimeoffset (void)
 	return usec;
 }
 
-/*
- * We will be entered with IRQs enabled.
- *
- * Loop until we get ahead of the free running timer.
- * This ensures an exact clock tick count and time accuracy.
- * IRQs are disabled inside the loop to ensure coherence between
- * lost_ticks (updated in do_timer()) and the match reg value, so we
- * can use do_gettimeofday() from interrupt handlers.
- */
 static irqreturn_t
 sa1100_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 {
@@ -86,6 +77,14 @@ sa1100_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 
 	write_seqlock(&xtime_lock);
 
+	/*
+	 * Loop until we get ahead of the free running timer.
+	 * This ensures an exact clock tick count and time accuracy.
+	 * Since IRQs are disabled at this point, coherence between
+	 * lost_ticks(updated in do_timer()) and the match reg value is
+	 * ensured, hence we can use do_gettimeofday() from interrupt
+	 * handlers.
+	 */
 	do {
 		timer_tick(regs);
 		OSSR = OSSR_M0;  /* Clear match on timer 0 */
-- 
cgit v1.2.2


From 569d2c34dcf259b07977835492aa8813d1168230 Mon Sep 17 00:00:00 2001
From: Nicolas Pitre <nico@cam.org>
Date: Thu, 1 Sep 2005 12:48:48 +0100
Subject: [ARM] 2864/1: VST aka CONFIG_NO_IDLE_HZ support for SA11x0

Patch from Nicolas Pitre

Signed-off-by: Nicolas Pitre <nico@cam.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 arch/arm/Kconfig            |  4 ++--
 arch/arm/mach-sa1100/time.c | 51 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 53 insertions(+), 2 deletions(-)

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index dc0fafc7f9..68dfdba71d 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -365,8 +365,8 @@ config NO_IDLE_HZ
 
 	  Please note that dynamic tick may affect the accuracy of
 	  timekeeping on some platforms depending on the implementation.
-	  Currently at least OMAP and PXA2xx platforms are known to have
-	  accurate timekeeping with dynamic tick.
+	  Currently at least OMAP, PXA2xx and SA11x0 platforms are known
+	  to have accurate timekeeping with dynamic tick.
 
 config ARCH_DISCONTIGMEM_ENABLE
 	bool
diff --git a/arch/arm/mach-sa1100/time.c b/arch/arm/mach-sa1100/time.c
index a084b38698..47e0420623 100644
--- a/arch/arm/mach-sa1100/time.c
+++ b/arch/arm/mach-sa1100/time.c
@@ -70,6 +70,11 @@ static unsigned long sa1100_gettimeoffset (void)
 	return usec;
 }
 
+#ifdef CONFIG_NO_IDLE_HZ
+static unsigned long initial_match;
+static int match_posponed;
+#endif
+
 static irqreturn_t
 sa1100_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 {
@@ -77,6 +82,13 @@ sa1100_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 
 	write_seqlock(&xtime_lock);
 
+#ifdef CONFIG_NO_IDLE_HZ
+	if (match_posponed) {
+		match_posponed = 0;
+		OSMR0 = initial_match;
+	}
+#endif
+
 	/*
 	 * Loop until we get ahead of the free running timer.
 	 * This ensures an exact clock tick count and time accuracy.
@@ -119,6 +131,42 @@ static void __init sa1100_timer_init(void)
 	OSCR = 0;		/* initialize free-running timer, force first match */
 }
 
+#ifdef CONFIG_NO_IDLE_HZ
+static int sa1100_dyn_tick_enable_disable(void)
+{
+	/* nothing to do */
+	return 0;
+}
+
+static void sa1100_dyn_tick_reprogram(unsigned long ticks)
+{
+	if (ticks > 1) {
+		initial_match = OSMR0;
+		OSMR0 = initial_match + ticks * LATCH;
+		match_posponed = 1;
+	}
+}
+
+static irqreturn_t
+sa1100_dyn_tick_handler(int irq, void *dev_id, struct pt_regs *regs)
+{
+	if (match_posponed) {
+		match_posponed = 0;
+		OSMR0 = initial_match;
+		if ((signed long)(initial_match - OSCR) <= 0)
+			return sa1100_timer_interrupt(irq, dev_id, regs);
+	}
+	return IRQ_NONE;
+}
+
+static struct dyn_tick_timer sa1100_dyn_tick = {
+	.enable		= sa1100_dyn_tick_enable_disable,
+	.disable	= sa1100_dyn_tick_enable_disable,
+	.reprogram	= sa1100_dyn_tick_reprogram,
+	.handler	= sa1100_dyn_tick_handler,
+};
+#endif
+
 #ifdef CONFIG_PM
 unsigned long osmr[4], oier;
 
@@ -155,4 +203,7 @@ struct sys_timer sa1100_timer = {
 	.suspend	= sa1100_timer_suspend,
 	.resume		= sa1100_timer_resume,
 	.offset		= sa1100_gettimeoffset,
+#ifdef CONFIG_NO_IDLE_HZ
+	.dyn_tick	= &sa1100_dyn_tick,
+#endif
 };
-- 
cgit v1.2.2


From 08f4ffb3eb4ff23daf9c61bcd523940d43c2270c Mon Sep 17 00:00:00 2001
From: Russell King <rmk@dyn-67.arm.linux.org.uk>
Date: Thu, 1 Sep 2005 14:45:18 +0100
Subject: [ARM] Convert open-coded __pmd_populate to use inline function
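
The open-coded sequence removed below is replaced by the __pmd_populate()
inline helper, which performs the same three steps.  For reference, a
sketch of that helper's likely shape (the exact definition lives in the
ARM pgalloc header and may differ in detail):

    static inline void __pmd_populate(pmd_t *pmdp, unsigned long pmdval)
    {
            /* Point the two hardware entries of this Linux pmd at two
             * consecutive 256-entry page tables, then write them out. */
            pmdp[0] = __pmd(pmdval);
            pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
            flush_pmd_entry(pmdp);
    }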

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 arch/arm/mm/mm-armv.c | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 4dae00bf7a..8cb024aa88 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -295,14 +295,10 @@ alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pg
 	pte_t *ptep;
 
 	if (pmd_none(*pmdp)) {
-		unsigned long pmdval;
 		ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
 					       sizeof(pte_t));
 
-		pmdval = __pa(ptep) | prot_l1;
-		pmdp[0] = __pmd(pmdval);
-		pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
-		flush_pmd_entry(pmdp);
+		__pmd_populate(pmdp, __pa(ptep) | prot_l1);
 	}
 	ptep = pte_offset_kernel(pmdp, virt);
 
-- 
cgit v1.2.2


From 103461a80c2f2dd95fe9a39a5decd984622c2a9e Mon Sep 17 00:00:00 2001
From: Russell King <rmk@dyn-67.arm.linux.org.uk>
Date: Thu, 1 Sep 2005 14:51:59 +0100
Subject: [ARM] Simplify setup_mm_for_reboot()

There is no point checking the CPU architecture level on every
iteration of the loop, so precompute the base PMD flags once outside
the loop.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 arch/arm/mm/mm-armv.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 8cb024aa88..3a81944d74 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -577,23 +577,23 @@ static void __init create_mapping(struct map_desc *md)
  */
 void setup_mm_for_reboot(char mode)
 {
-	unsigned long pmdval;
+	unsigned long base_pmdval;
 	pgd_t *pgd;
-	pmd_t *pmd;
 	int i;
-	int cpu_arch = cpu_architecture();
 
 	if (current->mm && current->mm->pgd)
 		pgd = current->mm->pgd;
 	else
 		pgd = init_mm.pgd;
 
-	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++) {
-		pmdval = (i << PGDIR_SHIFT) |
-			 PMD_SECT_AP_WRITE | PMD_SECT_AP_READ |
-			 PMD_TYPE_SECT;
-		if (cpu_arch <= CPU_ARCH_ARMv5TEJ)
-			pmdval |= PMD_BIT4;
+	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
+	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ)
+		base_pmdval |= PMD_BIT4;
+
+	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
+		unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
+		pmd_t *pmd;
+
 		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
 		pmd[0] = __pmd(pmdval);
 		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
-- 
cgit v1.2.2


From 86a8a83963a3f6beeca4900d26da93c7d2a9d92d Mon Sep 17 00:00:00 2001
From: Russell King <rmk@dyn-67.arm.linux.org.uk>
Date: Thu, 1 Sep 2005 22:41:55 +0100
Subject: [ARM] Fix ARMv6 page table bits

We weren't explicitly setting the page table bits we desired
in user_prot in the protection table, which resulted in the
user mappings for v6 CPUs being marked global.
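
The subtlety is that in the old form the OR happens inside the right-hand
side of the &=, so user_pgprot only widens the mask and can never set a
bit that was not already present in v.  A standalone illustration with
made-up bit values (not the real ARM definitions):

    #include <assert.h>

    int main(void)
    {
            unsigned long clear = 0x0c;   /* stands in for PTE_BUFFERABLE|PTE_CACHEABLE */
            unsigned long extra = 0x800;  /* stands in for user_pgprot, e.g. the nG bit */
            unsigned long v = 0xcf;       /* protection value lacking the extra bit */

            unsigned long buggy = v;
            buggy &= ~clear | extra;                     /* old code: extra never gets set */
            unsigned long fixed = (v & ~clear) | extra;  /* new code: clear, then set */

            assert(!(buggy & extra));     /* user_pgprot bit still missing */
            assert(fixed & extra);        /* user_pgprot bit now present */
            return 0;
    }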

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 arch/arm/mm/mm-armv.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 3a81944d74..d125a3dc06 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -453,7 +453,7 @@ static void __init build_mem_type_table(void)
 
 	for (i = 0; i < 16; i++) {
 		unsigned long v = pgprot_val(protection_map[i]);
-		v &= (~(PTE_BUFFERABLE|PTE_CACHEABLE)) | user_pgprot;
+		v = (v & ~(PTE_BUFFERABLE|PTE_CACHEABLE)) | user_pgprot;
 		protection_map[i] = __pgprot(v);
 	}
 
-- 
cgit v1.2.2