Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/boot/compressed/Makefile              |  3
-rw-r--r--  arch/arm/common/sa1111.c                       |  5
-rw-r--r--  arch/arm/include/asm/io.h                      | 50
-rw-r--r--  arch/arm/lib/csumpartialcopyuser.S             |  2
-rw-r--r--  arch/arm/mach-realview/core.c                  |  2
-rw-r--r--  arch/arm/mach-ux500/include/mach/uncompress.h  | 10
-rw-r--r--  arch/arm/mach-vexpress/v2m.c                   |  2
-rw-r--r--  arch/arm/mm/cache-l2x0.c                       | 26
-rw-r--r--  arch/arm/mm/highmem.c                          | 13
-rw-r--r--  arch/s390/kernel/entry.S                       | 12
-rw-r--r--  arch/s390/kernel/entry64.S                     | 12
-rw-r--r--  arch/s390/kernel/time.c                        | 18
12 files changed, 98 insertions(+), 57 deletions(-)
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index c2225fea3535..7636c9b3f9a7 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -68,6 +68,9 @@ targets := vmlinux vmlinux.lds \
 		piggy.$(suffix_y) piggy.$(suffix_y).o \
 		font.o font.c head.o misc.o $(OBJS)
 
+# Make sure files are removed during clean
+extra-y += piggy.gzip piggy.lzo piggy.lzma lib1funcs.S
+
 ifeq ($(CONFIG_FUNCTION_TRACER),y)
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index ac2fd440652e..517d50ddbeb3 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -1025,13 +1025,12 @@ static int sa1111_remove(struct platform_device *pdev)
 	struct sa1111 *sachip = platform_get_drvdata(pdev);
 
 	if (sachip) {
-		__sa1111_remove(sachip);
-		platform_set_drvdata(pdev, NULL);
-
 #ifdef CONFIG_PM
 		kfree(sachip->saved_state);
 		sachip->saved_state = NULL;
 #endif
+		__sa1111_remove(sachip);
+		platform_set_drvdata(pdev, NULL);
 	}
 
 	return 0;
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index c980156f3263..1261b1f928d9 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -26,6 +26,7 @@
 #include <linux/types.h>
 #include <asm/byteorder.h>
 #include <asm/memory.h>
+#include <asm/system.h>
 
 /*
  * ISA I/O bus memory addresses are 1:1 with the physical address.
@@ -179,25 +180,38 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
  * IO port primitives for more information.
  */
 #ifdef __mem_pci
-#define readb(c) ({ __u8 __v = __raw_readb(__mem_pci(c)); __v; })
-#define readw(c) ({ __u16 __v = le16_to_cpu((__force __le16) \
-			__raw_readw(__mem_pci(c))); __v; })
-#define readl(c) ({ __u32 __v = le32_to_cpu((__force __le32) \
-			__raw_readl(__mem_pci(c))); __v; })
-#define readb_relaxed(addr) readb(addr)
-#define readw_relaxed(addr) readw(addr)
-#define readl_relaxed(addr) readl(addr)
+#define readb_relaxed(c) ({ u8 __v = __raw_readb(__mem_pci(c)); __v; })
+#define readw_relaxed(c) ({ u16 __v = le16_to_cpu((__force __le16) \
+					__raw_readw(__mem_pci(c))); __v; })
+#define readl_relaxed(c) ({ u32 __v = le32_to_cpu((__force __le32) \
+					__raw_readl(__mem_pci(c))); __v; })
+
+#define writeb_relaxed(v,c)	((void)__raw_writeb(v,__mem_pci(c)))
+#define writew_relaxed(v,c)	((void)__raw_writew((__force u16) \
+					cpu_to_le16(v),__mem_pci(c)))
+#define writel_relaxed(v,c)	((void)__raw_writel((__force u32) \
+					cpu_to_le32(v),__mem_pci(c)))
+
+#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
+#define __iormb()		rmb()
+#define __iowmb()		wmb()
+#else
+#define __iormb()		do { } while (0)
+#define __iowmb()		do { } while (0)
+#endif
+
+#define readb(c)		({ u8  __v = readb_relaxed(c); __iormb(); __v; })
+#define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(); __v; })
+#define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+
+#define writeb(v,c)		({ __iowmb(); writeb_relaxed(v,c); })
+#define writew(v,c)		({ __iowmb(); writew_relaxed(v,c); })
+#define writel(v,c)		({ __iowmb(); writel_relaxed(v,c); })
 
 #define readsb(p,d,l)		__raw_readsb(__mem_pci(p),d,l)
 #define readsw(p,d,l)		__raw_readsw(__mem_pci(p),d,l)
 #define readsl(p,d,l)		__raw_readsl(__mem_pci(p),d,l)
 
-#define writeb(v,c)		__raw_writeb(v,__mem_pci(c))
-#define writew(v,c)		__raw_writew((__force __u16) \
-				cpu_to_le16(v),__mem_pci(c))
-#define writel(v,c)		__raw_writel((__force __u32) \
-				cpu_to_le32(v),__mem_pci(c))
-
 #define writesb(p,d,l)		__raw_writesb(__mem_pci(p),d,l)
 #define writesw(p,d,l)		__raw_writesw(__mem_pci(p),d,l)
 #define writesl(p,d,l)		__raw_writesl(__mem_pci(p),d,l)
@@ -244,13 +258,13 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
  * io{read,write}{8,16,32} macros
  */
 #ifndef ioread8
-#define ioread8(p)	({ unsigned int __v = __raw_readb(p); __v; })
-#define ioread16(p)	({ unsigned int __v = le16_to_cpu((__force __le16)__raw_readw(p)); __v; })
-#define ioread32(p)	({ unsigned int __v = le32_to_cpu((__force __le32)__raw_readl(p)); __v; })
+#define ioread8(p)	({ unsigned int __v = __raw_readb(p); __iormb(); __v; })
+#define ioread16(p)	({ unsigned int __v = le16_to_cpu((__force __le16)__raw_readw(p)); __iormb(); __v; })
+#define ioread32(p)	({ unsigned int __v = le32_to_cpu((__force __le32)__raw_readl(p)); __iormb(); __v; })
 
-#define iowrite8(v,p)	__raw_writeb(v, p)
-#define iowrite16(v,p)	__raw_writew((__force __u16)cpu_to_le16(v), p)
-#define iowrite32(v,p)	__raw_writel((__force __u32)cpu_to_le32(v), p)
+#define iowrite8(v,p)	({ __iowmb(); (void)__raw_writeb(v, p); })
+#define iowrite16(v,p)	({ __iowmb(); (void)__raw_writew((__force __u16)cpu_to_le16(v), p); })
+#define iowrite32(v,p)	({ __iowmb(); (void)__raw_writel((__force __u32)cpu_to_le32(v), p); })
 
 #define ioread8_rep(p,d,c)	__raw_readsb(p,d,c)
 #define ioread16_rep(p,d,c)	__raw_readsw(p,d,c)
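
Note on the io.h change above: readb/readw/readl and writeb/writew/writel now imply the I/O barriers (__iormb()/__iowmb()) needed to order register accesses against normal memory such as DMA buffers, while the new *_relaxed() variants keep only the raw device access. A minimal usage sketch in C, assuming a hypothetical device with made-up register offsets (DMA address, length, doorbell, status) that are not part of this patch:

#include <linux/io.h>
#include <linux/types.h>

static void hypothetical_start_dma(void __iomem *regs, u32 buf_dma, u32 len)
{
	/*
	 * Descriptor and buffer writes in normal memory must be visible
	 * to the device before the doorbell: writel() now issues
	 * __iowmb() before the register access.
	 */
	writel(buf_dma, regs + 0x00);	/* hypothetical DMA address register */
	writel(len, regs + 0x04);	/* hypothetical DMA length register */
	writel(1, regs + 0x08);		/* hypothetical doorbell register */
}

static u32 hypothetical_poll_status(void __iomem *regs)
{
	/* Pure register polling with no dependent memory access can use
	 * the relaxed variant and skip the barrier. */
	return readl_relaxed(regs + 0x0c);	/* hypothetical status register */
}
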
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 59ff6fdc1e63..7d08b43d2c0e 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -71,7 +71,7 @@
 		.pushsection .fixup,"ax"
 		.align	4
 9001:		mov	r4, #-EFAULT
-		ldr	r5, [fp, #4]		@ *err_ptr
+		ldr	r5, [sp, #8*4]		@ *err_ptr
 		str	r4, [r5]
 		ldmia	sp, {r1, r2}		@ retrieve dst, len
 		add	r2, r2, r1
diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c
index a54fbda77e45..2fa38df28414 100644
--- a/arch/arm/mach-realview/core.c
+++ b/arch/arm/mach-realview/core.c
@@ -251,7 +251,7 @@ static unsigned int realview_mmc_status(struct device *dev)
 	else
 		mask = 2;
 
-	return !(readl(REALVIEW_SYSMCI) & mask);
+	return readl(REALVIEW_SYSMCI) & mask;
 }
 
 struct mmci_platform_data realview_mmc0_plat_data = {
diff --git a/arch/arm/mach-ux500/include/mach/uncompress.h b/arch/arm/mach-ux500/include/mach/uncompress.h
index 8552eb188b50..0271ca0a83df 100644
--- a/arch/arm/mach-ux500/include/mach/uncompress.h
+++ b/arch/arm/mach-ux500/include/mach/uncompress.h
@@ -30,22 +30,22 @@
 static void putc(const char c)
 {
 	/* Do nothing if the UART is not enabled. */
-	if (!(readb(U8500_UART_CR) & 0x1))
+	if (!(__raw_readb(U8500_UART_CR) & 0x1))
 		return;
 
 	if (c == '\n')
 		putc('\r');
 
-	while (readb(U8500_UART_FR) & (1 << 5))
+	while (__raw_readb(U8500_UART_FR) & (1 << 5))
 		barrier();
-	writeb(c, U8500_UART_DR);
+	__raw_writeb(c, U8500_UART_DR);
 }
 
 static void flush(void)
 {
-	if (!(readb(U8500_UART_CR) & 0x1))
+	if (!(__raw_readb(U8500_UART_CR) & 0x1))
 		return;
-	while (readb(U8500_UART_FR) & (1 << 3))
+	while (__raw_readb(U8500_UART_FR) & (1 << 3))
 		barrier();
 }
 
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index d6db3453908b..817f0ad38a0b 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -241,7 +241,7 @@ static struct platform_device v2m_flash_device = {
 
 static unsigned int v2m_mmci_status(struct device *dev)
 {
-	return !(readl(MMIO_P2V(V2M_SYS_MCI)) & (1 << 0));
+	return readl(MMIO_P2V(V2M_SYS_MCI)) & (1 << 0);
 }
 
 static struct mmci_platform_data v2m_mmci_data = {
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index df4955885b21..9982eb385c0f 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -32,14 +32,14 @@ static uint32_t l2x0_way_mask; /* Bitmask of active ways */
 static inline void cache_wait(void __iomem *reg, unsigned long mask)
 {
 	/* wait for the operation to complete */
-	while (readl(reg) & mask)
+	while (readl_relaxed(reg) & mask)
 		;
 }
 
 static inline void cache_sync(void)
 {
 	void __iomem *base = l2x0_base;
-	writel(0, base + L2X0_CACHE_SYNC);
+	writel_relaxed(0, base + L2X0_CACHE_SYNC);
 	cache_wait(base + L2X0_CACHE_SYNC, 1);
 }
 
45 45
@@ -47,14 +47,14 @@ static inline void l2x0_clean_line(unsigned long addr)
 {
 	void __iomem *base = l2x0_base;
 	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
-	writel(addr, base + L2X0_CLEAN_LINE_PA);
+	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
 }
 
 static inline void l2x0_inv_line(unsigned long addr)
 {
 	void __iomem *base = l2x0_base;
 	cache_wait(base + L2X0_INV_LINE_PA, 1);
-	writel(addr, base + L2X0_INV_LINE_PA);
+	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
 }
 
 #ifdef CONFIG_PL310_ERRATA_588369
@@ -75,9 +75,9 @@ static inline void l2x0_flush_line(unsigned long addr)
 
 	/* Clean by PA followed by Invalidate by PA */
 	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
-	writel(addr, base + L2X0_CLEAN_LINE_PA);
+	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
 	cache_wait(base + L2X0_INV_LINE_PA, 1);
-	writel(addr, base + L2X0_INV_LINE_PA);
+	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
 }
 #else
 
@@ -90,7 +90,7 @@ static inline void l2x0_flush_line(unsigned long addr)
 {
 	void __iomem *base = l2x0_base;
 	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
-	writel(addr, base + L2X0_CLEAN_INV_LINE_PA);
+	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
 }
 #endif
 
@@ -109,7 +109,7 @@ static inline void l2x0_inv_all(void)
 
 	/* invalidate all ways */
 	spin_lock_irqsave(&l2x0_lock, flags);
-	writel(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
+	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
 	cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
 	cache_sync();
 	spin_unlock_irqrestore(&l2x0_lock, flags);
@@ -215,8 +215,8 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 
 	l2x0_base = base;
 
-	cache_id = readl(l2x0_base + L2X0_CACHE_ID);
-	aux = readl(l2x0_base + L2X0_AUX_CTRL);
+	cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
+	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
 
 	aux &= aux_mask;
 	aux |= aux_val;
@@ -248,15 +248,15 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 	 * If you are booting from non-secure mode
 	 * accessing the below registers will fault.
 	 */
-	if (!(readl(l2x0_base + L2X0_CTRL) & 1)) {
+	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
 
 		/* l2x0 controller is disabled */
-		writel(aux, l2x0_base + L2X0_AUX_CTRL);
+		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
 
 		l2x0_inv_all();
 
 		/* enable L2X0 */
-		writel(1, l2x0_base + L2X0_CTRL);
+		writel_relaxed(1, l2x0_base + L2X0_CTRL);
 	}
 
 	outer_cache.inv_range = l2x0_inv_range;
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 086816b205b8..6ab244062b4a 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -163,19 +163,22 @@ static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
 
 void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
 {
-	unsigned int idx, cpu = smp_processor_id();
-	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+	unsigned int idx, cpu;
+	int *depth;
 	unsigned long vaddr, flags;
 	pte_t pte, *ptep;
 
+	if (!in_interrupt())
+		preempt_disable();
+
+	cpu = smp_processor_id();
+	depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+
 	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	ptep = TOP_PTE(vaddr);
 	pte = mk_pte(page, kmap_prot);
 
-	if (!in_interrupt())
-		preempt_disable();
-
 	raw_local_irq_save(flags);
 	(*depth)++;
 	if (pte_val(*ptep) == pte_val(pte)) {
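
Note on the highmem.c hunk above: the CPU number and the per-CPU depth pointer are now looked up only after preemption has been disabled, so the task cannot migrate between reading smp_processor_id() and using that CPU's kmap slot. The same rule in its usual generic form, as a small sketch with a hypothetical per-CPU counter:

#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(int, example_counter);	/* hypothetical per-CPU variable */

static void bump_local_counter(void)
{
	int cpu;

	cpu = get_cpu();		/* disables preemption, then returns the CPU id */
	per_cpu(example_counter, cpu)++;
	put_cpu();			/* re-enables preemption */
}
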
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index d5e3e6007447..bea9ee37ac9d 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -535,8 +535,16 @@ pgm_no_vtime2:
 	l	%r3,__LC_PGM_ILC	# load program interruption code
 	la	%r8,0x7f
 	nr	%r8,%r3			# clear per-event-bit and ilc
-	be	BASED(pgm_exit)		# only per or per+check ?
-	b	BASED(pgm_do_call)
+	be	BASED(pgm_exit2)	# only per or per+check ?
+	l	%r7,BASED(.Ljump_table)
+	sll	%r8,2
+	l	%r7,0(%r8,%r7)		# load address of handler routine
+	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	basr	%r14,%r7		# branch to interrupt-handler
+pgm_exit2:
+	TRACE_IRQS_ON
+	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
+	b	BASED(sysc_return)
 
 #
 # it was a single stepped SVC that is causing all the trouble
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index e7192e1cb678..8bccec15ea90 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -544,8 +544,16 @@ pgm_no_vtime2:
 	lgf	%r3,__LC_PGM_ILC	# load program interruption code
 	lghi	%r8,0x7f
 	ngr	%r8,%r3			# clear per-event-bit and ilc
-	je	pgm_exit
-	j	pgm_do_call
+	je	pgm_exit2
+	sll	%r8,3
+	larl	%r1,pgm_check_table
+	lg	%r1,0(%r8,%r1)		# load address of handler routine
+	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	basr	%r14,%r1		# branch to interrupt-handler
+pgm_exit2:
+	TRACE_IRQS_ON
+	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
+	j	sysc_return
 
 #
 # it was a single stepped SVC that is causing all the trouble
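
Note on the two s390 entry hunks above: instead of branching to pgm_do_call, both paths now inline the dispatch, shifting the masked interruption code by 2 (31-bit) or 3 (64-bit) to index a table of handler pointers, calling the handler with the register save area, and leaving through the new pgm_exit2 label, which re-enables interrupts and branches to sysc_return. A loose C rendering of that table dispatch, with hypothetical type and function names (the real handlers and table live in the s390 sources):

struct pt_regs;
typedef void (*pgm_handler_fn)(struct pt_regs *regs);

static void dispatch_program_check(pgm_handler_fn *table, struct pt_regs *regs,
				   unsigned int pgm_ilc)
{
	unsigned int code = pgm_ilc & 0x7f;	/* drop per-event bit and ilc, as the asm does */

	/* array indexing scales by sizeof(pointer), i.e. the shift in the asm */
	table[code](regs);
}
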
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index a2163c95eb98..15a7536452d5 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -524,8 +524,11 @@ void etr_switch_to_local(void)
 	if (!etr_eacr.sl)
 		return;
 	disable_sync_clock(NULL);
-	set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events);
-	queue_work(time_sync_wq, &etr_work);
+	if (!test_and_set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events)) {
+		etr_eacr.es = etr_eacr.sl = 0;
+		etr_setr(&etr_eacr);
+		queue_work(time_sync_wq, &etr_work);
+	}
 }
 
 /*
@@ -539,8 +542,11 @@ void etr_sync_check(void)
 	if (!etr_eacr.es)
 		return;
 	disable_sync_clock(NULL);
-	set_bit(ETR_EVENT_SYNC_CHECK, &etr_events);
-	queue_work(time_sync_wq, &etr_work);
+	if (!test_and_set_bit(ETR_EVENT_SYNC_CHECK, &etr_events)) {
+		etr_eacr.es = 0;
+		etr_setr(&etr_eacr);
+		queue_work(time_sync_wq, &etr_work);
+	}
 }
 
 /*
@@ -902,7 +908,7 @@ static struct etr_eacr etr_handle_update(struct etr_aib *aib,
 	 * Do not try to get the alternate port aib if the clock
 	 * is not in sync yet.
 	 */
-	if (!check_sync_clock())
+	if (!eacr.es || !check_sync_clock())
 		return eacr;
 
 	/*
@@ -1064,7 +1070,7 @@ static void etr_work_fn(struct work_struct *work)
 	 * If the clock is in sync just update the eacr and return.
 	 * If there is no valid sync port wait for a port update.
 	 */
-	if (check_sync_clock() || sync_port < 0) {
+	if ((eacr.es && check_sync_clock()) || sync_port < 0) {
 		etr_update_eacr(eacr);
 		etr_set_tolec_timeout(now);
 		goto out_unlock;
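
Note on the time.c hunks above: etr_switch_to_local() and etr_sync_check() now use test_and_set_bit() so that the es/sl control bits are cleared via etr_setr() and etr_work is queued only by the first caller to raise the event; the later hunks additionally require eacr.es before trusting check_sync_clock(). The queue-at-most-once idiom in isolation, as a sketch with hypothetical names:

#include <linux/bitops.h>
#include <linux/workqueue.h>

#define EV_SYNC_LOST	0			/* hypothetical event bit */

static unsigned long ev_flags;
static struct work_struct ev_work;		/* assumed initialised elsewhere */
static struct workqueue_struct *ev_wq;		/* assumed created elsewhere */

static void report_sync_lost(void)
{
	/* test_and_set_bit() returns the old bit value, so only the first
	 * report between worker runs queues the work. */
	if (!test_and_set_bit(EV_SYNC_LOST, &ev_flags))
		queue_work(ev_wq, &ev_work);
}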