-rw-r--r--   arch/sh/include/asm/dwarf.h      19
-rw-r--r--   arch/sh/include/asm/module.h     17
-rw-r--r--   arch/sh/kernel/cpu/sh3/entry.S    3
-rw-r--r--   arch/sh/kernel/dwarf.c          194
-rw-r--r--   arch/sh/kernel/entry-common.S     8
-rw-r--r--   drivers/serial/sh-sci.h         220
6 files changed, 173 insertions(+), 288 deletions(-)
diff --git a/arch/sh/include/asm/dwarf.h b/arch/sh/include/asm/dwarf.h
index bdccbbfdc0bd..d62abd1d0c05 100644
--- a/arch/sh/include/asm/dwarf.h
+++ b/arch/sh/include/asm/dwarf.h
@@ -243,16 +243,13 @@ struct dwarf_cie {
 
 	unsigned long cie_pointer;
 
-	struct list_head link;
-
 	unsigned long flags;
 #define DWARF_CIE_Z_AUGMENTATION	(1 << 0)
 
-	/*
-	 * 'mod' will be non-NULL if this CIE came from a module's
-	 * .eh_frame section.
-	 */
-	struct module *mod;
+	/* linked-list entry if this CIE is from a module */
+	struct list_head link;
+
+	struct rb_node node;
 };
 
 /**
@@ -266,13 +263,11 @@ struct dwarf_fde {
 	unsigned long address_range;
 	unsigned char *instructions;
 	unsigned char *end;
+
+	/* linked-list entry if this FDE is from a module */
 	struct list_head link;
 
-	/*
-	 * 'mod' will be non-NULL if this FDE came from a module's
-	 * .eh_frame section.
-	 */
-	struct module *mod;
+	struct rb_node node;
 };
 
 /**
diff --git a/arch/sh/include/asm/module.h b/arch/sh/include/asm/module.h
index 068bf1659750..b7927de86f9f 100644
--- a/arch/sh/include/asm/module.h
+++ b/arch/sh/include/asm/module.h
@@ -1,7 +1,22 @@
 #ifndef _ASM_SH_MODULE_H
 #define _ASM_SH_MODULE_H
 
-#include <asm-generic/module.h>
+struct mod_arch_specific {
+#ifdef CONFIG_DWARF_UNWINDER
+	struct list_head fde_list;
+	struct list_head cie_list;
+#endif
+};
+
+#ifdef CONFIG_64BIT
+#define Elf_Shdr	Elf64_Shdr
+#define Elf_Sym		Elf64_Sym
+#define Elf_Ehdr	Elf64_Ehdr
+#else
+#define Elf_Shdr	Elf32_Shdr
+#define Elf_Sym		Elf32_Sym
+#define Elf_Ehdr	Elf32_Ehdr
+#endif
 
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
 # ifdef CONFIG_CPU_SH2
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index 3f7e2a22c7c2..f6a389c996cb 100644
--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -132,7 +132,6 @@ ENTRY(tlb_protection_violation_store)
 	mov	#1, r5
 
 call_handle_tlbmiss:
-	setup_frame_reg
 	mov.l	1f, r0
 	mov	r5, r8
 	mov.l	@r0, r6
@@ -365,6 +364,8 @@ handle_exception:
 	mov.l	@k2, k2		! read out vector and keep in k2
 
 handle_exception_special:
+	setup_frame_reg
+
 	! Setup return address and jump to exception handler
 	mov.l	7f, r9		! fetch return address
 	stc	r2_bank, r0	! k2 (vector)
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index 88d28ec3780a..bd1c497280a6 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -39,10 +39,10 @@ static mempool_t *dwarf_frame_pool;
 static struct kmem_cache *dwarf_reg_cachep;
 static mempool_t *dwarf_reg_pool;
 
-static LIST_HEAD(dwarf_cie_list);
+static struct rb_root cie_root;
 static DEFINE_SPINLOCK(dwarf_cie_lock);
 
-static LIST_HEAD(dwarf_fde_list);
+static struct rb_root fde_root;
 static DEFINE_SPINLOCK(dwarf_fde_lock);
 
 static struct dwarf_cie *cached_cie;
@@ -301,7 +301,8 @@ static inline int dwarf_entry_len(char *addr, unsigned long *len)
  */
 static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
 {
-	struct dwarf_cie *cie;
+	struct rb_node **rb_node = &cie_root.rb_node;
+	struct dwarf_cie *cie = NULL;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dwarf_cie_lock, flags);
@@ -315,16 +316,24 @@ static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
 		goto out;
 	}
 
-	list_for_each_entry(cie, &dwarf_cie_list, link) {
-		if (cie->cie_pointer == cie_ptr) {
-			cached_cie = cie;
-			break;
+	while (*rb_node) {
+		struct dwarf_cie *cie_tmp;
+
+		cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
+		BUG_ON(!cie_tmp);
+
+		if (cie_ptr == cie_tmp->cie_pointer) {
+			cie = cie_tmp;
+			cached_cie = cie_tmp;
+			goto out;
+		} else {
+			if (cie_ptr < cie_tmp->cie_pointer)
+				rb_node = &(*rb_node)->rb_left;
+			else
+				rb_node = &(*rb_node)->rb_right;
 		}
 	}
 
-	/* Couldn't find the entry in the list. */
-	if (&cie->link == &dwarf_cie_list)
-		cie = NULL;
 out:
 	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
 	return cie;
@@ -336,25 +345,34 @@ out:
 
 struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
 {
-	struct dwarf_fde *fde;
+	struct rb_node **rb_node = &fde_root.rb_node;
+	struct dwarf_fde *fde = NULL;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dwarf_fde_lock, flags);
 
-	list_for_each_entry(fde, &dwarf_fde_list, link) {
-		unsigned long start, end;
+	while (*rb_node) {
+		struct dwarf_fde *fde_tmp;
+		unsigned long tmp_start, tmp_end;
 
-		start = fde->initial_location;
-		end = fde->initial_location + fde->address_range;
+		fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);
+		BUG_ON(!fde_tmp);
 
-		if (pc >= start && pc < end)
-			break;
-	}
+		tmp_start = fde_tmp->initial_location;
+		tmp_end = fde_tmp->initial_location + fde_tmp->address_range;
 
-	/* Couldn't find the entry in the list. */
-	if (&fde->link == &dwarf_fde_list)
-		fde = NULL;
+		if (pc < tmp_start) {
+			rb_node = &(*rb_node)->rb_left;
+		} else {
+			if (pc < tmp_end) {
+				fde = fde_tmp;
+				goto out;
+			} else
+				rb_node = &(*rb_node)->rb_right;
+		}
+	}
 
+out:
 	spin_unlock_irqrestore(&dwarf_fde_lock, flags);
 
 	return fde;
@@ -540,6 +558,8 @@ void dwarf_free_frame(struct dwarf_frame *frame)
 	mempool_free(frame, dwarf_frame_pool);
 }
 
+extern void ret_from_irq(void);
+
 /**
  * dwarf_unwind_stack - unwind the stack
  *
@@ -550,8 +570,8 @@ void dwarf_free_frame(struct dwarf_frame *frame)
  * on the callstack. Each of the lower (older) stack frames are
  * linked via the "prev" member.
  */
-struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
-					struct dwarf_frame *prev)
+struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
+				       struct dwarf_frame *prev)
 {
 	struct dwarf_frame *frame;
 	struct dwarf_cie *cie;
@@ -678,6 +698,24 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
 	addr = frame->cfa + reg->addr;
 	frame->return_addr = __raw_readl(addr);
 
+	/*
+	 * Ah, the joys of unwinding through interrupts.
+	 *
+	 * Interrupts are tricky - the DWARF info needs to be _really_
+	 * accurate and unfortunately I'm seeing a lot of bogus DWARF
+	 * info. For example, I've seen interrupts occur in epilogues
+	 * just after the frame pointer (r14) had been restored. The
+	 * problem was that the DWARF info claimed that the CFA could be
+	 * reached by using the value of the frame pointer before it was
+	 * restored.
+	 *
+	 * So until the compiler can be trusted to produce reliable
+	 * DWARF info when it really matters, let's stop unwinding once
+	 * we've calculated the function that was interrupted.
+	 */
+	if (prev && prev->pc == (unsigned long)ret_from_irq)
+		frame->return_addr = 0;
+
 	return frame;
 
 bail:
@@ -688,6 +726,8 @@ bail:
 static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
 			   unsigned char *end, struct module *mod)
 {
+	struct rb_node **rb_node = &cie_root.rb_node;
+	struct rb_node *parent;
 	struct dwarf_cie *cie;
 	unsigned long flags;
 	int count;
@@ -782,11 +822,30 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
 	cie->initial_instructions = p;
 	cie->instructions_end = end;
 
-	cie->mod = mod;
-
 	/* Add to list */
 	spin_lock_irqsave(&dwarf_cie_lock, flags);
-	list_add_tail(&cie->link, &dwarf_cie_list);
+
+	while (*rb_node) {
+		struct dwarf_cie *cie_tmp;
+
+		cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
+
+		parent = *rb_node;
+
+		if (cie->cie_pointer < cie_tmp->cie_pointer)
+			rb_node = &parent->rb_left;
+		else if (cie->cie_pointer >= cie_tmp->cie_pointer)
+			rb_node = &parent->rb_right;
+		else
+			WARN_ON(1);
+	}
+
+	rb_link_node(&cie->node, parent, rb_node);
+	rb_insert_color(&cie->node, &cie_root);
+
+	if (mod != NULL)
+		list_add_tail(&cie->link, &mod->arch.cie_list);
+
 	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
 
 	return 0;
@@ -796,6 +855,8 @@ static int dwarf_parse_fde(void *entry, u32 entry_type,
 			   void *start, unsigned long len,
 			   unsigned char *end, struct module *mod)
 {
+	struct rb_node **rb_node = &fde_root.rb_node;
+	struct rb_node *parent;
 	struct dwarf_fde *fde;
 	struct dwarf_cie *cie;
 	unsigned long flags;
@@ -843,11 +904,38 @@ static int dwarf_parse_fde(void *entry, u32 entry_type,
 	fde->instructions = p;
 	fde->end = end;
 
-	fde->mod = mod;
-
 	/* Add to list. */
 	spin_lock_irqsave(&dwarf_fde_lock, flags);
-	list_add_tail(&fde->link, &dwarf_fde_list);
+
+	while (*rb_node) {
+		struct dwarf_fde *fde_tmp;
+		unsigned long tmp_start, tmp_end;
+		unsigned long start, end;
+
+		fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);
+
+		start = fde->initial_location;
+		end = fde->initial_location + fde->address_range;
+
+		tmp_start = fde_tmp->initial_location;
+		tmp_end = fde_tmp->initial_location + fde_tmp->address_range;
+
+		parent = *rb_node;
+
+		if (start < tmp_start)
+			rb_node = &parent->rb_left;
+		else if (start >= tmp_end)
+			rb_node = &parent->rb_right;
+		else
+			WARN_ON(1);
+	}
+
+	rb_link_node(&fde->node, parent, rb_node);
+	rb_insert_color(&fde->node, &fde_root);
+
+	if (mod != NULL)
+		list_add_tail(&fde->link, &mod->arch.fde_list);
+
 	spin_unlock_irqrestore(&dwarf_fde_lock, flags);
 
 	return 0;
@@ -892,19 +980,29 @@ static struct unwinder dwarf_unwinder = {
 
 static void dwarf_unwinder_cleanup(void)
 {
-	struct dwarf_cie *cie, *cie_tmp;
-	struct dwarf_fde *fde, *fde_tmp;
+	struct rb_node **fde_rb_node = &fde_root.rb_node;
+	struct rb_node **cie_rb_node = &cie_root.rb_node;
 
 	/*
 	 * Deallocate all the memory allocated for the DWARF unwinder.
 	 * Traverse all the FDE/CIE lists and remove and free all the
 	 * memory associated with those data structures.
 	 */
-	list_for_each_entry_safe(cie, cie_tmp, &dwarf_cie_list, link)
-		kfree(cie);
+	while (*fde_rb_node) {
+		struct dwarf_fde *fde;
 
-	list_for_each_entry_safe(fde, fde_tmp, &dwarf_fde_list, link)
+		fde = rb_entry(*fde_rb_node, struct dwarf_fde, node);
+		rb_erase(*fde_rb_node, &fde_root);
 		kfree(fde);
+	}
+
+	while (*cie_rb_node) {
+		struct dwarf_cie *cie;
+
+		cie = rb_entry(*cie_rb_node, struct dwarf_cie, node);
+		rb_erase(*cie_rb_node, &cie_root);
+		kfree(cie);
+	}
 
 	kmem_cache_destroy(dwarf_reg_cachep);
 	kmem_cache_destroy(dwarf_frame_cachep);
@@ -1004,6 +1102,8 @@ int module_dwarf_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
 
 	/* Did we find the .eh_frame section? */
 	if (i != hdr->e_shnum) {
+		INIT_LIST_HEAD(&me->arch.cie_list);
+		INIT_LIST_HEAD(&me->arch.fde_list);
 		err = dwarf_parse_section((char *)start, (char *)end, me);
 		if (err) {
 			printk(KERN_WARNING "%s: failed to parse DWARF info\n",
@@ -1024,38 +1124,26 @@ int module_dwarf_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
  */
 void module_dwarf_cleanup(struct module *mod)
 {
-	struct dwarf_fde *fde;
-	struct dwarf_cie *cie;
+	struct dwarf_fde *fde, *ftmp;
+	struct dwarf_cie *cie, *ctmp;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dwarf_cie_lock, flags);
 
-again_cie:
-	list_for_each_entry(cie, &dwarf_cie_list, link) {
-		if (cie->mod == mod)
-			break;
-	}
-
-	if (&cie->link != &dwarf_cie_list) {
+	list_for_each_entry_safe(cie, ctmp, &mod->arch.cie_list, link) {
 		list_del(&cie->link);
+		rb_erase(&cie->node, &cie_root);
 		kfree(cie);
-		goto again_cie;
 	}
 
 	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
 
 	spin_lock_irqsave(&dwarf_fde_lock, flags);
 
-again_fde:
-	list_for_each_entry(fde, &dwarf_fde_list, link) {
-		if (fde->mod == mod)
-			break;
-	}
-
-	if (&fde->link != &dwarf_fde_list) {
+	list_for_each_entry_safe(fde, ftmp, &mod->arch.fde_list, link) {
 		list_del(&fde->link);
+		rb_erase(&fde->node, &fde_root);
 		kfree(fde);
-		goto again_fde;
 	}
 
 	spin_unlock_irqrestore(&dwarf_fde_lock, flags);
@@ -1074,8 +1162,6 @@ again_fde:
 static int __init dwarf_unwinder_init(void)
 {
 	int err;
-	INIT_LIST_HEAD(&dwarf_cie_list);
-	INIT_LIST_HEAD(&dwarf_fde_list);
 
 	dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
 			sizeof(struct dwarf_frame), 0,
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index f0abd58c3a69..2b15ae60c3a0 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -70,8 +70,14 @@ ret_from_exception:
 	CFI_STARTPROC simple
 	CFI_DEF_CFA r14, 0
 	CFI_REL_OFFSET 17, 64
-	CFI_REL_OFFSET 15, 0
+	CFI_REL_OFFSET 15, 60
 	CFI_REL_OFFSET 14, 56
+	CFI_REL_OFFSET 13, 52
+	CFI_REL_OFFSET 12, 48
+	CFI_REL_OFFSET 11, 44
+	CFI_REL_OFFSET 10, 40
+	CFI_REL_OFFSET 9, 36
+	CFI_REL_OFFSET 8, 32
 	preempt_stop()
 ENTRY(ret_from_irq)
 	!
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h
index 0efcded59ae6..f7d2589926d2 100644
--- a/drivers/serial/sh-sci.h
+++ b/drivers/serial/sh-sci.h
@@ -518,34 +518,6 @@ static inline int sci_rxd_in(struct uart_port *port)
 {
 	if (port->mapbase == 0xfffffe80)
 		return __raw_readb(SCPDR)&0x01 ? 1 : 0; /* SCI */
-	if (port->mapbase == 0xa4000150)
-		return __raw_readb(SCPDR)&0x10 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xa4000140)
-		return __raw_readb(SCPDR)&0x04 ? 1 : 0; /* IRDA */
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7705)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == SCIF0)
-		return __raw_readb(SCPDR)&0x04 ? 1 : 0; /* IRDA */
-	if (port->mapbase == SCIF2)
-		return __raw_readb(SCPDR)&0x10 ? 1 : 0; /* SCIF */
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	return sci_in(port,SCxSR)&0x0010 ? 1 : 0;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7720) || \
-	defined(CONFIG_CPU_SUBTYPE_SH7721)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xa4430000)
-		return sci_in(port, SCxSR) & 0x0003 ? 1 : 0;
-	else if (port->mapbase == 0xa4438000)
-		return sci_in(port, SCxSR) & 0x0003 ? 1 : 0;
 	return 1;
 }
 #elif defined(CONFIG_CPU_SUBTYPE_SH7750) || \
@@ -558,207 +530,17 @@ static inline int sci_rxd_in(struct uart_port *port)
 {
 	if (port->mapbase == 0xffe00000)
 		return __raw_readb(SCSPTR1)&0x01 ? 1 : 0; /* SCI */
-	if (port->mapbase == 0xffe80000)
-		return __raw_readw(SCSPTR2)&0x0001 ? 1 : 0; /* SCIF */
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH4_202)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xffe80000)
-		return __raw_readw(SCSPTR2)&0x0001 ? 1 : 0; /* SCIF */
 	return 1;
 }
-#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xfe4b0000)
-		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0;
-	if (port->mapbase == 0xfe4c0000)
-		return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0;
-	if (port->mapbase == 0xfe4d0000)
-		return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7760)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xfe600000)
-		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xfe610000)
-		return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xfe620000)
-		return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7343)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xffe00000)
-		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffe10000)
-		return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffe20000)
-		return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffe30000)
-		return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7366)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xffe00000)
-		return __raw_readb(SCPDR0) & 0x0001 ? 1 : 0; /* SCIF0 */
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xffe00000)
-		return __raw_readb(PSDR) & 0x02 ? 1 : 0; /* SCIF0 */
-	if (port->mapbase == 0xffe10000)
-		return __raw_readb(PADR) & 0x40 ? 1 : 0; /* SCIF1 */
-	if (port->mapbase == 0xffe20000)
-		return __raw_readb(PWDR) & 0x04 ? 1 : 0; /* SCIF2 */
-
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7723)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xffe00000)
-		return __raw_readb(SCSPTR0) & 0x0008 ? 1 : 0; /* SCIF0 */
-	if (port->mapbase == 0xffe10000)
-		return __raw_readb(SCSPTR1) & 0x0020 ? 1 : 0; /* SCIF1 */
-	if (port->mapbase == 0xffe20000)
-		return __raw_readb(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF2 */
-	if (port->mapbase == 0xa4e30000)
-		return __raw_readb(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF3 */
-	if (port->mapbase == 0xa4e40000)
-		return __raw_readb(SCSPTR4) & 0x0001 ? 1 : 0; /* SCIF4 */
-	if (port->mapbase == 0xa4e50000)
-		return __raw_readb(SCSPTR5) & 0x0008 ? 1 : 0; /* SCIF5 */
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
-# define SCFSR  0x0010
-# define SCASSR 0x0014
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->type == PORT_SCIF)
-		return __raw_readw((port->mapbase + SCFSR)) & SCIF_BRK ? 1 : 0;
-	if (port->type == PORT_SCIFA)
-		return __raw_readw((port->mapbase + SCASSR)) & SCIF_BRK ? 1 : 0;
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	return sci_in(port, SCSPTR)&0x0001 ? 1 : 0; /* SCIF */
-}
 #elif defined(__H8300H__) || defined(__H8300S__)
 static inline int sci_rxd_in(struct uart_port *port)
 {
 	int ch = (port->mapbase - SMR0) >> 3;
 	return (H8300_SCI_DR(ch) & h8300_sci_pins[ch].rx) ? 1 : 0;
 }
-#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xffe00000)
-		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffe08000)
-		return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffe10000)
-		return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF/IRDA */
-
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7770)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xff923000)
-		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xff924000)
-		return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xff925000)
-		return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xffe00000)
-		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffe10000)
-		return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7785) || \
-	defined(CONFIG_CPU_SUBTYPE_SH7786)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xffea0000)
-		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffeb0000)
-		return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffec0000)
-		return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffed0000)
-		return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffee0000)
-		return __raw_readw(SCSPTR4) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffef0000)
-		return __raw_readw(SCSPTR5) & 0x0001 ? 1 : 0; /* SCIF */
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7201) || \
-	defined(CONFIG_CPU_SUBTYPE_SH7203) || \
-	defined(CONFIG_CPU_SUBTYPE_SH7206) || \
-	defined(CONFIG_CPU_SUBTYPE_SH7263)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xfffe8000)
-		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xfffe8800)
-		return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xfffe9000)
-		return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xfffe9800)
-		return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
-#if defined(CONFIG_CPU_SUBTYPE_SH7201)
-	if (port->mapbase == 0xfffeA000)
-		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xfffeA800)
-		return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xfffeB000)
-		return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xfffeB800)
-		return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
-#endif
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
-static inline int sci_rxd_in(struct uart_port *port)
-{
-	if (port->mapbase == 0xf8400000)
-		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xf8410000)
-		return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xf8420000)
-		return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
-	return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SHX3)
+#else /* default case for non-SCI processors */
 static inline int sci_rxd_in(struct uart_port *port)
 {
-	if (port->mapbase == 0xffc30000)
-		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffc40000)
-		return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffc50000)
-		return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
-	if (port->mapbase == 0xffc60000)
-		return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
 	return 1;
 }
 #endif
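
Note (not part of the patch above): the dwarf.c hunks replace the linear list walks in dwarf_lookup_cie() and dwarf_lookup_fde() with searches over rb-trees, keyed on cie_pointer and on each FDE's [initial_location, initial_location + address_range) interval. As a rough, self-contained illustration of that FDE keying, here is a userspace sketch; the names are made up for the example, and a plain binary search stands in for the kernel's rb-tree helpers in <linux/rbtree.h>.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct dwarf_fde: one contiguous PC range per entry. */
struct range {
	unsigned long start;	/* initial_location */
	unsigned long len;	/* address_range */
};

/*
 * Keyed the same way the rb-tree walk in dwarf_lookup_fde() is keyed:
 * go left while pc < start, right while pc >= start + len, and stop
 * when start <= pc < start + len.
 */
static const struct range *lookup(const struct range *tbl, size_t n,
				  unsigned long pc)
{
	size_t lo = 0, hi = n;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;
		const struct range *r = &tbl[mid];

		if (pc < r->start)
			hi = mid;		/* descend "left" */
		else if (pc >= r->start + r->len)
			lo = mid + 1;		/* descend "right" */
		else
			return r;		/* pc falls inside this entry */
	}
	return NULL;
}

int main(void)
{
	/* Sorted and non-overlapping, as the .eh_frame parser guarantees. */
	static const struct range table[] = {
		{ 0x1000, 0x80 }, { 0x1080, 0x40 }, { 0x2000, 0x200 },
	};
	unsigned long pcs[] = { 0x1004, 0x10bf, 0x10c0, 0x2100 };

	for (size_t i = 0; i < sizeof(pcs) / sizeof(pcs[0]); i++) {
		const struct range *r = lookup(table, 3, pcs[i]);

		if (r)
			printf("pc 0x%lx -> entry [0x%lx, 0x%lx)\n",
			       pcs[i], r->start, r->start + r->len);
		else
			printf("pc 0x%lx -> no entry\n", pcs[i]);
	}
	return 0;
}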