author     Yi Li <yi.li@analog.com>            2009-01-07 10:14:39 -0500
committer  Bryan Wu <cooloney@kernel.org>      2009-01-07 10:14:39 -0500
commit     6a01f230339321292cf065551f8cf55361052461 (patch)
tree       7ac2ac8fc9f05a7315ef6a7f6f0a387433c62c14 /arch/blackfin/kernel
parent     5105432a3201e3f0e6c219cd0a74feee1e5e262b (diff)
Blackfin arch: merge the Adeos (I-pipe) Blackfin support into arch/blackfin/
[Mike Frysinger <vapier.adi@gmail.com>:
- handle bf531/bf532/bf534/bf536 variants in ipipe.h
- cleanup IPIPE logic for bfin_set_irq_handler()
- cleanup ipipe asm code a bit and add missing ENDPROC()
- simplify IPIPE code in trap_c
- unify some of the IPIPE code and fix style
- simplify DO_IRQ_L1 handling with ipipe code
- revert IRQ_SW_INT# addition from ipipe merge
- remove duplicate get_{c,s}clk() prototypes
]
Signed-off-by: Yi Li <yi.li@analog.com>
Signed-off-by: Mike Frysinger <vapier.adi@gmail.com>
Signed-off-by: Bryan Wu <cooloney@kernel.org>
Diffstat (limited to 'arch/blackfin/kernel')
 arch/blackfin/kernel/Makefile           |   2
 arch/blackfin/kernel/bfin_gpio.c        | 100
 arch/blackfin/kernel/cplb-mpu/cplbmgr.c |   8
 arch/blackfin/kernel/entry.S            |   4
 arch/blackfin/kernel/ipipe.c            | 428
 arch/blackfin/kernel/irqchip.c          |   5
 arch/blackfin/kernel/mcount.S           |  70
 arch/blackfin/kernel/process.c          |   7
 arch/blackfin/kernel/time.c             |  15
 arch/blackfin/kernel/traps.c            |  13
 10 files changed, 586 insertions, 66 deletions
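Most of the hunks that follow convert local_irq_save()/local_irq_restore() calls to their _hw counterparts. The intent, as a hedged summary not spelled out in the commit message: once the I-pipe is in place, the plain forms only virtually stall the Linux root domain, so a higher-priority (real-time) domain could still preempt the critical section, while the _hw forms mask the hardware interrupt state itself. A minimal sketch of the resulting usage pattern, assuming those semantics; the helper below is hypothetical and not part of the patch:

    /* Hypothetical helper, for illustration only; only the
     * local_irq_save_hw()/local_irq_restore_hw() names come from the patch. */
    static void sketch_set_bank_bit(volatile unsigned short *mmr, unsigned short bit)
    {
        unsigned long flags;

        /* local_irq_save() would only stall the root (Linux) domain here;
         * a real-time domain could still interrupt the read-modify-write. */
        local_irq_save_hw(flags);       /* mask hardware IRQs for every domain */
        *mmr |= bit;                    /* the MMR update is now truly atomic  */
        local_irq_restore_hw(flags);
    }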
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile
index f0902c120dc2..38a233374f07 100644
--- a/arch/blackfin/kernel/Makefile
+++ b/arch/blackfin/kernel/Makefile
@@ -15,6 +15,8 @@ else
 obj-y += time.o
 endif
 
+obj-$(CONFIG_IPIPE) += ipipe.o
+obj-$(CONFIG_IPIPE_TRACE_MCOUNT) += mcount.o
 obj-$(CONFIG_BFIN_GPTIMERS) += gptimers.o
 obj-$(CONFIG_CPLB_INFO) += cplbinfo.o
 obj-$(CONFIG_MODULES) += module.o
diff --git a/arch/blackfin/kernel/bfin_gpio.c b/arch/blackfin/kernel/bfin_gpio.c
index 2c72b15b71b0..4c14331978f6 100644
--- a/arch/blackfin/kernel/bfin_gpio.c
+++ b/arch/blackfin/kernel/bfin_gpio.c
@@ -422,13 +422,13 @@ arch_initcall(bfin_gpio_init);
 void set_gpio_ ## name(unsigned gpio, unsigned short arg) \
 { \
 unsigned long flags; \
-local_irq_save(flags); \
+local_irq_save_hw(flags); \
 if (arg) \
 gpio_bankb[gpio_bank(gpio)]->name |= gpio_bit(gpio); \
 else \
 gpio_bankb[gpio_bank(gpio)]->name &= ~gpio_bit(gpio); \
 AWA_DUMMY_READ(name); \
-local_irq_restore(flags); \
+local_irq_restore_hw(flags); \
 } \
 EXPORT_SYMBOL(set_gpio_ ## name);
 
@@ -444,13 +444,13 @@ SET_GPIO(both)
 void set_gpio_ ## name(unsigned gpio, unsigned short arg) \
 { \
 unsigned long flags; \
-local_irq_save(flags); \
+local_irq_save_hw(flags); \
 if (arg) \
 gpio_bankb[gpio_bank(gpio)]->name ## _set = gpio_bit(gpio); \
 else \
 gpio_bankb[gpio_bank(gpio)]->name ## _clear = gpio_bit(gpio); \
 AWA_DUMMY_READ(name); \
-local_irq_restore(flags); \
+local_irq_restore_hw(flags); \
 } \
 EXPORT_SYMBOL(set_gpio_ ## name);
 #else
@@ -473,10 +473,10 @@ SET_GPIO_SC(data)
 void set_gpio_toggle(unsigned gpio)
 {
 unsigned long flags;
-local_irq_save(flags);
+local_irq_save_hw(flags);
 gpio_bankb[gpio_bank(gpio)]->toggle = gpio_bit(gpio);
 AWA_DUMMY_READ(toggle);
-local_irq_restore(flags);
+local_irq_restore_hw(flags);
 }
 #else
 void set_gpio_toggle(unsigned gpio)
@@ -494,10 +494,10 @@ EXPORT_SYMBOL(set_gpio_toggle);
 void set_gpiop_ ## name(unsigned gpio, unsigned short arg) \
 { \
 unsigned long flags; \
-local_irq_save(flags); \
+local_irq_save_hw(flags); \
 gpio_bankb[gpio_bank(gpio)]->name = arg; \
 AWA_DUMMY_READ(name); \
-local_irq_restore(flags); \
+local_irq_restore_hw(flags); \
 } \
 EXPORT_SYMBOL(set_gpiop_ ## name);
 #else
@@ -525,10 +525,10 @@ unsigned short get_gpio_ ## name(unsigned gpio) \
 { \
 unsigned long flags; \
 unsigned short ret; \
-local_irq_save(flags); \
+local_irq_save_hw(flags); \
 ret = 0x01 & (gpio_bankb[gpio_bank(gpio)]->name >> gpio_sub_n(gpio)); \
 AWA_DUMMY_READ(name); \
-local_irq_restore(flags); \
+local_irq_restore_hw(flags); \
 return ret; \
 } \
 EXPORT_SYMBOL(get_gpio_ ## name);
@@ -558,10 +558,10 @@ unsigned short get_gpiop_ ## name(unsigned gpio) \
 { \
 unsigned long flags; \
 unsigned short ret; \
-local_irq_save(flags); \
+local_irq_save_hw(flags); \
 ret = (gpio_bankb[gpio_bank(gpio)]->name); \
 AWA_DUMMY_READ(name); \
-local_irq_restore(flags); \
+local_irq_restore_hw(flags); \
 return ret; \
 } \
 EXPORT_SYMBOL(get_gpiop_ ## name);
@@ -611,10 +611,10 @@ int gpio_pm_wakeup_request(unsigned gpio, unsigned char type)
 if ((check_gpio(gpio) < 0) || !type)
 return -EINVAL;
 
-local_irq_save(flags);
+local_irq_save_hw(flags);
 wakeup_map[gpio_bank(gpio)] |= gpio_bit(gpio);
 wakeup_flags_map[gpio] = type;
-local_irq_restore(flags);
+local_irq_restore_hw(flags);
 
 return 0;
 }
@@ -627,11 +627,11 @@ void gpio_pm_wakeup_free(unsigned gpio)
 if (check_gpio(gpio) < 0)
 return;
 
-local_irq_save(flags);
+local_irq_save_hw(flags);
 
 wakeup_map[gpio_bank(gpio)] &= ~gpio_bit(gpio);
 
-local_irq_restore(flags);
+local_irq_restore_hw(flags);
 }
 EXPORT_SYMBOL(gpio_pm_wakeup_free);
 
@@ -882,7 +882,7 @@ int peripheral_request(unsigned short per, const char *label)
 if (!(per & P_DEFINED))
 return -ENODEV;
 
-local_irq_save(flags);
+local_irq_save_hw(flags);
 
 /* If a pin can be muxed as either GPIO or peripheral, make
 * sure it is not already a GPIO pin when we request it.
@@ -893,7 +893,7 @@ int peripheral_request(unsigned short per, const char *label)
 printk(KERN_ERR
 "%s: Peripheral %d is already reserved as GPIO by %s !\n",
 __func__, ident, get_label(ident));
-local_irq_restore(flags);
+local_irq_restore_hw(flags);
 return -EBUSY;
 }
 
@@ -923,7 +923,7 @@ int peripheral_request(unsigned short per, const char *label)
 printk(KERN_ERR
 "%s: Peripheral %d function %d is already reserved by %s !\n",
 __func__, ident, P_FUNCT2MUX(per), get_label(ident));
-local_irq_restore(flags);
+local_irq_restore_hw(flags);
 return -EBUSY;
 }
 }
@@ -938,7 +938,7 @@ int peripheral_request(unsigned short per, const char *label)
 #endif
 port_setup(ident, PERIPHERAL_USAGE);
 
-local_irq_restore(flags);
+local_irq_restore_hw(flags);
 set_label(ident, label);
 
 return 0;
@@ -980,10 +980,10 @@ void peripheral_free(unsigned short per)
 if (check_gpio(ident) < 0)
 return;
 
-local_irq_save(flags);
+local_irq_save_hw(flags);
 
 if (unlikely(!(reserved_peri_map[gpio_bank(ident)] & gpio_bit(ident)))) {
-local_irq_restore(flags);
+local_irq_restore_hw(flags);
 return;
 }
 
@@ -994,7 +994,7 @@ void peripheral_free(unsigned short per)
 
 set_label(ident, "free");
 
-local_irq_restore(flags);
+local_irq_restore_hw(flags);
 }
 EXPORT_SYMBOL(peripheral_free);
 
@@ -1028,7 +1028,7 @@ int bfin_gpio_request(unsigned gpio, const char *label)
 if (check_gpio(gpio) < 0)
 return -EINVAL;
 
-local_irq_save(flags);
+local_irq_save_hw(flags);
 
 /*
 * Allow that the identical GPIO can
@@ -1037,7 +1037,7 @@ int bfin_gpio_request(unsigned gpio, const char *label)
 */
 
 if (cmp_label(gpio, label) == 0) {
-local_irq_restore(flags);
+local_irq_restore_hw(flags);
 return 0;
 }
 
@@ -1045,7 +1045,7 @@ int bfin_gpio_request(unsigned gpio, const char *label)
 dump_stack();
 printk(KERN_ERR "bfin-gpio: GPIO %d is already reserved by %s !\n",
 gpio, get_label(gpio));
-local_irq_restore(flags);
+local_irq_restore_hw(flags);
 return -EBUSY;
 }
 if (unlikely(reserved_peri_map[gpio_bank(gpio)] & gpio_bit(gpio))) {
@@ -1053,7 +1053,7 @@ int bfin_gpio_request(unsigned gpio, const char *label)
 printk(KERN_ERR
 "bfin-gpio: GPIO %d is already reserved as Peripheral by %s !\n",
 gpio, get_label(gpio));
-local_irq_restore(flags);
+local_irq_restore_hw(flags);
 return -EBUSY;
 }
 if (unlikely(reserved_gpio_irq_map[gpio_bank(gpio)] & gpio_bit(gpio)))
@@ -1063,7 +1063,7 @@ int bfin_gpio_request(unsigned gpio, const char *label)
 reserved_gpio_map[gpio_bank(gpio)] |= gpio_bit(gpio);
 set_label(gpio, label);
 
-local_irq_restore(flags);
+local_irq_restore_hw(flags);
 
 port_setup(gpio, GPIO_USAGE);
 
@@ -1078,12 +1078,12 @@ void bfin_gpio_free(unsigned gpio)
 if (check_gpio(gpio) < 0)
 return;
 
-local_irq_save(flags);
+local_irq_save_hw(flags);
 
 if (unlikely(!(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio)))) {
 dump_stack();
 gpio_error(gpio);
-local_irq_restore(flags);
+local_irq_restore_hw(flags);
 return;
 }
 
@@ -1091,7 +1091,7 @@ void bfin_gpio_free(unsigned gpio)
 
 set_label(gpio, "free");
 
-local_irq_restore(flags);
+local_irq_restore_hw(flags);
 }
 EXPORT_SYMBOL(bfin_gpio_free);
 
@@ -1102,14 +1102,14 @@ int bfin_gpio_irq_request(unsigned gpio, const char *label)
 if (check_gpio(gpio) < 0)
 return -EINVAL;
 
-local_irq_save(flags);
+local_irq_save_hw(flags);
 
 if (unlikely(reserved_gpio_irq_map[gpio_bank(gpio)] & gpio_bit(gpio))) {
 dump_stack();
 printk(KERN_ERR
 "bfin-gpio: GPIO %d is already reserved as gpio-irq !\n",
 gpio);
-local_irq_restore(flags);
+local_irq_restore_hw(flags);
 return -EBUSY;
 }
 if (unlikely(reserved_peri_map[gpio_bank(gpio)] & gpio_bit(gpio))) {
@@ -1117,7 +1117,7 @@ int bfin_gpio_irq_request(unsigned gpio, const char *label)
 printk(KERN_ERR
 "bfin-gpio: GPIO %d is already reserved as Peripheral by %s !\n",
 gpio, get_label(gpio));
-local_irq_restore(flags);
+local_irq_restore_hw(flags);
 return -EBUSY;
 }
 if (unlikely(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio)))
@@ -1128,7 +1128,7 @@ int bfin_gpio_irq_request(unsigned gpio, const char *label)
 reserved_gpio_irq_map[gpio_bank(gpio)] |= gpio_bit(gpio);
 set_label(gpio, label);
 
-local_irq_restore(flags);
+local_irq_restore_hw(flags);
 
 port_setup(gpio, GPIO_USAGE);
 
@@ -1142,12 +1142,12 @@ void bfin_gpio_irq_free(unsigned gpio)
 if (check_gpio(gpio) < 0)
 return;
 
-local_irq_save(flags);
+local_irq_save_hw(flags);
 
 if (unlikely(!(reserved_gpio_irq_map[gpio_bank(gpio)] & gpio_bit(gpio)))) {
 dump_stack();
 gpio_error(gpio);
-local_irq_restore(flags);
+local_irq_restore_hw(flags);
 return;
 }
 
@@ -1155,7 +1155,7 @@ void bfin_gpio_irq_free(unsigned gpio)
 
 set_label(gpio, "free");
 
-local_irq_restore(flags);
+local_irq_restore_hw(flags);
 }
 
 
@@ -1169,10 +1169,10 @@ int bfin_gpio_direction_input(unsigned gpio)
 return -EINVAL;
 }
 
-local_irq_save(flags);
+local_irq_save_hw(flags);
 gpio_array[gpio_bank(gpio)]->port_dir_clear = gpio_bit(gpio);
 gpio_array[gpio_bank(gpio)]->port_inen |= gpio_bit(gpio);
-local_irq_restore(flags);
+local_irq_restore_hw(flags);
 
 return 0;
 }
@@ -1187,11 +1187,11 @@ int bfin_gpio_direction_output(unsigned gpio, int value)
 return -EINVAL;
 }
 
-local_irq_save(flags);
+local_irq_save_hw(flags);
 gpio_array[gpio_bank(gpio)]->port_inen &= ~gpio_bit(gpio);
 gpio_set_value(gpio, value);
 gpio_array[gpio_bank(gpio)]->port_dir_set = gpio_bit(gpio);
-local_irq_restore(flags);
+local_irq_restore_hw(flags);
 
 return 0;
 }
@@ -1218,10 +1218,10 @@ void bfin_gpio_irq_prepare(unsigned gpio)
 
 port_setup(gpio, GPIO_USAGE);
 
-local_irq_save(flags);
+local_irq_save_hw(flags);
 gpio_array[gpio_bank(gpio)]->port_dir_clear = gpio_bit(gpio);
 gpio_array[gpio_bank(gpio)]->port_inen |= gpio_bit(gpio);
-local_irq_restore(flags);
+local_irq_restore_hw(flags);
 }
 
 #else
@@ -1232,11 +1232,11 @@ int bfin_gpio_get_value(unsigned gpio)
 int ret;
 
 if (unlikely(get_gpio_edge(gpio))) {
-local_irq_save(flags);
+local_irq_save_hw(flags);
 set_gpio_edge(gpio, 0);
 ret = get_gpio_data(gpio);
 set_gpio_edge(gpio, 1);
-local_irq_restore(flags);
+local_irq_restore_hw(flags);
 
 return ret;
 } else
@@ -1254,11 +1254,11 @@ int bfin_gpio_direction_input(unsigned gpio)
 return -EINVAL;
 }
 
-local_irq_save(flags);
+local_irq_save_hw(flags);
 gpio_bankb[gpio_bank(gpio)]->dir &= ~gpio_bit(gpio);
 gpio_bankb[gpio_bank(gpio)]->inen |= gpio_bit(gpio);
 AWA_DUMMY_READ(inen);
-local_irq_restore(flags);
+local_irq_restore_hw(flags);
 
 return 0;
 }
@@ -1273,7 +1273,7 @@ int bfin_gpio_direction_output(unsigned gpio, int value)
 return -EINVAL;
 }
 
-local_irq_save(flags);
+local_irq_save_hw(flags);
 gpio_bankb[gpio_bank(gpio)]->inen &= ~gpio_bit(gpio);
 
 if (value)
@@ -1283,7 +1283,7 @@ int bfin_gpio_direction_output(unsigned gpio, int value)
 
 gpio_bankb[gpio_bank(gpio)]->dir |= gpio_bit(gpio);
 AWA_DUMMY_READ(dir);
-local_irq_restore(flags);
+local_irq_restore_hw(flags);
 
 return 0;
 }
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
index 5ef5d1a787fc..87463ce87f5a 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
@@ -332,7 +332,7 @@ void flush_switched_cplbs(unsigned int cpu)
 
 nr_cplb_flush[cpu]++;
 
-local_irq_save(flags);
+local_irq_save_hw(flags);
 disable_icplb();
 for (i = first_switched_icplb; i < MAX_CPLBS; i++) {
 icplb_tbl[cpu][i].data = 0;
@@ -346,7 +346,7 @@ void flush_switched_cplbs(unsigned int cpu)
 bfin_write32(DCPLB_DATA0 + i * 4, 0);
 }
 enable_dcplb();
-local_irq_restore(flags);
+local_irq_restore_hw(flags);
 
 }
 
@@ -362,7 +362,7 @@ void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
 return;
 }
 
-local_irq_save(flags);
+local_irq_save_hw(flags);
 current_rwx_mask[cpu] = masks;
 
 d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
@@ -382,5 +382,5 @@ void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
 addr += PAGE_SIZE;
 }
 enable_dcplb();
-local_irq_restore(flags);
+local_irq_restore_hw(flags);
 }
diff --git a/arch/blackfin/kernel/entry.S b/arch/blackfin/kernel/entry.S
index c0c3fe811228..a9cfba9946b5 100644
--- a/arch/blackfin/kernel/entry.S
+++ b/arch/blackfin/kernel/entry.S
@@ -42,6 +42,10 @@
 #endif
 
 ENTRY(_ret_from_fork)
+#ifdef CONFIG_IPIPE
+[--sp] = reti; /* IRQs on. */
+SP += 4;
+#endif /* CONFIG_IPIPE */
 SP += -12;
 call _schedule_tail;
 SP += 12;
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
new file mode 100644
index 000000000000..339be5a3ae6a
--- /dev/null
+++ b/arch/blackfin/kernel/ipipe.c
@@ -0,0 +1,428 @@
+/* -*- linux-c -*-
+* linux/arch/blackfin/kernel/ipipe.c
+*
+* Copyright (C) 2005-2007 Philippe Gerum.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+* USA; either version 2 of the License, or (at your option) any later
+* version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+*
+* Architecture-dependent I-pipe support for the Blackfin.
+*/
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/kthread.h>
+#include <asm/unistd.h>
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <asm/io.h>
+
+static int create_irq_threads;
+
+DEFINE_PER_CPU(struct pt_regs, __ipipe_tick_regs);
+
+static DEFINE_PER_CPU(unsigned long, pending_irqthread_mask);
+
+static DEFINE_PER_CPU(int [IVG13 + 1], pending_irq_count);
+
+asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs);
+
+static void __ipipe_no_irqtail(void);
+
+unsigned long __ipipe_irq_tail_hook = (unsigned long)&__ipipe_no_irqtail;
+EXPORT_SYMBOL(__ipipe_irq_tail_hook);
+
+unsigned long __ipipe_core_clock;
+EXPORT_SYMBOL(__ipipe_core_clock);
+
+unsigned long __ipipe_freq_scale;
+EXPORT_SYMBOL(__ipipe_freq_scale);
+
+atomic_t __ipipe_irq_lvdepth[IVG15 + 1];
+
+unsigned long __ipipe_irq_lvmask = __all_masked_irq_flags;
+EXPORT_SYMBOL(__ipipe_irq_lvmask);
+
+static void __ipipe_ack_irq(unsigned irq, struct irq_desc *desc)
+{
+desc->ipipe_ack(irq, desc);
+}
+
+/*
+* __ipipe_enable_pipeline() -- We are running on the boot CPU, hw
+* interrupts are off, and secondary CPUs are still lost in space.
+*/
+void __ipipe_enable_pipeline(void)
+{
+unsigned irq;
+
+__ipipe_core_clock = get_cclk(); /* Fetch this once. */
+__ipipe_freq_scale = 1000000000UL / __ipipe_core_clock;
+
+for (irq = 0; irq < NR_IRQS; ++irq)
+ipipe_virtualize_irq(ipipe_root_domain,
+irq,
+(ipipe_irq_handler_t)&asm_do_IRQ,
+NULL,
+&__ipipe_ack_irq,
+IPIPE_HANDLE_MASK | IPIPE_PASS_MASK);
+}
+
+/*
+* __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
+* interrupt protection log is maintained here for each domain. Hw
+* interrupts are masked on entry.
+*/
+void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
+{
+struct ipipe_domain *this_domain, *next_domain;
+struct list_head *head, *pos;
+int m_ack, s = -1;
+
+/*
+* Software-triggered IRQs do not need any ack. The contents
+* of the register frame should only be used when processing
+* the timer interrupt, but not for handling any other
+* interrupt.
+*/
+m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR);
+
+this_domain = ipipe_current_domain;
+
+if (unlikely(test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control)))
+head = &this_domain->p_link;
+else {
+head = __ipipe_pipeline.next;
+next_domain = list_entry(head, struct ipipe_domain, p_link);
+if (likely(test_bit(IPIPE_WIRED_FLAG, &next_domain->irqs[irq].control))) {
+if (!m_ack && next_domain->irqs[irq].acknowledge != NULL)
+next_domain->irqs[irq].acknowledge(irq, irq_desc + irq);
+if (test_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags))
+s = __test_and_set_bit(IPIPE_STALL_FLAG,
+&ipipe_root_cpudom_var(status));
+__ipipe_dispatch_wired(next_domain, irq);
+goto finalize;
+return;
+}
+}
+
+/* Ack the interrupt. */
+
+pos = head;
+
+while (pos != &__ipipe_pipeline) {
+next_domain = list_entry(pos, struct ipipe_domain, p_link);
+/*
+* For each domain handling the incoming IRQ, mark it
+* as pending in its log.
+*/
+if (test_bit(IPIPE_HANDLE_FLAG, &next_domain->irqs[irq].control)) {
+/*
+* Domains that handle this IRQ are polled for
+* acknowledging it by decreasing priority
+* order. The interrupt must be made pending
+* _first_ in the domain's status flags before
+* the PIC is unlocked.
+*/
+__ipipe_set_irq_pending(next_domain, irq);
+
+if (!m_ack && next_domain->irqs[irq].acknowledge != NULL) {
+next_domain->irqs[irq].acknowledge(irq, irq_desc + irq);
+m_ack = 1;
+}
+}
+
+/*
+* If the domain does not want the IRQ to be passed
+* down the interrupt pipe, exit the loop now.
+*/
+if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
+break;
+
+pos = next_domain->p_link.next;
+}
+
+/*
+* Now walk the pipeline, yielding control to the highest
+* priority domain that has pending interrupt(s) or
+* immediately to the current domain if the interrupt has been
+* marked as 'sticky'. This search does not go beyond the
+* current domain in the pipeline. We also enforce the
+* additional root stage lock (blackfin-specific). */
+
+if (test_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags))
+s = __test_and_set_bit(IPIPE_STALL_FLAG,
+&ipipe_root_cpudom_var(status));
+finalize:
+
+__ipipe_walk_pipeline(head);
+
+if (!s)
+__clear_bit(IPIPE_STALL_FLAG,
+&ipipe_root_cpudom_var(status));
+}
+
+int __ipipe_check_root(void)
+{
+return ipipe_root_domain_p;
+}
+
+void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
+{
+struct irq_desc *desc = irq_desc + irq;
+int prio = desc->ic_prio;
+
+desc->depth = 0;
+if (ipd != &ipipe_root &&
+atomic_inc_return(&__ipipe_irq_lvdepth[prio]) == 1)
+__set_bit(prio, &__ipipe_irq_lvmask);
+}
+EXPORT_SYMBOL(__ipipe_enable_irqdesc);
+
+void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
+{
+struct irq_desc *desc = irq_desc + irq;
+int prio = desc->ic_prio;
+
+if (ipd != &ipipe_root &&
+atomic_dec_and_test(&__ipipe_irq_lvdepth[prio]))
+__clear_bit(prio, &__ipipe_irq_lvmask);
+}
+EXPORT_SYMBOL(__ipipe_disable_irqdesc);
+
+void __ipipe_stall_root_raw(void)
+{
+/*
+* This code is called by the ins{bwl} routines (see
+* arch/blackfin/lib/ins.S), which are heavily used by the
+* network stack. It masks all interrupts but those handled by
+* non-root domains, so that we keep decent network transfer
+* rates for Linux without inducing pathological jitter for
+* the real-time domain.
+*/
+__asm__ __volatile__ ("sti %0;" : : "d"(__ipipe_irq_lvmask));
+
+__set_bit(IPIPE_STALL_FLAG,
+&ipipe_root_cpudom_var(status));
+}
+
+void __ipipe_unstall_root_raw(void)
+{
+__clear_bit(IPIPE_STALL_FLAG,
+&ipipe_root_cpudom_var(status));
+
+__asm__ __volatile__ ("sti %0;" : : "d"(bfin_irq_flags));
+}
+
+int __ipipe_syscall_root(struct pt_regs *regs)
+{
+unsigned long flags;
+
+/* We need to run the IRQ tail hook whenever we don't
+* propagate a syscall to higher domains, because we know that
+* important operations might be pending there (e.g. Xenomai
+* deferred rescheduling). */
+
+if (!__ipipe_syscall_watched_p(current, regs->orig_p0)) {
+void (*hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
+hook();
+return 0;
+}
+
+/*
+* This routine either returns:
+* 0 -- if the syscall is to be passed to Linux;
+* 1 -- if the syscall should not be passed to Linux, and no
+* tail work should be performed;
+* -1 -- if the syscall should not be passed to Linux but the
+* tail work has to be performed (for handling signals etc).
+*/
+
+if (__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL) &&
+__ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs) > 0) {
+if (ipipe_root_domain_p && !in_atomic()) {
+/*
+* Sync pending VIRQs before _TIF_NEED_RESCHED
+* is tested.
+*/
+local_irq_save_hw(flags);
+if ((ipipe_root_cpudom_var(irqpend_himask) & IPIPE_IRQMASK_VIRT) != 0)
+__ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT);
+local_irq_restore_hw(flags);
+return -1;
+}
+return 1;
+}
+
+return 0;
+}
+
+unsigned long ipipe_critical_enter(void (*syncfn) (void))
+{
+unsigned long flags;
+
+local_irq_save_hw(flags);
+
+return flags;
+}
+
+void ipipe_critical_exit(unsigned long flags)
+{
+local_irq_restore_hw(flags);
+}
+
+static void __ipipe_no_irqtail(void)
+{
+}
+
+int ipipe_get_sysinfo(struct ipipe_sysinfo *info)
+{
+info->ncpus = num_online_cpus();
+info->cpufreq = ipipe_cpu_freq();
+info->archdep.tmirq = IPIPE_TIMER_IRQ;
+info->archdep.tmfreq = info->cpufreq;
+
+return 0;
+}
+
+/*
+* ipipe_trigger_irq() -- Push the interrupt at front of the pipeline
+* just like if it has been actually received from a hw source. Also
+* works for virtual interrupts.
+*/
+int ipipe_trigger_irq(unsigned irq)
+{
+unsigned long flags;
+
+if (irq >= IPIPE_NR_IRQS ||
+(ipipe_virtual_irq_p(irq)
+&& !test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map)))
+return -EINVAL;
+
+local_irq_save_hw(flags);
+
+__ipipe_handle_irq(irq, NULL);
+
+local_irq_restore_hw(flags);
+
+return 1;
+}
+
+/* Move Linux IRQ to threads. */
+
+static int do_irqd(void *__desc)
+{
+struct irq_desc *desc = __desc;
+unsigned irq = desc - irq_desc;
+int thrprio = desc->thr_prio;
+int thrmask = 1 << thrprio;
+int cpu = smp_processor_id();
+cpumask_t cpumask;
+
+sigfillset(&current->blocked);
+current->flags |= PF_NOFREEZE;
+cpumask = cpumask_of_cpu(cpu);
+set_cpus_allowed(current, cpumask);
+ipipe_setscheduler_root(current, SCHED_FIFO, 50 + thrprio);
+
+while (!kthread_should_stop()) {
+local_irq_disable();
+if (!(desc->status & IRQ_SCHEDULED)) {
+set_current_state(TASK_INTERRUPTIBLE);
+resched:
+local_irq_enable();
+schedule();
+local_irq_disable();
+}
+__set_current_state(TASK_RUNNING);
+/*
+* If higher priority interrupt servers are ready to
+* run, reschedule immediately. We need this for the
+* GPIO demux IRQ handler to unmask the interrupt line
+* _last_, after all GPIO IRQs have run.
+*/
+if (per_cpu(pending_irqthread_mask, cpu) & ~(thrmask|(thrmask-1)))
+goto resched;
+if (--per_cpu(pending_irq_count[thrprio], cpu) == 0)
+per_cpu(pending_irqthread_mask, cpu) &= ~thrmask;
+desc->status &= ~IRQ_SCHEDULED;
+desc->thr_handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs));
+local_irq_enable();
+}
+__set_current_state(TASK_RUNNING);
+return 0;
+}
+
+static void kick_irqd(unsigned irq, void *cookie)
+{
+struct irq_desc *desc = irq_desc + irq;
+int thrprio = desc->thr_prio;
+int thrmask = 1 << thrprio;
+int cpu = smp_processor_id();
+
+if (!(desc->status & IRQ_SCHEDULED)) {
+desc->status |= IRQ_SCHEDULED;
+per_cpu(pending_irqthread_mask, cpu) |= thrmask;
+++per_cpu(pending_irq_count[thrprio], cpu);
+wake_up_process(desc->thread);
+}
+}
+
+int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc)
+{
+if (desc->thread || !create_irq_threads)
+return 0;
+
+desc->thread = kthread_create(do_irqd, desc, "IRQ %d", irq);
+if (desc->thread == NULL) {
+printk(KERN_ERR "irqd: could not create IRQ thread %d!\n", irq);
+return -ENOMEM;
+}
+
+wake_up_process(desc->thread);
+
+desc->thr_handler = ipipe_root_domain->irqs[irq].handler;
+ipipe_root_domain->irqs[irq].handler = &kick_irqd;
+
+return 0;
+}
+
+void __init ipipe_init_irq_threads(void)
+{
+unsigned irq;
+struct irq_desc *desc;
+
+create_irq_threads = 1;
+
+for (irq = 0; irq < NR_IRQS; irq++) {
+desc = irq_desc + irq;
+if (desc->action != NULL ||
+(desc->status & IRQ_NOREQUEST) != 0)
+ipipe_start_irq_thread(irq, desc);
+}
+}
+
+EXPORT_SYMBOL(show_stack);
+
+#ifdef CONFIG_IPIPE_TRACE_MCOUNT
+void notrace _mcount(void);
+EXPORT_SYMBOL(_mcount);
+#endif /* CONFIG_IPIPE_TRACE_MCOUNT */
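Two of the entry points added above form the basic client API: ipipe_virtualize_irq() wires a handler for an IRQ into a domain (as __ipipe_enable_pipeline() does for every IRQ of the root domain), and ipipe_trigger_irq() feeds an interrupt into the pipeline as if the hardware had raised it. A usage sketch, assuming the same call shape as __ipipe_enable_pipeline() above; the wrapper function and handler are hypothetical, and the handler prototype is an assumption rather than something stated by this commit:

    /* Hypothetical client code, not part of this commit. */
    static void my_virq_handler(unsigned irq, void *cookie)
    {
        /* runs when the pipeline synchronizes this IRQ for the domain */
    }

    static int sketch_attach_and_fire(unsigned irq)
    {
        int ret;

        /* Same argument order as the call in __ipipe_enable_pipeline();
         * no cookie, no private ack function. */
        ret = ipipe_virtualize_irq(ipipe_root_domain, irq,
                                   (ipipe_irq_handler_t)&my_virq_handler,
                                   NULL, NULL,
                                   IPIPE_HANDLE_MASK | IPIPE_PASS_MASK);
        if (ret)
            return ret;

        /* Inject the IRQ at the head of the pipeline, as a hw source would;
         * ipipe_trigger_irq() returns 1 on success per the code above. */
        return ipipe_trigger_irq(irq);
    }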
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index 1624e1129681..ab8209cbbad0 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -108,8 +108,9 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 {
 struct pt_regs *old_regs;
 struct irq_desc *desc = irq_desc + irq;
+#ifndef CONFIG_IPIPE
 unsigned short pending, other_ints;
-
+#endif
 old_regs = set_irq_regs(regs);
 
 /*
@@ -137,6 +138,7 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 #endif
 generic_handle_irq(irq);
 
+#ifndef CONFIG_IPIPE /* Useless and bugous over the I-pipe: IRQs are threaded. */
 /* If we're the only interrupt running (ignoring IRQ15 which is for
 syscalls), lower our priority to IRQ14 so that softirqs run at
 that level. If there's another, lower-level interrupt, irq_exit
@@ -146,6 +148,7 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 other_ints = pending & (pending - 1);
 if (other_ints == 0)
 lower_to_irq14();
+#endif /* !CONFIG_IPIPE */
 irq_exit();
 
 set_irq_regs(old_regs);
diff --git a/arch/blackfin/kernel/mcount.S b/arch/blackfin/kernel/mcount.S
new file mode 100644
index 000000000000..edcfb3865f46
--- /dev/null
+++ b/arch/blackfin/kernel/mcount.S
@@ -0,0 +1,70 @@
+/*
+* linux/arch/blackfin/mcount.S
+*
+* Copyright (C) 2006 Analog Devices Inc.
+*
+* 2007/04/12 Save index, length, modify and base registers. --rpm
+*/
+
+#include <linux/linkage.h>
+#include <asm/blackfin.h>
+
+.text
+
+.align 4 /* just in case */
+
+ENTRY(__mcount)
+[--sp] = i0;
+[--sp] = i1;
+[--sp] = i2;
+[--sp] = i3;
+[--sp] = l0;
+[--sp] = l1;
+[--sp] = l2;
+[--sp] = l3;
+[--sp] = m0;
+[--sp] = m1;
+[--sp] = m2;
+[--sp] = m3;
+[--sp] = b0;
+[--sp] = b1;
+[--sp] = b2;
+[--sp] = b3;
+[--sp] = ( r7:0, p5:0 );
+[--sp] = ASTAT;
+
+p1.L = _ipipe_trace_enable;
+p1.H = _ipipe_trace_enable;
+r7 = [p1];
+CC = r7 == 0;
+if CC jump out;
+link 0x10;
+r0 = 0x0;
+[sp + 0xc] = r0; /* v */
+r0 = 0x0; /* type: IPIPE_TRACE_FN */
+r1 = rets;
+p0 = [fp]; /* p0: Prior FP */
+r2 = [p0 + 4]; /* r2: Prior RETS */
+call ___ipipe_trace;
+unlink;
+out:
+ASTAT = [sp++];
+( r7:0, p5:0 ) = [sp++];
+b3 = [sp++];
+b2 = [sp++];
+b1 = [sp++];
+b0 = [sp++];
+m3 = [sp++];
+m2 = [sp++];
+m1 = [sp++];
+m0 = [sp++];
+l3 = [sp++];
+l2 = [sp++];
+l1 = [sp++];
+l0 = [sp++];
+i3 = [sp++];
+i2 = [sp++];
+i1 = [sp++];
+i0 = [sp++];
+rts;
+ENDPROC(__mcount)
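For readers following the register usage in __mcount above: r0 carries the trace type (0, a function-entry event), r1 the current RETS, r2 the caller's RETS recovered through the saved frame pointer, and a fourth argument is spilled to the stack slot at sp + 0xc, consistent with the Blackfin convention of passing the first three arguments in R0-R2. The C-level declaration this implies is sketched below; the symbol name comes from the call in the asm (the extra leading underscore being the asm-level prefix), while the parameter names and types are assumptions, not taken from this patch:

    /* Assumed C prototype of the tracer hook called by __mcount:
     * r0 = type, r1 = eip (RETS), r2 = parent_eip (caller's RETS),
     * stack = v (always 0 here). */
    void notrace __ipipe_trace(int type, unsigned long eip,
                               unsigned long parent_eip, unsigned long v);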
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 1ec0faa8c68d..33e2e8993f7f 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -82,11 +82,14 @@ void cpu_idle(void)__attribute__((l1_text));
 */
 static void default_idle(void)
 {
-local_irq_disable();
+#ifdef CONFIG_IPIPE
+ipipe_suspend_domain();
+#endif
+local_irq_disable_hw();
 if (!need_resched())
 idle_with_irq_disabled();
 
-local_irq_enable();
+local_irq_enable_hw();
 }
 
 /*
diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c
index ec4dfa38eb0a..172b4c588467 100644
--- a/arch/blackfin/kernel/time.c
+++ b/arch/blackfin/kernel/time.c
@@ -31,7 +31,7 @@ static struct irqaction bfin_timer_irq = {
 #endif
 };
 
-#ifdef CONFIG_TICK_SOURCE_SYSTMR0
+#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE)
 void __init setup_system_timer0(void)
 {
 /* Power down the core timer, just to play safe. */
@@ -74,7 +74,7 @@ void __init setup_core_timer(void)
 static void __init
 time_sched_init(irqreturn_t(*timer_routine) (int, void *))
 {
-#ifdef CONFIG_TICK_SOURCE_SYSTMR0
+#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE)
 setup_system_timer0();
 bfin_timer_irq.handler = timer_routine;
 setup_irq(IRQ_TIMER0, &bfin_timer_irq);
@@ -94,7 +94,7 @@ static unsigned long gettimeoffset(void)
 unsigned long offset;
 unsigned long clocks_per_jiffy;
 
-#ifdef CONFIG_TICK_SOURCE_SYSTMR0
+#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE)
 clocks_per_jiffy = bfin_read_TIMER0_PERIOD();
 offset = bfin_read_TIMER0_COUNTER() / \
 (((clocks_per_jiffy + 1) * HZ) / USEC_PER_SEC);
@@ -133,7 +133,8 @@ irqreturn_t timer_interrupt(int irq, void *dummy)
 static long last_rtc_update;
 
 write_seqlock(&xtime_lock);
-#ifdef CONFIG_TICK_SOURCE_SYSTMR0
+#if defined(CONFIG_TICK_SOURCE_SYSTMR0) && !defined(CONFIG_IPIPE)
+/* FIXME: Here TIMIL0 is not set when IPIPE enabled, why? */
 if (get_gptimer_status(0) & TIMER_STATUS_TIMIL0) {
 #endif
 do_timer(1);
@@ -155,13 +156,17 @@ irqreturn_t timer_interrupt(int irq, void *dummy)
 /* Do it again in 60s. */
 last_rtc_update = xtime.tv_sec - 600;
 }
-#ifdef CONFIG_TICK_SOURCE_SYSTMR0
+#if defined(CONFIG_TICK_SOURCE_SYSTMR0) && !defined(CONFIG_IPIPE)
 set_gptimer_status(0, TIMER_STATUS_TIMIL0);
 }
 #endif
 write_sequnlock(&xtime_lock);
 
+#ifdef CONFIG_IPIPE
+update_root_process_times(get_irq_regs());
+#else
 update_process_times(user_mode(get_irq_regs()));
+#endif
 profile_tick(CPU_PROFILING);
 
 return IRQ_HANDLED;
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index 950cc822fb75..956aefb84687 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -577,10 +577,15 @@ asmlinkage void trap_c(struct pt_regs *fp)
 }
 }
 
-info.si_signo = sig;
-info.si_errno = 0;
-info.si_addr = (void __user *)fp->pc;
-force_sig_info(sig, &info, current);
+#ifdef CONFIG_IPIPE
+if (!ipipe_trap_notify(fp->seqstat & 0x3f, fp))
+#endif
+{
+info.si_signo = sig;
+info.si_errno = 0;
+info.si_addr = (void __user *)fp->pc;
+force_sig_info(sig, &info, current);
+}
 
 trace_buffer_restore(j);
 return;