diff options
author | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2009-06-16 20:24:53 -0400 |
---|---|---|
committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2009-06-16 20:24:53 -0400 |
commit | 492b057c426e4aa747484958e18e9da29003985d (patch) | |
tree | 34e08c24618688d8bcc190523028b5f94cce0c0b /arch | |
parent | 313485175da221c388f6a8ecf4c30062ba9bea17 (diff) | |
parent | 300df7dc89cc276377fc020704e34875d5c473b6 (diff) |
Merge commit 'origin/master' into next
Diffstat (limited to 'arch')
124 files changed, 3064 insertions, 1979 deletions
diff --git a/arch/alpha/include/asm/errno.h b/arch/alpha/include/asm/errno.h index 69e2655249d2..98099bda9370 100644 --- a/arch/alpha/include/asm/errno.h +++ b/arch/alpha/include/asm/errno.h | |||
@@ -120,4 +120,6 @@ | |||
120 | #define EOWNERDEAD 136 /* Owner died */ | 120 | #define EOWNERDEAD 136 /* Owner died */ |
121 | #define ENOTRECOVERABLE 137 /* State not recoverable */ | 121 | #define ENOTRECOVERABLE 137 /* State not recoverable */ |
122 | 122 | ||
123 | #define ERFKILL 138 /* Operation not possible due to RF-kill */ | ||
124 | |||
123 | #endif | 125 | #endif |
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 442b87476f97..93bb4247b7ed 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c | |||
@@ -536,7 +536,7 @@ setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, | |||
536 | return err; | 536 | return err; |
537 | } | 537 | } |
538 | 538 | ||
539 | static inline void restart_syscall(struct pt_regs *regs) | 539 | static inline void setup_syscall_restart(struct pt_regs *regs) |
540 | { | 540 | { |
541 | regs->ARM_r0 = regs->ARM_ORIG_r0; | 541 | regs->ARM_r0 = regs->ARM_ORIG_r0; |
542 | regs->ARM_pc -= thumb_mode(regs) ? 2 : 4; | 542 | regs->ARM_pc -= thumb_mode(regs) ? 2 : 4; |
@@ -571,7 +571,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, | |||
571 | } | 571 | } |
572 | /* fallthrough */ | 572 | /* fallthrough */ |
573 | case -ERESTARTNOINTR: | 573 | case -ERESTARTNOINTR: |
574 | restart_syscall(regs); | 574 | setup_syscall_restart(regs); |
575 | } | 575 | } |
576 | } | 576 | } |
577 | 577 | ||
@@ -695,7 +695,7 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall) | |||
695 | if (regs->ARM_r0 == -ERESTARTNOHAND || | 695 | if (regs->ARM_r0 == -ERESTARTNOHAND || |
696 | regs->ARM_r0 == -ERESTARTSYS || | 696 | regs->ARM_r0 == -ERESTARTSYS || |
697 | regs->ARM_r0 == -ERESTARTNOINTR) { | 697 | regs->ARM_r0 == -ERESTARTNOINTR) { |
698 | restart_syscall(regs); | 698 | setup_syscall_restart(regs); |
699 | } | 699 | } |
700 | } | 700 | } |
701 | single_step_set(current); | 701 | single_step_set(current); |
diff --git a/arch/arm/mach-pxa/tosa-bt.c b/arch/arm/mach-pxa/tosa-bt.c index fb0294bd4310..c31e601eb49c 100644 --- a/arch/arm/mach-pxa/tosa-bt.c +++ b/arch/arm/mach-pxa/tosa-bt.c | |||
@@ -35,21 +35,25 @@ static void tosa_bt_off(struct tosa_bt_data *data) | |||
35 | gpio_set_value(data->gpio_reset, 0); | 35 | gpio_set_value(data->gpio_reset, 0); |
36 | } | 36 | } |
37 | 37 | ||
38 | static int tosa_bt_toggle_radio(void *data, enum rfkill_state state) | 38 | static int tosa_bt_set_block(void *data, bool blocked) |
39 | { | 39 | { |
40 | pr_info("BT_RADIO going: %s\n", | 40 | pr_info("BT_RADIO going: %s\n", blocked ? "off" : "on"); |
41 | state == RFKILL_STATE_ON ? "on" : "off"); | ||
42 | 41 | ||
43 | if (state == RFKILL_STATE_ON) { | 42 | if (!blocked) { |
44 | pr_info("TOSA_BT: going ON\n"); | 43 | pr_info("TOSA_BT: going ON\n"); |
45 | tosa_bt_on(data); | 44 | tosa_bt_on(data); |
46 | } else { | 45 | } else { |
47 | pr_info("TOSA_BT: going OFF\n"); | 46 | pr_info("TOSA_BT: going OFF\n"); |
48 | tosa_bt_off(data); | 47 | tosa_bt_off(data); |
49 | } | 48 | } |
49 | |||
50 | return 0; | 50 | return 0; |
51 | } | 51 | } |
52 | 52 | ||
53 | static const struct rfkill_ops tosa_bt_rfkill_ops = { | ||
54 | .set_block = tosa_bt_set_block, | ||
55 | }; | ||
56 | |||
53 | static int tosa_bt_probe(struct platform_device *dev) | 57 | static int tosa_bt_probe(struct platform_device *dev) |
54 | { | 58 | { |
55 | int rc; | 59 | int rc; |
@@ -70,18 +74,14 @@ static int tosa_bt_probe(struct platform_device *dev) | |||
70 | if (rc) | 74 | if (rc) |
71 | goto err_pwr_dir; | 75 | goto err_pwr_dir; |
72 | 76 | ||
73 | rfk = rfkill_allocate(&dev->dev, RFKILL_TYPE_BLUETOOTH); | 77 | rfk = rfkill_alloc("tosa-bt", &dev->dev, RFKILL_TYPE_BLUETOOTH, |
78 | &tosa_bt_rfkill_ops, data); | ||
74 | if (!rfk) { | 79 | if (!rfk) { |
75 | rc = -ENOMEM; | 80 | rc = -ENOMEM; |
76 | goto err_rfk_alloc; | 81 | goto err_rfk_alloc; |
77 | } | 82 | } |
78 | 83 | ||
79 | rfk->name = "tosa-bt"; | 84 | rfkill_set_led_trigger_name(rfk, "tosa-bt"); |
80 | rfk->toggle_radio = tosa_bt_toggle_radio; | ||
81 | rfk->data = data; | ||
82 | #ifdef CONFIG_RFKILL_LEDS | ||
83 | rfk->led_trigger.name = "tosa-bt"; | ||
84 | #endif | ||
85 | 85 | ||
86 | rc = rfkill_register(rfk); | 86 | rc = rfkill_register(rfk); |
87 | if (rc) | 87 | if (rc) |
@@ -92,9 +92,7 @@ static int tosa_bt_probe(struct platform_device *dev) | |||
92 | return 0; | 92 | return 0; |
93 | 93 | ||
94 | err_rfkill: | 94 | err_rfkill: |
95 | if (rfk) | 95 | rfkill_destroy(rfk); |
96 | rfkill_free(rfk); | ||
97 | rfk = NULL; | ||
98 | err_rfk_alloc: | 96 | err_rfk_alloc: |
99 | tosa_bt_off(data); | 97 | tosa_bt_off(data); |
100 | err_pwr_dir: | 98 | err_pwr_dir: |
@@ -113,8 +111,10 @@ static int __devexit tosa_bt_remove(struct platform_device *dev) | |||
113 | 111 | ||
114 | platform_set_drvdata(dev, NULL); | 112 | platform_set_drvdata(dev, NULL); |
115 | 113 | ||
116 | if (rfk) | 114 | if (rfk) { |
117 | rfkill_unregister(rfk); | 115 | rfkill_unregister(rfk); |
116 | rfkill_destroy(rfk); | ||
117 | } | ||
118 | rfk = NULL; | 118 | rfk = NULL; |
119 | 119 | ||
120 | tosa_bt_off(data); | 120 | tosa_bt_off(data); |
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c index 168267a5dfb3..117ad5920e53 100644 --- a/arch/arm/mach-pxa/tosa.c +++ b/arch/arm/mach-pxa/tosa.c | |||
@@ -31,7 +31,6 @@ | |||
31 | #include <linux/input.h> | 31 | #include <linux/input.h> |
32 | #include <linux/gpio.h> | 32 | #include <linux/gpio.h> |
33 | #include <linux/pda_power.h> | 33 | #include <linux/pda_power.h> |
34 | #include <linux/rfkill.h> | ||
35 | #include <linux/spi/spi.h> | 34 | #include <linux/spi/spi.h> |
36 | 35 | ||
37 | #include <asm/setup.h> | 36 | #include <asm/setup.h> |
diff --git a/arch/avr32/kernel/signal.c b/arch/avr32/kernel/signal.c index 803d7be0938f..27227561bad6 100644 --- a/arch/avr32/kernel/signal.c +++ b/arch/avr32/kernel/signal.c | |||
@@ -212,7 +212,7 @@ out: | |||
212 | return err; | 212 | return err; |
213 | } | 213 | } |
214 | 214 | ||
215 | static inline void restart_syscall(struct pt_regs *regs) | 215 | static inline void setup_syscall_restart(struct pt_regs *regs) |
216 | { | 216 | { |
217 | if (regs->r12 == -ERESTART_RESTARTBLOCK) | 217 | if (regs->r12 == -ERESTART_RESTARTBLOCK) |
218 | regs->r8 = __NR_restart_syscall; | 218 | regs->r8 = __NR_restart_syscall; |
@@ -296,7 +296,7 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset, int syscall) | |||
296 | } | 296 | } |
297 | /* fall through */ | 297 | /* fall through */ |
298 | case -ERESTARTNOINTR: | 298 | case -ERESTARTNOINTR: |
299 | restart_syscall(regs); | 299 | setup_syscall_restart(regs); |
300 | } | 300 | } |
301 | } | 301 | } |
302 | 302 | ||
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index a60cfe757914..8ea0d942cdea 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig | |||
@@ -6,59 +6,65 @@ | |||
6 | mainmenu "Blackfin Kernel Configuration" | 6 | mainmenu "Blackfin Kernel Configuration" |
7 | 7 | ||
8 | config MMU | 8 | config MMU |
9 | bool | 9 | def_bool n |
10 | default n | ||
11 | 10 | ||
12 | config FPU | 11 | config FPU |
13 | bool | 12 | def_bool n |
14 | default n | ||
15 | 13 | ||
16 | config RWSEM_GENERIC_SPINLOCK | 14 | config RWSEM_GENERIC_SPINLOCK |
17 | bool | 15 | def_bool y |
18 | default y | ||
19 | 16 | ||
20 | config RWSEM_XCHGADD_ALGORITHM | 17 | config RWSEM_XCHGADD_ALGORITHM |
21 | bool | 18 | def_bool n |
22 | default n | ||
23 | 19 | ||
24 | config BLACKFIN | 20 | config BLACKFIN |
25 | bool | 21 | def_bool y |
26 | default y | 22 | select HAVE_FUNCTION_GRAPH_TRACER |
23 | select HAVE_FUNCTION_TRACER | ||
27 | select HAVE_IDE | 24 | select HAVE_IDE |
25 | select HAVE_KERNEL_GZIP | ||
26 | select HAVE_KERNEL_BZIP2 | ||
27 | select HAVE_KERNEL_LZMA | ||
28 | select HAVE_OPROFILE | 28 | select HAVE_OPROFILE |
29 | select ARCH_WANT_OPTIONAL_GPIOLIB | 29 | select ARCH_WANT_OPTIONAL_GPIOLIB |
30 | 30 | ||
31 | config GENERIC_BUG | ||
32 | def_bool y | ||
33 | depends on BUG | ||
34 | |||
31 | config ZONE_DMA | 35 | config ZONE_DMA |
32 | bool | 36 | def_bool y |
33 | default y | ||
34 | 37 | ||
35 | config GENERIC_FIND_NEXT_BIT | 38 | config GENERIC_FIND_NEXT_BIT |
36 | bool | 39 | def_bool y |
37 | default y | ||
38 | 40 | ||
39 | config GENERIC_HWEIGHT | 41 | config GENERIC_HWEIGHT |
40 | bool | 42 | def_bool y |
41 | default y | ||
42 | 43 | ||
43 | config GENERIC_HARDIRQS | 44 | config GENERIC_HARDIRQS |
44 | bool | 45 | def_bool y |
45 | default y | ||
46 | 46 | ||
47 | config GENERIC_IRQ_PROBE | 47 | config GENERIC_IRQ_PROBE |
48 | bool | 48 | def_bool y |
49 | default y | ||
50 | 49 | ||
51 | config GENERIC_GPIO | 50 | config GENERIC_GPIO |
52 | bool | 51 | def_bool y |
53 | default y | ||
54 | 52 | ||
55 | config FORCE_MAX_ZONEORDER | 53 | config FORCE_MAX_ZONEORDER |
56 | int | 54 | int |
57 | default "14" | 55 | default "14" |
58 | 56 | ||
59 | config GENERIC_CALIBRATE_DELAY | 57 | config GENERIC_CALIBRATE_DELAY |
60 | bool | 58 | def_bool y |
61 | default y | 59 | |
60 | config LOCKDEP_SUPPORT | ||
61 | def_bool y | ||
62 | |||
63 | config STACKTRACE_SUPPORT | ||
64 | def_bool y | ||
65 | |||
66 | config TRACE_IRQFLAGS_SUPPORT | ||
67 | def_bool y | ||
62 | 68 | ||
63 | source "init/Kconfig" | 69 | source "init/Kconfig" |
64 | 70 | ||
@@ -408,12 +414,12 @@ comment "Clock/PLL Setup" | |||
408 | 414 | ||
409 | config CLKIN_HZ | 415 | config CLKIN_HZ |
410 | int "Frequency of the crystal on the board in Hz" | 416 | int "Frequency of the crystal on the board in Hz" |
417 | default "10000000" if BFIN532_IP0X | ||
411 | default "11059200" if BFIN533_STAMP | 418 | default "11059200" if BFIN533_STAMP |
419 | default "24576000" if PNAV10 | ||
420 | default "25000000" # most people use this | ||
412 | default "27000000" if BFIN533_EZKIT | 421 | default "27000000" if BFIN533_EZKIT |
413 | default "25000000" if (BFIN537_STAMP || BFIN527_EZKIT || H8606_HVSISTEMAS || BLACKSTAMP || BFIN526_EZBRD || BFIN538_EZKIT || BFIN518F-EZBRD) | ||
414 | default "30000000" if BFIN561_EZKIT | 422 | default "30000000" if BFIN561_EZKIT |
415 | default "24576000" if PNAV10 | ||
416 | default "10000000" if BFIN532_IP0X | ||
417 | help | 423 | help |
418 | The frequency of CLKIN crystal oscillator on the board in Hz. | 424 | The frequency of CLKIN crystal oscillator on the board in Hz. |
419 | Warning: This value should match the crystal on the board. Otherwise, | 425 | Warning: This value should match the crystal on the board. Otherwise, |
diff --git a/arch/blackfin/Makefile b/arch/blackfin/Makefile index d54c8283825c..6f9533c3d752 100644 --- a/arch/blackfin/Makefile +++ b/arch/blackfin/Makefile | |||
@@ -137,7 +137,7 @@ archclean: | |||
137 | 137 | ||
138 | INSTALL_PATH ?= /tftpboot | 138 | INSTALL_PATH ?= /tftpboot |
139 | boot := arch/$(ARCH)/boot | 139 | boot := arch/$(ARCH)/boot |
140 | BOOT_TARGETS = vmImage | 140 | BOOT_TARGETS = vmImage vmImage.bz2 vmImage.gz vmImage.lzma |
141 | PHONY += $(BOOT_TARGETS) install | 141 | PHONY += $(BOOT_TARGETS) install |
142 | KBUILD_IMAGE := $(boot)/vmImage | 142 | KBUILD_IMAGE := $(boot)/vmImage |
143 | 143 | ||
@@ -150,7 +150,10 @@ install: | |||
150 | $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) install | 150 | $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) install |
151 | 151 | ||
152 | define archhelp | 152 | define archhelp |
153 | echo '* vmImage - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage)' | 153 | echo '* vmImage - Alias to selected kernel format (vmImage.gz by default)' |
154 | echo ' vmImage.bz2 - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.bz2)' | ||
155 | echo '* vmImage.gz - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.gz)' | ||
156 | echo ' vmImage.lzma - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.lzma)' | ||
154 | echo ' install - Install kernel using' | 157 | echo ' install - Install kernel using' |
155 | echo ' (your) ~/bin/$(CROSS_COMPILE)installkernel or' | 158 | echo ' (your) ~/bin/$(CROSS_COMPILE)installkernel or' |
156 | echo ' (distribution) PATH: $(CROSS_COMPILE)installkernel or' | 159 | echo ' (distribution) PATH: $(CROSS_COMPILE)installkernel or' |
diff --git a/arch/blackfin/boot/.gitignore b/arch/blackfin/boot/.gitignore index 3ae03994b88d..229e50808677 100644 --- a/arch/blackfin/boot/.gitignore +++ b/arch/blackfin/boot/.gitignore | |||
@@ -1 +1,2 @@ | |||
1 | +vmImage | 1 | vmImage* |
2 | vmlinux* | ||
diff --git a/arch/blackfin/boot/Makefile b/arch/blackfin/boot/Makefile index e028d13481a9..3ab6f23561dd 100644 --- a/arch/blackfin/boot/Makefile +++ b/arch/blackfin/boot/Makefile | |||
@@ -8,24 +8,41 @@ | |||
8 | 8 | ||
9 | MKIMAGE := $(srctree)/scripts/mkuboot.sh | 9 | MKIMAGE := $(srctree)/scripts/mkuboot.sh |
10 | 10 | ||
11 | targets := vmImage | 11 | targets := vmImage vmImage.bz2 vmImage.gz vmImage.lzma |
12 | extra-y += vmlinux.bin vmlinux.gz | 12 | extra-y += vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma |
13 | 13 | ||
14 | quiet_cmd_uimage = UIMAGE $@ | 14 | quiet_cmd_uimage = UIMAGE $@ |
15 | cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A $(ARCH) -O linux -T kernel \ | 15 | cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A $(ARCH) -O linux -T kernel \ |
16 | -C gzip -n 'Linux-$(KERNELRELEASE)' -a $(CONFIG_BOOT_LOAD) \ | 16 | -C $(2) -n 'Linux-$(KERNELRELEASE)' -a $(CONFIG_BOOT_LOAD) \ |
17 | -e $(shell $(NM) vmlinux | awk '$$NF == "__start" {print $$1}') \ | 17 | -e $(shell $(NM) vmlinux | awk '$$NF == "__start" {print $$1}') \ |
18 | -d $< $@ | 18 | -d $< $@ |
19 | 19 | ||
20 | $(obj)/vmlinux.bin: vmlinux FORCE | 20 | $(obj)/vmlinux.bin: vmlinux FORCE |
21 | $(call if_changed,objcopy) | 21 | $(call if_changed,objcopy) |
22 | 22 | ||
23 | $(obj)/vmlinux.gz: $(obj)/vmlinux.bin FORCE | 23 | $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE |
24 | $(call if_changed,gzip) | 24 | $(call if_changed,gzip) |
25 | 25 | ||
26 | $(obj)/vmImage: $(obj)/vmlinux.gz | 26 | $(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE |
27 | $(call if_changed,uimage) | 27 | $(call if_changed,bzip2) |
28 | @$(kecho) 'Kernel: $@ is ready' | 28 | |
29 | $(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE | ||
30 | $(call if_changed,lzma) | ||
31 | |||
32 | $(obj)/vmImage.bz2: $(obj)/vmlinux.bin.bz2 | ||
33 | $(call if_changed,uimage,bzip2) | ||
34 | |||
35 | $(obj)/vmImage.gz: $(obj)/vmlinux.bin.gz | ||
36 | $(call if_changed,uimage,gzip) | ||
37 | |||
38 | $(obj)/vmImage.lzma: $(obj)/vmlinux.bin.lzma | ||
39 | $(call if_changed,uimage,lzma) | ||
40 | |||
41 | suffix-$(CONFIG_KERNEL_GZIP) := gz | ||
42 | suffix-$(CONFIG_KERNEL_BZIP2) := bz2 | ||
43 | suffix-$(CONFIG_KERNEL_LZMA) := lzma | ||
44 | $(obj)/vmImage: $(obj)/vmImage.$(suffix-y) | ||
45 | @ln -sf $(notdir $<) $@ | ||
29 | 46 | ||
30 | install: | 47 | install: |
31 | sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(BOOTIMAGE) System.map "$(INSTALL_PATH)" | 48 | sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(BOOTIMAGE) System.map "$(INSTALL_PATH)" |
diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h index 7bbf44e4ddf9..b1d92f13ef96 100644 --- a/arch/blackfin/include/asm/atomic.h +++ b/arch/blackfin/include/asm/atomic.h | |||
@@ -90,7 +90,7 @@ static inline int atomic_test_mask(int mask, atomic_t *v) | |||
90 | 90 | ||
91 | static inline void atomic_add(int i, atomic_t *v) | 91 | static inline void atomic_add(int i, atomic_t *v) |
92 | { | 92 | { |
93 | long flags; | 93 | unsigned long flags; |
94 | 94 | ||
95 | local_irq_save_hw(flags); | 95 | local_irq_save_hw(flags); |
96 | v->counter += i; | 96 | v->counter += i; |
@@ -99,7 +99,7 @@ static inline void atomic_add(int i, atomic_t *v) | |||
99 | 99 | ||
100 | static inline void atomic_sub(int i, atomic_t *v) | 100 | static inline void atomic_sub(int i, atomic_t *v) |
101 | { | 101 | { |
102 | long flags; | 102 | unsigned long flags; |
103 | 103 | ||
104 | local_irq_save_hw(flags); | 104 | local_irq_save_hw(flags); |
105 | v->counter -= i; | 105 | v->counter -= i; |
@@ -110,7 +110,7 @@ static inline void atomic_sub(int i, atomic_t *v) | |||
110 | static inline int atomic_add_return(int i, atomic_t *v) | 110 | static inline int atomic_add_return(int i, atomic_t *v) |
111 | { | 111 | { |
112 | int __temp = 0; | 112 | int __temp = 0; |
113 | long flags; | 113 | unsigned long flags; |
114 | 114 | ||
115 | local_irq_save_hw(flags); | 115 | local_irq_save_hw(flags); |
116 | v->counter += i; | 116 | v->counter += i; |
@@ -124,7 +124,7 @@ static inline int atomic_add_return(int i, atomic_t *v) | |||
124 | static inline int atomic_sub_return(int i, atomic_t *v) | 124 | static inline int atomic_sub_return(int i, atomic_t *v) |
125 | { | 125 | { |
126 | int __temp = 0; | 126 | int __temp = 0; |
127 | long flags; | 127 | unsigned long flags; |
128 | 128 | ||
129 | local_irq_save_hw(flags); | 129 | local_irq_save_hw(flags); |
130 | v->counter -= i; | 130 | v->counter -= i; |
@@ -136,7 +136,7 @@ static inline int atomic_sub_return(int i, atomic_t *v) | |||
136 | 136 | ||
137 | static inline void atomic_inc(volatile atomic_t *v) | 137 | static inline void atomic_inc(volatile atomic_t *v) |
138 | { | 138 | { |
139 | long flags; | 139 | unsigned long flags; |
140 | 140 | ||
141 | local_irq_save_hw(flags); | 141 | local_irq_save_hw(flags); |
142 | v->counter++; | 142 | v->counter++; |
@@ -145,7 +145,7 @@ static inline void atomic_inc(volatile atomic_t *v) | |||
145 | 145 | ||
146 | static inline void atomic_dec(volatile atomic_t *v) | 146 | static inline void atomic_dec(volatile atomic_t *v) |
147 | { | 147 | { |
148 | long flags; | 148 | unsigned long flags; |
149 | 149 | ||
150 | local_irq_save_hw(flags); | 150 | local_irq_save_hw(flags); |
151 | v->counter--; | 151 | v->counter--; |
@@ -154,7 +154,7 @@ static inline void atomic_dec(volatile atomic_t *v) | |||
154 | 154 | ||
155 | static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) | 155 | static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) |
156 | { | 156 | { |
157 | long flags; | 157 | unsigned long flags; |
158 | 158 | ||
159 | local_irq_save_hw(flags); | 159 | local_irq_save_hw(flags); |
160 | v->counter &= ~mask; | 160 | v->counter &= ~mask; |
@@ -163,7 +163,7 @@ static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) | |||
163 | 163 | ||
164 | static inline void atomic_set_mask(unsigned int mask, atomic_t *v) | 164 | static inline void atomic_set_mask(unsigned int mask, atomic_t *v) |
165 | { | 165 | { |
166 | long flags; | 166 | unsigned long flags; |
167 | 167 | ||
168 | local_irq_save_hw(flags); | 168 | local_irq_save_hw(flags); |
169 | v->counter |= mask; | 169 | v->counter |= mask; |
diff --git a/arch/blackfin/include/asm/bfin-global.h b/arch/blackfin/include/asm/bfin-global.h index daffc0684e75..e39277ea43e8 100644 --- a/arch/blackfin/include/asm/bfin-global.h +++ b/arch/blackfin/include/asm/bfin-global.h | |||
@@ -31,7 +31,7 @@ | |||
31 | 31 | ||
32 | #ifndef __ASSEMBLY__ | 32 | #ifndef __ASSEMBLY__ |
33 | 33 | ||
34 | #include <asm-generic/sections.h> | 34 | #include <asm/sections.h> |
35 | #include <asm/ptrace.h> | 35 | #include <asm/ptrace.h> |
36 | #include <asm/user.h> | 36 | #include <asm/user.h> |
37 | #include <linux/linkage.h> | 37 | #include <linux/linkage.h> |
@@ -99,15 +99,6 @@ extern const char bfin_board_name[]; | |||
99 | extern unsigned long bfin_sic_iwr[]; | 99 | extern unsigned long bfin_sic_iwr[]; |
100 | extern unsigned vr_wakeup; | 100 | extern unsigned vr_wakeup; |
101 | extern u16 _bfin_swrst; /* shadow for Software Reset Register (SWRST) */ | 101 | extern u16 _bfin_swrst; /* shadow for Software Reset Register (SWRST) */ |
102 | extern unsigned long _ramstart, _ramend, _rambase; | ||
103 | extern unsigned long memory_start, memory_end, physical_mem_end; | ||
104 | extern char _stext_l1[], _etext_l1[], _sdata_l1[], _edata_l1[], _sbss_l1[], | ||
105 | _ebss_l1[], _l1_lma_start[], _sdata_b_l1[], _sbss_b_l1[], _ebss_b_l1[], | ||
106 | _stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[], _sbss_l2[], | ||
107 | _ebss_l2[], _l2_lma_start[]; | ||
108 | |||
109 | /* only used when MTD_UCLINUX */ | ||
110 | extern unsigned long memory_mtd_start, memory_mtd_end, mtd_size; | ||
111 | 102 | ||
112 | #ifdef CONFIG_BFIN_ICACHE_LOCK | 103 | #ifdef CONFIG_BFIN_ICACHE_LOCK |
113 | extern void cache_grab_lock(int way); | 104 | extern void cache_grab_lock(int way); |
diff --git a/arch/blackfin/include/asm/bitops.h b/arch/blackfin/include/asm/bitops.h index 21b036eadab1..75fee2f7d9f2 100644 --- a/arch/blackfin/include/asm/bitops.h +++ b/arch/blackfin/include/asm/bitops.h | |||
@@ -109,7 +109,8 @@ static inline void clear_bit(int nr, volatile unsigned long *addr) | |||
109 | 109 | ||
110 | static inline void change_bit(int nr, volatile unsigned long *addr) | 110 | static inline void change_bit(int nr, volatile unsigned long *addr) |
111 | { | 111 | { |
112 | int mask, flags; | 112 | int mask; |
113 | unsigned long flags; | ||
113 | unsigned long *ADDR = (unsigned long *)addr; | 114 | unsigned long *ADDR = (unsigned long *)addr; |
114 | 115 | ||
115 | ADDR += nr >> 5; | 116 | ADDR += nr >> 5; |
diff --git a/arch/blackfin/include/asm/bug.h b/arch/blackfin/include/asm/bug.h index 6d3e11b1fc57..655e49540e41 100644 --- a/arch/blackfin/include/asm/bug.h +++ b/arch/blackfin/include/asm/bug.h | |||
@@ -2,13 +2,58 @@ | |||
2 | #define _BLACKFIN_BUG_H | 2 | #define _BLACKFIN_BUG_H |
3 | 3 | ||
4 | #ifdef CONFIG_BUG | 4 | #ifdef CONFIG_BUG |
5 | #define HAVE_ARCH_BUG | ||
6 | 5 | ||
7 | #define BUG() do { \ | 6 | #define BFIN_BUG_OPCODE 0xefcd |
8 | dump_bfin_trace_buffer(); \ | 7 | |
9 | printk(KERN_EMERG "BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \ | 8 | #ifdef CONFIG_DEBUG_BUGVERBOSE |
10 | panic("BUG!"); \ | 9 | |
11 | } while (0) | 10 | #define _BUG_OR_WARN(flags) \ |
11 | asm volatile( \ | ||
12 | "1: .hword %0\n" \ | ||
13 | " .section __bug_table,\"a\",@progbits\n" \ | ||
14 | "2: .long 1b\n" \ | ||
15 | " .long %1\n" \ | ||
16 | " .short %2\n" \ | ||
17 | " .short %3\n" \ | ||
18 | " .org 2b + %4\n" \ | ||
19 | " .previous" \ | ||
20 | : \ | ||
21 | : "i"(BFIN_BUG_OPCODE), "i"(__FILE__), \ | ||
22 | "i"(__LINE__), "i"(flags), \ | ||
23 | "i"(sizeof(struct bug_entry))) | ||
24 | |||
25 | #else | ||
26 | |||
27 | #define _BUG_OR_WARN(flags) \ | ||
28 | asm volatile( \ | ||
29 | "1: .hword %0\n" \ | ||
30 | " .section __bug_table,\"a\",@progbits\n" \ | ||
31 | "2: .long 1b\n" \ | ||
32 | " .short %1\n" \ | ||
33 | " .org 2b + %2\n" \ | ||
34 | " .previous" \ | ||
35 | : \ | ||
36 | : "i"(BFIN_BUG_OPCODE), "i"(flags), \ | ||
37 | "i"(sizeof(struct bug_entry))) | ||
38 | |||
39 | #endif /* CONFIG_DEBUG_BUGVERBOSE */ | ||
40 | |||
41 | #define BUG() \ | ||
42 | do { \ | ||
43 | _BUG_OR_WARN(0); \ | ||
44 | for (;;); \ | ||
45 | } while (0) | ||
46 | |||
47 | #define WARN_ON(condition) \ | ||
48 | ({ \ | ||
49 | int __ret_warn_on = !!(condition); \ | ||
50 | if (unlikely(__ret_warn_on)) \ | ||
51 | _BUG_OR_WARN(BUGFLAG_WARNING); \ | ||
52 | unlikely(__ret_warn_on); \ | ||
53 | }) | ||
54 | |||
55 | #define HAVE_ARCH_BUG | ||
56 | #define HAVE_ARCH_WARN_ON | ||
12 | 57 | ||
13 | #endif | 58 | #endif |
14 | 59 | ||
diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h index 86637814cf25..2ef669ed9222 100644 --- a/arch/blackfin/include/asm/cache.h +++ b/arch/blackfin/include/asm/cache.h | |||
@@ -34,9 +34,13 @@ | |||
34 | #define L1_CACHE_SHIFT_MAX 5 | 34 | #define L1_CACHE_SHIFT_MAX 5 |
35 | 35 | ||
36 | #if defined(CONFIG_SMP) && \ | 36 | #if defined(CONFIG_SMP) && \ |
37 | !defined(CONFIG_BFIN_CACHE_COHERENT) && \ | 37 | !defined(CONFIG_BFIN_CACHE_COHERENT) |
38 | defined(CONFIG_BFIN_DCACHE) | 38 | # if defined(CONFIG_BFIN_ICACHE) |
39 | #define __ARCH_SYNC_CORE_DCACHE | 39 | # define __ARCH_SYNC_CORE_ICACHE |
40 | # endif | ||
41 | # if defined(CONFIG_BFIN_DCACHE) | ||
42 | # define __ARCH_SYNC_CORE_DCACHE | ||
43 | # endif | ||
40 | #ifndef __ASSEMBLY__ | 44 | #ifndef __ASSEMBLY__ |
41 | asmlinkage void __raw_smp_mark_barrier_asm(void); | 45 | asmlinkage void __raw_smp_mark_barrier_asm(void); |
42 | asmlinkage void __raw_smp_check_barrier_asm(void); | 46 | asmlinkage void __raw_smp_check_barrier_asm(void); |
@@ -51,6 +55,7 @@ static inline void smp_check_barrier(void) | |||
51 | } | 55 | } |
52 | 56 | ||
53 | void resync_core_dcache(void); | 57 | void resync_core_dcache(void); |
58 | void resync_core_icache(void); | ||
54 | #endif | 59 | #endif |
55 | #endif | 60 | #endif |
56 | 61 | ||
diff --git a/arch/blackfin/include/asm/cacheflush.h b/arch/blackfin/include/asm/cacheflush.h index 94697f0f6f40..5c17dee53b5d 100644 --- a/arch/blackfin/include/asm/cacheflush.h +++ b/arch/blackfin/include/asm/cacheflush.h | |||
@@ -37,6 +37,7 @@ extern void blackfin_dcache_flush_range(unsigned long start_address, unsigned lo | |||
37 | extern void blackfin_dcache_invalidate_range(unsigned long start_address, unsigned long end_address); | 37 | extern void blackfin_dcache_invalidate_range(unsigned long start_address, unsigned long end_address); |
38 | extern void blackfin_dflush_page(void *page); | 38 | extern void blackfin_dflush_page(void *page); |
39 | extern void blackfin_invalidate_entire_dcache(void); | 39 | extern void blackfin_invalidate_entire_dcache(void); |
40 | extern void blackfin_invalidate_entire_icache(void); | ||
40 | 41 | ||
41 | #define flush_dcache_mmap_lock(mapping) do { } while (0) | 42 | #define flush_dcache_mmap_lock(mapping) do { } while (0) |
42 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) | 43 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) |
@@ -97,7 +98,7 @@ do { memcpy(dst, src, len); \ | |||
97 | extern unsigned long reserved_mem_dcache_on; | 98 | extern unsigned long reserved_mem_dcache_on; |
98 | extern unsigned long reserved_mem_icache_on; | 99 | extern unsigned long reserved_mem_icache_on; |
99 | 100 | ||
100 | static inline int bfin_addr_dcachable(unsigned long addr) | 101 | static inline int bfin_addr_dcacheable(unsigned long addr) |
101 | { | 102 | { |
102 | #ifdef CONFIG_BFIN_DCACHE | 103 | #ifdef CONFIG_BFIN_DCACHE |
103 | if (addr < (_ramend - DMA_UNCACHED_REGION)) | 104 | if (addr < (_ramend - DMA_UNCACHED_REGION)) |
diff --git a/arch/blackfin/include/asm/cpu.h b/arch/blackfin/include/asm/cpu.h index c2594ef877f6..565b8136855e 100644 --- a/arch/blackfin/include/asm/cpu.h +++ b/arch/blackfin/include/asm/cpu.h | |||
@@ -34,6 +34,7 @@ struct blackfin_cpudata { | |||
34 | unsigned int dmemctl; | 34 | unsigned int dmemctl; |
35 | unsigned long loops_per_jiffy; | 35 | unsigned long loops_per_jiffy; |
36 | unsigned long dcache_invld_count; | 36 | unsigned long dcache_invld_count; |
37 | unsigned long icache_invld_count; | ||
37 | }; | 38 | }; |
38 | 39 | ||
39 | DECLARE_PER_CPU(struct blackfin_cpudata, cpu_data); | 40 | DECLARE_PER_CPU(struct blackfin_cpudata, cpu_data); |
diff --git a/arch/blackfin/include/asm/ftrace.h b/arch/blackfin/include/asm/ftrace.h index 40a8c178f10d..8643680f0f78 100644 --- a/arch/blackfin/include/asm/ftrace.h +++ b/arch/blackfin/include/asm/ftrace.h | |||
@@ -1 +1,13 @@ | |||
1 | /* empty */ | 1 | /* |
2 | * Blackfin ftrace code | ||
3 | * | ||
4 | * Copyright 2009 Analog Devices Inc. | ||
5 | * Licensed under the GPL-2 or later. | ||
6 | */ | ||
7 | |||
8 | #ifndef __ASM_BFIN_FTRACE_H__ | ||
9 | #define __ASM_BFIN_FTRACE_H__ | ||
10 | |||
11 | #define MCOUNT_INSN_SIZE 8 /* sizeof mcount call: LINK + CALL */ | ||
12 | |||
13 | #endif | ||
diff --git a/arch/blackfin/include/asm/ipipe.h b/arch/blackfin/include/asm/ipipe.h index 51d0bf5e2899..bbe1c3726b69 100644 --- a/arch/blackfin/include/asm/ipipe.h +++ b/arch/blackfin/include/asm/ipipe.h | |||
@@ -35,10 +35,10 @@ | |||
35 | #include <asm/atomic.h> | 35 | #include <asm/atomic.h> |
36 | #include <asm/traps.h> | 36 | #include <asm/traps.h> |
37 | 37 | ||
38 | #define IPIPE_ARCH_STRING "1.9-01" | 38 | #define IPIPE_ARCH_STRING "1.10-00" |
39 | #define IPIPE_MAJOR_NUMBER 1 | 39 | #define IPIPE_MAJOR_NUMBER 1 |
40 | #define IPIPE_MINOR_NUMBER 9 | 40 | #define IPIPE_MINOR_NUMBER 10 |
41 | #define IPIPE_PATCH_NUMBER 1 | 41 | #define IPIPE_PATCH_NUMBER 0 |
42 | 42 | ||
43 | #ifdef CONFIG_SMP | 43 | #ifdef CONFIG_SMP |
44 | #error "I-pipe/blackfin: SMP not implemented" | 44 | #error "I-pipe/blackfin: SMP not implemented" |
@@ -54,10 +54,11 @@ do { \ | |||
54 | 54 | ||
55 | #define task_hijacked(p) \ | 55 | #define task_hijacked(p) \ |
56 | ({ \ | 56 | ({ \ |
57 | int __x__ = ipipe_current_domain != ipipe_root_domain; \ | 57 | int __x__ = __ipipe_root_domain_p; \ |
58 | /* We would need to clear the SYNC flag for the root domain */ \ | 58 | __clear_bit(IPIPE_SYNC_FLAG, &ipipe_root_cpudom_var(status)); \ |
59 | /* over the current processor in SMP mode. */ \ | 59 | if (__x__) \ |
60 | local_irq_enable_hw(); __x__; \ | 60 | local_irq_enable_hw(); \ |
61 | !__x__; \ | ||
61 | }) | 62 | }) |
62 | 63 | ||
63 | struct ipipe_domain; | 64 | struct ipipe_domain; |
@@ -179,23 +180,24 @@ static inline unsigned long __ipipe_ffnz(unsigned long ul) | |||
179 | 180 | ||
180 | #define __ipipe_run_isr(ipd, irq) \ | 181 | #define __ipipe_run_isr(ipd, irq) \ |
181 | do { \ | 182 | do { \ |
182 | if (ipd == ipipe_root_domain) { \ | 183 | if (!__ipipe_pipeline_head_p(ipd)) \ |
183 | local_irq_enable_hw(); \ | 184 | local_irq_enable_hw(); \ |
184 | if (ipipe_virtual_irq_p(irq)) \ | 185 | if (ipd == ipipe_root_domain) { \ |
186 | if (unlikely(ipipe_virtual_irq_p(irq))) { \ | ||
187 | irq_enter(); \ | ||
185 | ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \ | 188 | ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \ |
186 | else \ | 189 | irq_exit(); \ |
190 | } else \ | ||
187 | ipd->irqs[irq].handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs)); \ | 191 | ipd->irqs[irq].handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs)); \ |
188 | local_irq_disable_hw(); \ | ||
189 | } else { \ | 192 | } else { \ |
190 | __clear_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \ | 193 | __clear_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \ |
191 | local_irq_enable_nohead(ipd); \ | ||
192 | ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \ | 194 | ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \ |
193 | /* Attempt to exit the outer interrupt level before \ | 195 | /* Attempt to exit the outer interrupt level before \ |
194 | * starting the deferred IRQ processing. */ \ | 196 | * starting the deferred IRQ processing. */ \ |
195 | local_irq_disable_nohead(ipd); \ | ||
196 | __ipipe_run_irqtail(); \ | 197 | __ipipe_run_irqtail(); \ |
197 | __set_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \ | 198 | __set_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \ |
198 | } \ | 199 | } \ |
200 | local_irq_disable_hw(); \ | ||
199 | } while (0) | 201 | } while (0) |
200 | 202 | ||
201 | #define __ipipe_syscall_watched_p(p, sc) \ | 203 | #define __ipipe_syscall_watched_p(p, sc) \ |
diff --git a/arch/blackfin/include/asm/irq.h b/arch/blackfin/include/asm/irq.h index 7645e85a5f6f..400bdd52ce87 100644 --- a/arch/blackfin/include/asm/irq.h +++ b/arch/blackfin/include/asm/irq.h | |||
@@ -17,270 +17,17 @@ | |||
17 | #ifndef _BFIN_IRQ_H_ | 17 | #ifndef _BFIN_IRQ_H_ |
18 | #define _BFIN_IRQ_H_ | 18 | #define _BFIN_IRQ_H_ |
19 | 19 | ||
20 | /* SYS_IRQS and NR_IRQS are defined in <mach-bf5xx/irq.h>*/ | 20 | #include <linux/irqflags.h> |
21 | #include <mach/irq.h> | ||
22 | #include <asm/pda.h> | ||
23 | #include <asm/processor.h> | ||
24 | |||
25 | #ifdef CONFIG_SMP | ||
26 | /* Forward decl needed due to cdef inter dependencies */ | ||
27 | static inline uint32_t __pure bfin_dspid(void); | ||
28 | # define blackfin_core_id() (bfin_dspid() & 0xff) | ||
29 | # define bfin_irq_flags cpu_pda[blackfin_core_id()].imask | ||
30 | #else | ||
31 | extern unsigned long bfin_irq_flags; | ||
32 | #endif | ||
33 | |||
34 | #ifdef CONFIG_IPIPE | ||
35 | |||
36 | #include <linux/ipipe_trace.h> | ||
37 | 21 | ||
38 | void __ipipe_unstall_root(void); | 22 | /* SYS_IRQS and NR_IRQS are defined in <mach-bf5xx/irq.h> */ |
39 | 23 | #include <mach/irq.h> | |
40 | void __ipipe_restore_root(unsigned long flags); | ||
41 | |||
42 | #ifdef CONFIG_DEBUG_HWERR | ||
43 | # define __all_masked_irq_flags 0x3f | ||
44 | # define __save_and_cli_hw(x) \ | ||
45 | __asm__ __volatile__( \ | ||
46 | "cli %0;" \ | ||
47 | "sti %1;" \ | ||
48 | : "=&d"(x) \ | ||
49 | : "d" (0x3F) \ | ||
50 | ) | ||
51 | #else | ||
52 | # define __all_masked_irq_flags 0x1f | ||
53 | # define __save_and_cli_hw(x) \ | ||
54 | __asm__ __volatile__( \ | ||
55 | "cli %0;" \ | ||
56 | : "=&d"(x) \ | ||
57 | ) | ||
58 | #endif | ||
59 | |||
60 | #define irqs_enabled_from_flags_hw(x) ((x) != __all_masked_irq_flags) | ||
61 | #define raw_irqs_disabled_flags(flags) (!irqs_enabled_from_flags_hw(flags)) | ||
62 | #define local_test_iflag_hw(x) irqs_enabled_from_flags_hw(x) | ||
63 | |||
64 | #define local_save_flags(x) \ | ||
65 | do { \ | ||
66 | (x) = __ipipe_test_root() ? \ | ||
67 | __all_masked_irq_flags : bfin_irq_flags; \ | ||
68 | barrier(); \ | ||
69 | } while (0) | ||
70 | |||
71 | #define local_irq_save(x) \ | ||
72 | do { \ | ||
73 | (x) = __ipipe_test_and_stall_root() ? \ | ||
74 | __all_masked_irq_flags : bfin_irq_flags; \ | ||
75 | barrier(); \ | ||
76 | } while (0) | ||
77 | |||
78 | static inline void local_irq_restore(unsigned long x) | ||
79 | { | ||
80 | barrier(); | ||
81 | __ipipe_restore_root(x == __all_masked_irq_flags); | ||
82 | } | ||
83 | |||
84 | #define local_irq_disable() \ | ||
85 | do { \ | ||
86 | __ipipe_stall_root(); \ | ||
87 | barrier(); \ | ||
88 | } while (0) | ||
89 | |||
90 | static inline void local_irq_enable(void) | ||
91 | { | ||
92 | barrier(); | ||
93 | __ipipe_unstall_root(); | ||
94 | } | ||
95 | |||
96 | #define irqs_disabled() __ipipe_test_root() | ||
97 | |||
98 | #define local_save_flags_hw(x) \ | ||
99 | __asm__ __volatile__( \ | ||
100 | "cli %0;" \ | ||
101 | "sti %0;" \ | ||
102 | : "=d"(x) \ | ||
103 | ) | ||
104 | |||
105 | #define irqs_disabled_hw() \ | ||
106 | ({ \ | ||
107 | unsigned long flags; \ | ||
108 | local_save_flags_hw(flags); \ | ||
109 | !irqs_enabled_from_flags_hw(flags); \ | ||
110 | }) | ||
111 | |||
112 | static inline unsigned long raw_mangle_irq_bits(int virt, unsigned long real) | ||
113 | { | ||
114 | /* Merge virtual and real interrupt mask bits into a single | ||
115 | 32bit word. */ | ||
116 | return (real & ~(1 << 31)) | ((virt != 0) << 31); | ||
117 | } | ||
118 | |||
119 | static inline int raw_demangle_irq_bits(unsigned long *x) | ||
120 | { | ||
121 | int virt = (*x & (1 << 31)) != 0; | ||
122 | *x &= ~(1L << 31); | ||
123 | return virt; | ||
124 | } | ||
125 | |||
126 | #ifdef CONFIG_IPIPE_TRACE_IRQSOFF | ||
127 | |||
128 | #define local_irq_disable_hw() \ | ||
129 | do { \ | ||
130 | int _tmp_dummy; \ | ||
131 | if (!irqs_disabled_hw()) \ | ||
132 | ipipe_trace_begin(0x80000000); \ | ||
133 | __asm__ __volatile__ ("cli %0;" : "=d" (_tmp_dummy) : ); \ | ||
134 | } while (0) | ||
135 | |||
136 | #define local_irq_enable_hw() \ | ||
137 | do { \ | ||
138 | if (irqs_disabled_hw()) \ | ||
139 | ipipe_trace_end(0x80000000); \ | ||
140 | __asm__ __volatile__ ("sti %0;" : : "d"(bfin_irq_flags)); \ | ||
141 | } while (0) | ||
142 | |||
143 | #define local_irq_save_hw(x) \ | ||
144 | do { \ | ||
145 | __save_and_cli_hw(x); \ | ||
146 | if (local_test_iflag_hw(x)) \ | ||
147 | ipipe_trace_begin(0x80000001); \ | ||
148 | } while (0) | ||
149 | |||
150 | #define local_irq_restore_hw(x) \ | ||
151 | do { \ | ||
152 | if (local_test_iflag_hw(x)) { \ | ||
153 | ipipe_trace_end(0x80000001); \ | ||
154 | local_irq_enable_hw_notrace(); \ | ||
155 | } \ | ||
156 | } while (0) | ||
157 | |||
158 | #define local_irq_disable_hw_notrace() \ | ||
159 | do { \ | ||
160 | int _tmp_dummy; \ | ||
161 | __asm__ __volatile__ ("cli %0;" : "=d" (_tmp_dummy) : ); \ | ||
162 | } while (0) | ||
163 | |||
164 | #define local_irq_enable_hw_notrace() \ | ||
165 | __asm__ __volatile__( \ | ||
166 | "sti %0;" \ | ||
167 | : \ | ||
168 | : "d"(bfin_irq_flags) \ | ||
169 | ) | ||
170 | |||
171 | #define local_irq_save_hw_notrace(x) __save_and_cli_hw(x) | ||
172 | |||
173 | #define local_irq_restore_hw_notrace(x) \ | ||
174 | do { \ | ||
175 | if (local_test_iflag_hw(x)) \ | ||
176 | local_irq_enable_hw_notrace(); \ | ||
177 | } while (0) | ||
178 | |||
179 | #else /* CONFIG_IPIPE_TRACE_IRQSOFF */ | ||
180 | |||
181 | #define local_irq_enable_hw() \ | ||
182 | __asm__ __volatile__( \ | ||
183 | "sti %0;" \ | ||
184 | : \ | ||
185 | : "d"(bfin_irq_flags) \ | ||
186 | ) | ||
187 | |||
188 | #define local_irq_disable_hw() \ | ||
189 | do { \ | ||
190 | int _tmp_dummy; \ | ||
191 | __asm__ __volatile__ ( \ | ||
192 | "cli %0;" \ | ||
193 | : "=d" (_tmp_dummy)); \ | ||
194 | } while (0) | ||
195 | |||
196 | #define local_irq_restore_hw(x) \ | ||
197 | do { \ | ||
198 | if (irqs_enabled_from_flags_hw(x)) \ | ||
199 | local_irq_enable_hw(); \ | ||
200 | } while (0) | ||
201 | |||
202 | #define local_irq_save_hw(x) __save_and_cli_hw(x) | ||
203 | |||
204 | #define local_irq_disable_hw_notrace() local_irq_disable_hw() | ||
205 | #define local_irq_enable_hw_notrace() local_irq_enable_hw() | ||
206 | #define local_irq_save_hw_notrace(x) local_irq_save_hw(x) | ||
207 | #define local_irq_restore_hw_notrace(x) local_irq_restore_hw(x) | ||
208 | |||
209 | #endif /* CONFIG_IPIPE_TRACE_IRQSOFF */ | ||
210 | |||
211 | #else /* !CONFIG_IPIPE */ | ||
212 | |||
213 | /* | ||
214 | * Interrupt configuring macros. | ||
215 | */ | ||
216 | #define local_irq_disable() \ | ||
217 | do { \ | ||
218 | int __tmp_dummy; \ | ||
219 | __asm__ __volatile__( \ | ||
220 | "cli %0;" \ | ||
221 | : "=d" (__tmp_dummy) \ | ||
222 | ); \ | ||
223 | } while (0) | ||
224 | |||
225 | #define local_irq_enable() \ | ||
226 | __asm__ __volatile__( \ | ||
227 | "sti %0;" \ | ||
228 | : \ | ||
229 | : "d" (bfin_irq_flags) \ | ||
230 | ) | ||
231 | |||
232 | #ifdef CONFIG_DEBUG_HWERR | ||
233 | # define __save_and_cli(x) \ | ||
234 | __asm__ __volatile__( \ | ||
235 | "cli %0;" \ | ||
236 | "sti %1;" \ | ||
237 | : "=&d" (x) \ | ||
238 | : "d" (0x3F) \ | ||
239 | ) | ||
240 | #else | ||
241 | # define __save_and_cli(x) \ | ||
242 | __asm__ __volatile__( \ | ||
243 | "cli %0;" \ | ||
244 | : "=&d" (x) \ | ||
245 | ) | ||
246 | #endif | ||
247 | |||
248 | #define local_save_flags(x) \ | ||
249 | __asm__ __volatile__( \ | ||
250 | "cli %0;" \ | ||
251 | "sti %0;" \ | ||
252 | : "=d" (x) \ | ||
253 | ) | ||
254 | |||
255 | #ifdef CONFIG_DEBUG_HWERR | ||
256 | #define irqs_enabled_from_flags(x) (((x) & ~0x3f) != 0) | ||
257 | #else | ||
258 | #define irqs_enabled_from_flags(x) ((x) != 0x1f) | ||
259 | #endif | ||
260 | |||
261 | #define local_irq_restore(x) \ | ||
262 | do { \ | ||
263 | if (irqs_enabled_from_flags(x)) \ | ||
264 | local_irq_enable(); \ | ||
265 | } while (0) | ||
266 | |||
267 | /* For spinlocks etc */ | ||
268 | #define local_irq_save(x) __save_and_cli(x) | ||
269 | |||
270 | #define irqs_disabled() \ | ||
271 | ({ \ | ||
272 | unsigned long flags; \ | ||
273 | local_save_flags(flags); \ | ||
274 | !irqs_enabled_from_flags(flags); \ | ||
275 | }) | ||
276 | |||
277 | #define local_irq_save_hw(x) local_irq_save(x) | ||
278 | #define local_irq_restore_hw(x) local_irq_restore(x) | ||
279 | #define local_irq_enable_hw() local_irq_enable() | ||
280 | #define local_irq_disable_hw() local_irq_disable() | ||
281 | #define irqs_disabled_hw() irqs_disabled() | ||
282 | 24 | ||
283 | #endif /* !CONFIG_IPIPE */ | 25 | /* Xenomai IPIPE helpers */ |
26 | #define local_irq_restore_hw(x) local_irq_restore(x) | ||
27 | #define local_irq_save_hw(x) local_irq_save(x) | ||
28 | #define local_irq_enable_hw(x) local_irq_enable(x) | ||
29 | #define local_irq_disable_hw(x) local_irq_disable(x) | ||
30 | #define irqs_disabled_hw(x) irqs_disabled(x) | ||
284 | 31 | ||
285 | #if ANOMALY_05000244 && defined(CONFIG_BFIN_ICACHE) | 32 | #if ANOMALY_05000244 && defined(CONFIG_BFIN_ICACHE) |
286 | # define NOP_PAD_ANOMALY_05000244 "nop; nop;" | 33 | # define NOP_PAD_ANOMALY_05000244 "nop; nop;" |
diff --git a/arch/blackfin/include/asm/irqflags.h b/arch/blackfin/include/asm/irqflags.h new file mode 100644 index 000000000000..139cba4651b1 --- /dev/null +++ b/arch/blackfin/include/asm/irqflags.h | |||
@@ -0,0 +1,63 @@ | |||
1 | /* | ||
2 | * interface to Blackfin CEC | ||
3 | * | ||
4 | * Copyright 2009 Analog Devices Inc. | ||
5 | * Licensed under the GPL-2 or later. | ||
6 | */ | ||
7 | |||
8 | #ifndef __ASM_BFIN_IRQFLAGS_H__ | ||
9 | #define __ASM_BFIN_IRQFLAGS_H__ | ||
10 | |||
11 | #ifdef CONFIG_SMP | ||
12 | # include <asm/pda.h> | ||
13 | # include <asm/processor.h> | ||
14 | /* Forward decl needed due to cdef inter dependencies */ | ||
15 | static inline uint32_t __pure bfin_dspid(void); | ||
16 | # define blackfin_core_id() (bfin_dspid() & 0xff) | ||
17 | # define bfin_irq_flags cpu_pda[blackfin_core_id()].imask | ||
18 | #else | ||
19 | extern unsigned long bfin_irq_flags; | ||
20 | #endif | ||
21 | |||
22 | static inline void bfin_sti(unsigned long flags) | ||
23 | { | ||
24 | asm volatile("sti %0;" : : "d" (flags)); | ||
25 | } | ||
26 | |||
27 | static inline unsigned long bfin_cli(void) | ||
28 | { | ||
29 | unsigned long flags; | ||
30 | asm volatile("cli %0;" : "=d" (flags)); | ||
31 | return flags; | ||
32 | } | ||
33 | |||
34 | static inline void raw_local_irq_disable(void) | ||
35 | { | ||
36 | bfin_cli(); | ||
37 | } | ||
38 | static inline void raw_local_irq_enable(void) | ||
39 | { | ||
40 | bfin_sti(bfin_irq_flags); | ||
41 | } | ||
42 | |||
43 | #define raw_local_save_flags(flags) do { (flags) = bfin_read_IMASK(); } while (0) | ||
44 | |||
45 | #define raw_irqs_disabled_flags(flags) (((flags) & ~0x3f) == 0) | ||
46 | |||
47 | static inline void raw_local_irq_restore(unsigned long flags) | ||
48 | { | ||
49 | if (!raw_irqs_disabled_flags(flags)) | ||
50 | raw_local_irq_enable(); | ||
51 | } | ||
52 | |||
53 | static inline unsigned long __raw_local_irq_save(void) | ||
54 | { | ||
55 | unsigned long flags = bfin_cli(); | ||
56 | #ifdef CONFIG_DEBUG_HWERR | ||
57 | bfin_sti(0x3f); | ||
58 | #endif | ||
59 | return flags; | ||
60 | } | ||
61 | #define raw_local_irq_save(flags) do { (flags) = __raw_local_irq_save(); } while (0) | ||
62 | |||
63 | #endif | ||
diff --git a/arch/blackfin/include/asm/mutex-dec.h b/arch/blackfin/include/asm/mutex-dec.h deleted file mode 100644 index 0134151656af..000000000000 --- a/arch/blackfin/include/asm/mutex-dec.h +++ /dev/null | |||
@@ -1,112 +0,0 @@ | |||
1 | /* | ||
2 | * include/asm-generic/mutex-dec.h | ||
3 | * | ||
4 | * Generic implementation of the mutex fastpath, based on atomic | ||
5 | * decrement/increment. | ||
6 | */ | ||
7 | #ifndef _ASM_GENERIC_MUTEX_DEC_H | ||
8 | #define _ASM_GENERIC_MUTEX_DEC_H | ||
9 | |||
10 | /** | ||
11 | * __mutex_fastpath_lock - try to take the lock by moving the count | ||
12 | * from 1 to a 0 value | ||
13 | * @count: pointer of type atomic_t | ||
14 | * @fail_fn: function to call if the original value was not 1 | ||
15 | * | ||
16 | * Change the count from 1 to a value lower than 1, and call <fail_fn> if | ||
17 | * it wasn't 1 originally. This function MUST leave the value lower than | ||
18 | * 1 even when the "1" assertion wasn't true. | ||
19 | */ | ||
20 | static inline void | ||
21 | __mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *)) | ||
22 | { | ||
23 | if (unlikely(atomic_dec_return(count) < 0)) | ||
24 | fail_fn(count); | ||
25 | else | ||
26 | smp_mb(); | ||
27 | } | ||
28 | |||
29 | /** | ||
30 | * __mutex_fastpath_lock_retval - try to take the lock by moving the count | ||
31 | * from 1 to a 0 value | ||
32 | * @count: pointer of type atomic_t | ||
33 | * @fail_fn: function to call if the original value was not 1 | ||
34 | * | ||
35 | * Change the count from 1 to a value lower than 1, and call <fail_fn> if | ||
36 | * it wasn't 1 originally. This function returns 0 if the fastpath succeeds, | ||
37 | * or anything the slow path function returns. | ||
38 | */ | ||
39 | static inline int | ||
40 | __mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *)) | ||
41 | { | ||
42 | if (unlikely(atomic_dec_return(count) < 0)) | ||
43 | return fail_fn(count); | ||
44 | else { | ||
45 | smp_mb(); | ||
46 | return 0; | ||
47 | } | ||
48 | } | ||
49 | |||
50 | /** | ||
51 | * __mutex_fastpath_unlock - try to promote the count from 0 to 1 | ||
52 | * @count: pointer of type atomic_t | ||
53 | * @fail_fn: function to call if the original value was not 0 | ||
54 | * | ||
55 | * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>. | ||
56 | * In the failure case, this function is allowed to either set the value to | ||
57 | * 1, or to set it to a value lower than 1. | ||
58 | * | ||
59 | * If the implementation sets it to a value of lower than 1, then the | ||
60 | * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs | ||
61 | * to return 0 otherwise. | ||
62 | */ | ||
63 | static inline void | ||
64 | __mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *)) | ||
65 | { | ||
66 | smp_mb(); | ||
67 | if (unlikely(atomic_inc_return(count) <= 0)) | ||
68 | fail_fn(count); | ||
69 | } | ||
70 | |||
71 | #define __mutex_slowpath_needs_to_unlock() 1 | ||
72 | |||
73 | /** | ||
74 | * __mutex_fastpath_trylock - try to acquire the mutex, without waiting | ||
75 | * | ||
76 | * @count: pointer of type atomic_t | ||
77 | * @fail_fn: fallback function | ||
78 | * | ||
79 | * Change the count from 1 to a value lower than 1, and return 0 (failure) | ||
80 | * if it wasn't 1 originally, or return 1 (success) otherwise. This function | ||
81 | * MUST leave the value lower than 1 even when the "1" assertion wasn't true. | ||
82 | * Additionally, if the value was < 0 originally, this function must not leave | ||
83 | * it to 0 on failure. | ||
84 | * | ||
85 | * If the architecture has no effective trylock variant, it should call the | ||
86 | * <fail_fn> spinlock-based trylock variant unconditionally. | ||
87 | */ | ||
88 | static inline int | ||
89 | __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) | ||
90 | { | ||
91 | /* | ||
92 | * We have two variants here. The cmpxchg based one is the best one | ||
93 | * because it never induce a false contention state. It is included | ||
94 | * here because architectures using the inc/dec algorithms over the | ||
95 | * xchg ones are much more likely to support cmpxchg natively. | ||
96 | * | ||
97 | * If not we fall back to the spinlock based variant - that is | ||
98 | * just as efficient (and simpler) as a 'destructive' probing of | ||
99 | * the mutex state would be. | ||
100 | */ | ||
101 | #ifdef __HAVE_ARCH_CMPXCHG | ||
102 | if (likely(atomic_cmpxchg(count, 1, 0) == 1)) { | ||
103 | smp_mb(); | ||
104 | return 1; | ||
105 | } | ||
106 | return 0; | ||
107 | #else | ||
108 | return fail_fn(count); | ||
109 | #endif | ||
110 | } | ||
111 | |||
112 | #endif | ||
diff --git a/arch/blackfin/include/asm/sections.h b/arch/blackfin/include/asm/sections.h index 1443c3353a8c..e7fd0ecd73f7 100644 --- a/arch/blackfin/include/asm/sections.h +++ b/arch/blackfin/include/asm/sections.h | |||
@@ -4,4 +4,15 @@ | |||
4 | /* nothing to see, move along */ | 4 | /* nothing to see, move along */ |
5 | #include <asm-generic/sections.h> | 5 | #include <asm-generic/sections.h> |
6 | 6 | ||
7 | /* only used when MTD_UCLINUX */ | ||
8 | extern unsigned long memory_mtd_start, memory_mtd_end, mtd_size; | ||
9 | |||
10 | extern unsigned long _ramstart, _ramend, _rambase; | ||
11 | extern unsigned long memory_start, memory_end, physical_mem_end; | ||
12 | |||
13 | extern char _stext_l1[], _etext_l1[], _sdata_l1[], _edata_l1[], _sbss_l1[], | ||
14 | _ebss_l1[], _l1_lma_start[], _sdata_b_l1[], _sbss_b_l1[], _ebss_b_l1[], | ||
15 | _stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[], _sbss_l2[], | ||
16 | _ebss_l2[], _l2_lma_start[]; | ||
17 | |||
7 | #endif | 18 | #endif |
diff --git a/arch/blackfin/include/asm/system.h b/arch/blackfin/include/asm/system.h index a4c8254bec55..294dbda24164 100644 --- a/arch/blackfin/include/asm/system.h +++ b/arch/blackfin/include/asm/system.h | |||
@@ -35,10 +35,10 @@ | |||
35 | #define _BLACKFIN_SYSTEM_H | 35 | #define _BLACKFIN_SYSTEM_H |
36 | 36 | ||
37 | #include <linux/linkage.h> | 37 | #include <linux/linkage.h> |
38 | #include <linux/compiler.h> | 38 | #include <linux/irqflags.h> |
39 | #include <mach/anomaly.h> | 39 | #include <mach/anomaly.h> |
40 | #include <asm/cache.h> | ||
40 | #include <asm/pda.h> | 41 | #include <asm/pda.h> |
41 | #include <asm/processor.h> | ||
42 | #include <asm/irq.h> | 42 | #include <asm/irq.h> |
43 | 43 | ||
44 | /* | 44 | /* |
diff --git a/arch/blackfin/include/asm/unistd.h b/arch/blackfin/include/asm/unistd.h index cf5066d3efd2..da35133c171d 100644 --- a/arch/blackfin/include/asm/unistd.h +++ b/arch/blackfin/include/asm/unistd.h | |||
@@ -380,8 +380,9 @@ | |||
380 | #define __NR_inotify_init1 365 | 380 | #define __NR_inotify_init1 365 |
381 | #define __NR_preadv 366 | 381 | #define __NR_preadv 366 |
382 | #define __NR_pwritev 367 | 382 | #define __NR_pwritev 367 |
383 | #define __NR_rt_tgsigqueueinfo 368 | ||
383 | 384 | ||
384 | #define __NR_syscall 368 | 385 | #define __NR_syscall 369 |
385 | #define NR_syscalls __NR_syscall | 386 | #define NR_syscalls __NR_syscall |
386 | 387 | ||
387 | /* Old optional stuff no one actually uses */ | 388 | /* Old optional stuff no one actually uses */ |
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile index fd4d4328a0f2..3731088e181b 100644 --- a/arch/blackfin/kernel/Makefile +++ b/arch/blackfin/kernel/Makefile | |||
@@ -15,6 +15,10 @@ else | |||
15 | obj-y += time.o | 15 | obj-y += time.o |
16 | endif | 16 | endif |
17 | 17 | ||
18 | obj-$(CONFIG_FUNCTION_TRACER) += ftrace-entry.o | ||
19 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o | ||
20 | CFLAGS_REMOVE_ftrace.o = -pg | ||
21 | |||
18 | obj-$(CONFIG_IPIPE) += ipipe.o | 22 | obj-$(CONFIG_IPIPE) += ipipe.o |
19 | obj-$(CONFIG_IPIPE_TRACE_MCOUNT) += mcount.o | 23 | obj-$(CONFIG_IPIPE_TRACE_MCOUNT) += mcount.o |
20 | obj-$(CONFIG_BFIN_GPTIMERS) += gptimers.o | 24 | obj-$(CONFIG_BFIN_GPTIMERS) += gptimers.o |
@@ -23,6 +27,7 @@ obj-$(CONFIG_MODULES) += module.o | |||
23 | obj-$(CONFIG_KGDB) += kgdb.o | 27 | obj-$(CONFIG_KGDB) += kgdb.o |
24 | obj-$(CONFIG_KGDB_TESTS) += kgdb_test.o | 28 | obj-$(CONFIG_KGDB_TESTS) += kgdb_test.o |
25 | obj-$(CONFIG_EARLY_PRINTK) += early_printk.o | 29 | obj-$(CONFIG_EARLY_PRINTK) += early_printk.o |
30 | obj-$(CONFIG_STACKTRACE) += stacktrace.o | ||
26 | 31 | ||
27 | # the kgdb test puts code into L2 and without linker | 32 | # the kgdb test puts code into L2 and without linker |
28 | # relaxation, we need to force long calls to/from it | 33 | # relaxation, we need to force long calls to/from it |
diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c index 763ed84ba459..e0bf8cc06907 100644 --- a/arch/blackfin/kernel/bfin_dma_5xx.c +++ b/arch/blackfin/kernel/bfin_dma_5xx.c | |||
@@ -453,10 +453,10 @@ void *dma_memcpy(void *pdst, const void *psrc, size_t size) | |||
453 | unsigned long src = (unsigned long)psrc; | 453 | unsigned long src = (unsigned long)psrc; |
454 | size_t bulk, rest; | 454 | size_t bulk, rest; |
455 | 455 | ||
456 | if (bfin_addr_dcachable(src)) | 456 | if (bfin_addr_dcacheable(src)) |
457 | blackfin_dcache_flush_range(src, src + size); | 457 | blackfin_dcache_flush_range(src, src + size); |
458 | 458 | ||
459 | if (bfin_addr_dcachable(dst)) | 459 | if (bfin_addr_dcacheable(dst)) |
460 | blackfin_dcache_invalidate_range(dst, dst + size); | 460 | blackfin_dcache_invalidate_range(dst, dst + size); |
461 | 461 | ||
462 | bulk = size & ~0xffff; | 462 | bulk = size & ~0xffff; |
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c index 53e893ff708a..aa05e638fb7c 100644 --- a/arch/blackfin/kernel/bfin_ksyms.c +++ b/arch/blackfin/kernel/bfin_ksyms.c | |||
@@ -103,3 +103,8 @@ EXPORT_SYMBOL(__raw_smp_mark_barrier_asm); | |||
103 | EXPORT_SYMBOL(__raw_smp_check_barrier_asm); | 103 | EXPORT_SYMBOL(__raw_smp_check_barrier_asm); |
104 | #endif | 104 | #endif |
105 | #endif | 105 | #endif |
106 | |||
107 | #ifdef CONFIG_FUNCTION_TRACER | ||
108 | extern void _mcount(void); | ||
109 | EXPORT_SYMBOL(_mcount); | ||
110 | #endif | ||
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c index 87463ce87f5a..784923e52a9a 100644 --- a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c +++ b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c | |||
@@ -151,7 +151,7 @@ static noinline int dcplb_miss(unsigned int cpu) | |||
151 | 151 | ||
152 | d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB; | 152 | d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB; |
153 | #ifdef CONFIG_BFIN_DCACHE | 153 | #ifdef CONFIG_BFIN_DCACHE |
154 | if (bfin_addr_dcachable(addr)) { | 154 | if (bfin_addr_dcacheable(addr)) { |
155 | d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND; | 155 | d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND; |
156 | #ifdef CONFIG_BFIN_WT | 156 | #ifdef CONFIG_BFIN_WT |
157 | d_data |= CPLB_L1_AOW | CPLB_WT; | 157 | d_data |= CPLB_L1_AOW | CPLB_WT; |
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbmgr.c b/arch/blackfin/kernel/cplb-nompu/cplbmgr.c index 8cbb47c7b663..12b030842fdb 100644 --- a/arch/blackfin/kernel/cplb-nompu/cplbmgr.c +++ b/arch/blackfin/kernel/cplb-nompu/cplbmgr.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <asm/cplbinit.h> | 28 | #include <asm/cplbinit.h> |
29 | #include <asm/cplb.h> | 29 | #include <asm/cplb.h> |
30 | #include <asm/mmu_context.h> | 30 | #include <asm/mmu_context.h> |
31 | #include <asm/traps.h> | ||
31 | 32 | ||
32 | /* | 33 | /* |
33 | * WARNING | 34 | * WARNING |
@@ -100,28 +101,6 @@ static inline void write_icplb_data(int cpu, int idx, unsigned long data, | |||
100 | #endif | 101 | #endif |
101 | } | 102 | } |
102 | 103 | ||
103 | /* | ||
104 | * Given the contents of the status register, return the index of the | ||
105 | * CPLB that caused the fault. | ||
106 | */ | ||
107 | static inline int faulting_cplb_index(int status) | ||
108 | { | ||
109 | int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF); | ||
110 | return 30 - signbits; | ||
111 | } | ||
112 | |||
113 | /* | ||
114 | * Given the contents of the status register and the DCPLB_DATA contents, | ||
115 | * return true if a write access should be permitted. | ||
116 | */ | ||
117 | static inline int write_permitted(int status, unsigned long data) | ||
118 | { | ||
119 | if (status & FAULT_USERSUPV) | ||
120 | return !!(data & CPLB_SUPV_WR); | ||
121 | else | ||
122 | return !!(data & CPLB_USER_WR); | ||
123 | } | ||
124 | |||
125 | /* Counters to implement round-robin replacement. */ | 104 | /* Counters to implement round-robin replacement. */ |
126 | static int icplb_rr_index[NR_CPUS] PDT_ATTR; | 105 | static int icplb_rr_index[NR_CPUS] PDT_ATTR; |
127 | static int dcplb_rr_index[NR_CPUS] PDT_ATTR; | 106 | static int dcplb_rr_index[NR_CPUS] PDT_ATTR; |
@@ -245,43 +224,16 @@ MGR_ATTR static int dcplb_miss(int cpu) | |||
245 | return CPLB_RELOADED; | 224 | return CPLB_RELOADED; |
246 | } | 225 | } |
247 | 226 | ||
248 | MGR_ATTR static noinline int dcplb_protection_fault(int cpu) | ||
249 | { | ||
250 | int status = bfin_read_DCPLB_STATUS(); | ||
251 | |||
252 | nr_dcplb_prot[cpu]++; | ||
253 | |||
254 | if (likely(status & FAULT_RW)) { | ||
255 | int idx = faulting_cplb_index(status); | ||
256 | unsigned long regaddr = DCPLB_DATA0 + idx * 4; | ||
257 | unsigned long data = bfin_read32(regaddr); | ||
258 | |||
259 | /* Check if fault is to dirty a clean page */ | ||
260 | if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) && | ||
261 | write_permitted(status, data)) { | ||
262 | |||
263 | dcplb_tbl[cpu][idx].data = data; | ||
264 | bfin_write32(regaddr, data); | ||
265 | return CPLB_RELOADED; | ||
266 | } | ||
267 | } | ||
268 | |||
269 | return CPLB_PROT_VIOL; | ||
270 | } | ||
271 | |||
272 | MGR_ATTR int cplb_hdr(int seqstat, struct pt_regs *regs) | 227 | MGR_ATTR int cplb_hdr(int seqstat, struct pt_regs *regs) |
273 | { | 228 | { |
274 | int cause = seqstat & 0x3f; | 229 | int cause = seqstat & 0x3f; |
275 | unsigned int cpu = smp_processor_id(); | 230 | unsigned int cpu = smp_processor_id(); |
276 | switch (cause) { | 231 | switch (cause) { |
277 | case 0x2C: | 232 | case VEC_CPLB_I_M: |
278 | return icplb_miss(cpu); | 233 | return icplb_miss(cpu); |
279 | case 0x26: | 234 | case VEC_CPLB_M: |
280 | return dcplb_miss(cpu); | 235 | return dcplb_miss(cpu); |
281 | default: | 236 | default: |
282 | if (unlikely(cause == 0x23)) | ||
283 | return dcplb_protection_fault(cpu); | ||
284 | |||
285 | return CPLB_UNKNOWN_ERR; | 237 | return CPLB_UNKNOWN_ERR; |
286 | } | 238 | } |
287 | } | 239 | } |
diff --git a/arch/blackfin/kernel/early_printk.c b/arch/blackfin/kernel/early_printk.c index 3302719173ca..2ab56811841c 100644 --- a/arch/blackfin/kernel/early_printk.c +++ b/arch/blackfin/kernel/early_printk.c | |||
@@ -202,11 +202,15 @@ asmlinkage void __init init_early_exception_vectors(void) | |||
202 | asmlinkage void __init early_trap_c(struct pt_regs *fp, void *retaddr) | 202 | asmlinkage void __init early_trap_c(struct pt_regs *fp, void *retaddr) |
203 | { | 203 | { |
204 | /* This can happen before the uart is initialized, so initialize | 204 | /* This can happen before the uart is initialized, so initialize |
205 | * the UART now | 205 | * the UART now (but only if we are running on the processor we think |
206 | * we are compiled for - otherwise we write to MMRs that don't exist, | ||
207 | * and cause other problems. Nothing comes out the UART, but it does | ||
208 | * end up in the __buf_log. | ||
206 | */ | 209 | */ |
207 | if (likely(early_console == NULL)) | 210 | if (likely(early_console == NULL) && CPUID == bfin_cpuid()) |
208 | setup_early_printk(DEFAULT_EARLY_PORT); | 211 | setup_early_printk(DEFAULT_EARLY_PORT); |
209 | 212 | ||
213 | printk(KERN_EMERG "Early panic\n"); | ||
210 | dump_bfin_mem(fp); | 214 | dump_bfin_mem(fp); |
211 | show_regs(fp); | 215 | show_regs(fp); |
212 | dump_bfin_trace_buffer(); | 216 | dump_bfin_trace_buffer(); |
diff --git a/arch/blackfin/kernel/ftrace-entry.S b/arch/blackfin/kernel/ftrace-entry.S new file mode 100644 index 000000000000..6980b7a0615d --- /dev/null +++ b/arch/blackfin/kernel/ftrace-entry.S | |||
@@ -0,0 +1,140 @@ | |||
1 | /* | ||
2 | * mcount and friends -- ftrace stuff | ||
3 | * | ||
4 | * Copyright (C) 2009 Analog Devices Inc. | ||
5 | * Licensed under the GPL-2 or later. | ||
6 | */ | ||
7 | |||
8 | #include <linux/linkage.h> | ||
9 | #include <asm/ftrace.h> | ||
10 | |||
11 | .text | ||
12 | |||
13 | /* GCC will have called us before setting up the function prologue, so we | ||
14 | * can clobber the normal scratch registers, but we need to make sure to | ||
15 | * save/restore the registers used for argument passing (R0-R2) in case | ||
16 | * the profiled function is using them. With data registers, R3 is the | ||
17 | * only one we can blow away. With pointer registers, we have P0-P2. | ||
18 | * | ||
19 | * Upon entry, the RETS will point to the top of the current profiled | ||
20 | * function. And since GCC setup the frame for us, the previous function | ||
21 | * will be waiting there. mmmm pie. | ||
22 | */ | ||
23 | ENTRY(__mcount) | ||
24 | /* save third function arg early so we can do testing below */ | ||
25 | [--sp] = r2; | ||
26 | |||
27 | /* load the function pointer to the tracer */ | ||
28 | p0.l = _ftrace_trace_function; | ||
29 | p0.h = _ftrace_trace_function; | ||
30 | r3 = [p0]; | ||
31 | |||
32 | /* optional micro optimization: don't call the stub tracer */ | ||
33 | r2.l = _ftrace_stub; | ||
34 | r2.h = _ftrace_stub; | ||
35 | cc = r2 == r3; | ||
36 | if ! cc jump .Ldo_trace; | ||
37 | |||
38 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
39 | /* if the ftrace_graph_return function pointer is not set to | ||
40 | * the ftrace_stub entry, call prepare_ftrace_return(). | ||
41 | */ | ||
42 | p0.l = _ftrace_graph_return; | ||
43 | p0.h = _ftrace_graph_return; | ||
44 | r3 = [p0]; | ||
45 | cc = r2 == r3; | ||
46 | if ! cc jump _ftrace_graph_caller; | ||
47 | |||
48 | /* similarly, if the ftrace_graph_entry function pointer is not | ||
49 | * set to the ftrace_graph_entry_stub entry, ... | ||
50 | */ | ||
51 | p0.l = _ftrace_graph_entry; | ||
52 | p0.h = _ftrace_graph_entry; | ||
53 | r2.l = _ftrace_graph_entry_stub; | ||
54 | r2.h = _ftrace_graph_entry_stub; | ||
55 | r3 = [p0]; | ||
56 | cc = r2 == r3; | ||
57 | if ! cc jump _ftrace_graph_caller; | ||
58 | #endif | ||
59 | |||
60 | r2 = [sp++]; | ||
61 | rts; | ||
62 | |||
63 | .Ldo_trace: | ||
64 | |||
65 | /* save first/second function arg and the return register */ | ||
66 | [--sp] = r0; | ||
67 | [--sp] = r1; | ||
68 | [--sp] = rets; | ||
69 | |||
70 | /* setup the tracer function */ | ||
71 | p0 = r3; | ||
72 | |||
73 | /* tracer(ulong frompc, ulong selfpc): | ||
74 | * frompc: the pc that did the call to ... | ||
75 | * selfpc: ... this location | ||
76 | * the selfpc itself will need adjusting for the mcount call | ||
77 | */ | ||
78 | r1 = rets; | ||
79 | r0 = [fp + 4]; | ||
80 | r1 += -MCOUNT_INSN_SIZE; | ||
81 | |||
82 | /* call the tracer */ | ||
83 | call (p0); | ||
84 | |||
85 | /* restore state and get out of dodge */ | ||
86 | .Lfinish_trace: | ||
87 | rets = [sp++]; | ||
88 | r1 = [sp++]; | ||
89 | r0 = [sp++]; | ||
90 | r2 = [sp++]; | ||
91 | |||
92 | .globl _ftrace_stub | ||
93 | _ftrace_stub: | ||
94 | rts; | ||
95 | ENDPROC(__mcount) | ||
96 | |||
97 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
98 | /* The prepare_ftrace_return() function is similar to the trace function | ||
99 | * except it takes a pointer to the location of the frompc. This is so | ||
100 | * the prepare_ftrace_return() can hijack it temporarily for probing | ||
101 | * purposes. | ||
102 | */ | ||
103 | ENTRY(_ftrace_graph_caller) | ||
104 | /* save first/second function arg and the return register */ | ||
105 | [--sp] = r0; | ||
106 | [--sp] = r1; | ||
107 | [--sp] = rets; | ||
108 | |||
109 | r0 = fp; | ||
110 | r1 = rets; | ||
111 | r0 += 4; | ||
112 | r1 += -MCOUNT_INSN_SIZE; | ||
113 | call _prepare_ftrace_return; | ||
114 | |||
115 | jump .Lfinish_trace; | ||
116 | ENDPROC(_ftrace_graph_caller) | ||
117 | |||
118 | /* Undo the rewrite caused by ftrace_graph_caller(). The common function | ||
119 | * ftrace_return_to_handler() will return the original rets so we can | ||
120 | * restore it and be on our way. | ||
121 | */ | ||
122 | ENTRY(_return_to_handler) | ||
123 | /* make sure original return values are saved */ | ||
124 | [--sp] = p0; | ||
125 | [--sp] = r0; | ||
126 | [--sp] = r1; | ||
127 | |||
128 | /* get original return address */ | ||
129 | call _ftrace_return_to_handler; | ||
130 | rets = r0; | ||
131 | |||
132 | /* anomaly 05000371 - make sure we have at least three instructions | ||
133 | * between rets setting and the return | ||
134 | */ | ||
135 | r1 = [sp++]; | ||
136 | r0 = [sp++]; | ||
137 | p0 = [sp++]; | ||
138 | rts; | ||
139 | ENDPROC(_return_to_handler) | ||
140 | #endif | ||
diff --git a/arch/blackfin/kernel/ftrace.c b/arch/blackfin/kernel/ftrace.c new file mode 100644 index 000000000000..905bfc40a00b --- /dev/null +++ b/arch/blackfin/kernel/ftrace.c | |||
@@ -0,0 +1,42 @@ | |||
1 | /* | ||
2 | * ftrace graph code | ||
3 | * | ||
4 | * Copyright (C) 2009 Analog Devices Inc. | ||
5 | * Licensed under the GPL-2 or later. | ||
6 | */ | ||
7 | |||
8 | #include <linux/ftrace.h> | ||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/sched.h> | ||
11 | #include <asm/atomic.h> | ||
12 | |||
13 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
14 | |||
15 | /* | ||
16 | * Hook the return address and push it in the stack of return addrs | ||
17 | * in current thread info. | ||
18 | */ | ||
19 | void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | ||
20 | { | ||
21 | struct ftrace_graph_ent trace; | ||
22 | unsigned long return_hooker = (unsigned long)&return_to_handler; | ||
23 | |||
24 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) | ||
25 | return; | ||
26 | |||
27 | if (ftrace_push_return_trace(*parent, self_addr, &trace.depth) == -EBUSY) | ||
28 | return; | ||
29 | |||
30 | trace.func = self_addr; | ||
31 | |||
32 | /* Only trace if the calling function expects to */ | ||
33 | if (!ftrace_graph_entry(&trace)) { | ||
34 | current->curr_ret_stack--; | ||
35 | return; | ||
36 | } | ||
37 | |||
38 | /* all is well in the world ! hijack RETS ... */ | ||
39 | *parent = return_hooker; | ||
40 | } | ||
41 | |||
42 | #endif | ||
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c index 5fc424803a17..d8cde1fc5cb9 100644 --- a/arch/blackfin/kernel/ipipe.c +++ b/arch/blackfin/kernel/ipipe.c | |||
@@ -99,7 +99,7 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs) | |||
99 | * interrupt. | 99 | * interrupt. |
100 | */ | 100 | */ |
101 | m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR); | 101 | m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR); |
102 | this_domain = ipipe_current_domain; | 102 | this_domain = __ipipe_current_domain; |
103 | 103 | ||
104 | if (unlikely(test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control))) | 104 | if (unlikely(test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control))) |
105 | head = &this_domain->p_link; | 105 | head = &this_domain->p_link; |
@@ -212,7 +212,9 @@ void __ipipe_unstall_root_raw(void) | |||
212 | 212 | ||
213 | int __ipipe_syscall_root(struct pt_regs *regs) | 213 | int __ipipe_syscall_root(struct pt_regs *regs) |
214 | { | 214 | { |
215 | struct ipipe_percpu_domain_data *p; | ||
215 | unsigned long flags; | 216 | unsigned long flags; |
217 | int ret; | ||
216 | 218 | ||
217 | /* | 219 | /* |
218 | * We need to run the IRQ tail hook whenever we don't | 220 | * We need to run the IRQ tail hook whenever we don't |
@@ -231,29 +233,31 @@ int __ipipe_syscall_root(struct pt_regs *regs) | |||
231 | /* | 233 | /* |
232 | * This routine either returns: | 234 | * This routine either returns: |
233 | * 0 -- if the syscall is to be passed to Linux; | 235 | * 0 -- if the syscall is to be passed to Linux; |
234 | * 1 -- if the syscall should not be passed to Linux, and no | 236 | * >0 -- if the syscall should not be passed to Linux, and no |
235 | * tail work should be performed; | 237 | * tail work should be performed; |
236 | * -1 -- if the syscall should not be passed to Linux but the | 238 | * <0 -- if the syscall should not be passed to Linux but the |
237 | * tail work has to be performed (for handling signals etc). | 239 | * tail work has to be performed (for handling signals etc). |
238 | */ | 240 | */ |
239 | 241 | ||
240 | if (__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL) && | 242 | if (!__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL)) |
241 | __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs) > 0) { | 243 | return 0; |
242 | if (ipipe_root_domain_p && !in_atomic()) { | 244 | |
243 | /* | 245 | ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs); |
244 | * Sync pending VIRQs before _TIF_NEED_RESCHED | 246 | |
245 | * is tested. | 247 | local_irq_save_hw(flags); |
246 | */ | 248 | |
247 | local_irq_save_hw(flags); | 249 | if (!__ipipe_root_domain_p) { |
248 | if ((ipipe_root_cpudom_var(irqpend_himask) & IPIPE_IRQMASK_VIRT) != 0) | 250 | local_irq_restore_hw(flags); |
249 | __ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT); | ||
250 | local_irq_restore_hw(flags); | ||
251 | return -1; | ||
252 | } | ||
253 | return 1; | 251 | return 1; |
254 | } | 252 | } |
255 | 253 | ||
256 | return 0; | 254 | p = ipipe_root_cpudom_ptr(); |
255 | if ((p->irqpend_himask & IPIPE_IRQMASK_VIRT) != 0) | ||
256 | __ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT); | ||
257 | |||
258 | local_irq_restore_hw(flags); | ||
259 | |||
260 | return -ret; | ||
257 | } | 261 | } |
258 | 262 | ||
259 | unsigned long ipipe_critical_enter(void (*syncfn) (void)) | 263 | unsigned long ipipe_critical_enter(void (*syncfn) (void)) |
@@ -329,9 +333,7 @@ asmlinkage void __ipipe_sync_root(void) | |||
329 | 333 | ||
330 | void ___ipipe_sync_pipeline(unsigned long syncmask) | 334 | void ___ipipe_sync_pipeline(unsigned long syncmask) |
331 | { | 335 | { |
332 | struct ipipe_domain *ipd = ipipe_current_domain; | 336 | if (__ipipe_root_domain_p) { |
333 | |||
334 | if (ipd == ipipe_root_domain) { | ||
335 | if (test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status))) | 337 | if (test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status))) |
336 | return; | 338 | return; |
337 | } | 339 | } |
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c index 80447f99c2b5..6454babdfaff 100644 --- a/arch/blackfin/kernel/setup.c +++ b/arch/blackfin/kernel/setup.c | |||
@@ -1098,7 +1098,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
1098 | CPUID, bfin_cpuid()); | 1098 | CPUID, bfin_cpuid()); |
1099 | 1099 | ||
1100 | seq_printf(m, "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n" | 1100 | seq_printf(m, "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n" |
1101 | "stepping\t: %d\n", | 1101 | "stepping\t: %d ", |
1102 | cpu, cclk/1000000, sclk/1000000, | 1102 | cpu, cclk/1000000, sclk/1000000, |
1103 | #ifdef CONFIG_MPU | 1103 | #ifdef CONFIG_MPU |
1104 | "mpu on", | 1104 | "mpu on", |
@@ -1107,7 +1107,16 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
1107 | #endif | 1107 | #endif |
1108 | revid); | 1108 | revid); |
1109 | 1109 | ||
1110 | seq_printf(m, "cpu MHz\t\t: %lu.%03lu/%lu.%03lu\n", | 1110 | if (bfin_revid() != bfin_compiled_revid()) { |
1111 | if (bfin_compiled_revid() == -1) | ||
1112 | seq_printf(m, "(Compiled for Rev none)"); | ||
1113 | else if (bfin_compiled_revid() == 0xffff) | ||
1114 | seq_printf(m, "(Compiled for Rev any)"); | ||
1115 | else | ||
1116 | seq_printf(m, "(Compiled for Rev %d)", bfin_compiled_revid()); | ||
1117 | } | ||
1118 | |||
1119 | seq_printf(m, "\ncpu MHz\t\t: %lu.%03lu/%lu.%03lu\n", | ||
1111 | cclk/1000000, cclk%1000000, | 1120 | cclk/1000000, cclk%1000000, |
1112 | sclk/1000000, sclk%1000000); | 1121 | sclk/1000000, sclk%1000000); |
1113 | seq_printf(m, "bogomips\t: %lu.%02lu\n" | 1122 | seq_printf(m, "bogomips\t: %lu.%02lu\n" |
@@ -1172,6 +1181,9 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
1172 | #ifdef __ARCH_SYNC_CORE_DCACHE | 1181 | #ifdef __ARCH_SYNC_CORE_DCACHE |
1173 | seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", cpudata->dcache_invld_count); | 1182 | seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", cpudata->dcache_invld_count); |
1174 | #endif | 1183 | #endif |
1184 | #ifdef __ARCH_SYNC_CORE_ICACHE | ||
1185 | seq_printf(m, "SMP Icache Flushes\t: %lu\n\n", cpudata->icache_invld_count); | ||
1186 | #endif | ||
1175 | #ifdef CONFIG_BFIN_ICACHE_LOCK | 1187 | #ifdef CONFIG_BFIN_ICACHE_LOCK |
1176 | switch ((cpudata->imemctl >> 3) & WAYALL_L) { | 1188 | switch ((cpudata->imemctl >> 3) & WAYALL_L) { |
1177 | case WAY0_L: | 1189 | case WAY0_L: |
diff --git a/arch/blackfin/kernel/stacktrace.c b/arch/blackfin/kernel/stacktrace.c new file mode 100644 index 000000000000..30301e1eace5 --- /dev/null +++ b/arch/blackfin/kernel/stacktrace.c | |||
@@ -0,0 +1,53 @@ | |||
1 | /* | ||
2 | * Blackfin stacktrace code (mostly copied from avr32) | ||
3 | * | ||
4 | * Copyright 2009 Analog Devices Inc. | ||
5 | * Licensed under the GPL-2 or later. | ||
6 | */ | ||
7 | |||
8 | #include <linux/sched.h> | ||
9 | #include <linux/stacktrace.h> | ||
10 | #include <linux/thread_info.h> | ||
11 | #include <linux/module.h> | ||
12 | |||
13 | register unsigned long current_frame_pointer asm("FP"); | ||
14 | |||
15 | struct stackframe { | ||
16 | unsigned long fp; | ||
17 | unsigned long rets; | ||
18 | }; | ||
19 | |||
20 | /* | ||
21 | * Save stack-backtrace addresses into a stack_trace buffer. | ||
22 | */ | ||
23 | void save_stack_trace(struct stack_trace *trace) | ||
24 | { | ||
25 | unsigned long low, high; | ||
26 | unsigned long fp; | ||
27 | struct stackframe *frame; | ||
28 | int skip = trace->skip; | ||
29 | |||
30 | low = (unsigned long)task_stack_page(current); | ||
31 | high = low + THREAD_SIZE; | ||
32 | fp = current_frame_pointer; | ||
33 | |||
34 | while (fp >= low && fp <= (high - sizeof(*frame))) { | ||
35 | frame = (struct stackframe *)fp; | ||
36 | |||
37 | if (skip) { | ||
38 | skip--; | ||
39 | } else { | ||
40 | trace->entries[trace->nr_entries++] = frame->rets; | ||
41 | if (trace->nr_entries >= trace->max_entries) | ||
42 | break; | ||
43 | } | ||
44 | |||
45 | /* | ||
46 | * The next frame must be at a higher address than the | ||
47 | * current frame. | ||
48 | */ | ||
49 | low = fp + sizeof(*frame); | ||
50 | fp = frame->fp; | ||
51 | } | ||
52 | } | ||
53 | EXPORT_SYMBOL_GPL(save_stack_trace); | ||
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c index aa76dfb0226e..d279552fe9b0 100644 --- a/arch/blackfin/kernel/traps.c +++ b/arch/blackfin/kernel/traps.c | |||
@@ -27,6 +27,7 @@ | |||
27 | * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | 27 | * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
28 | */ | 28 | */ |
29 | 29 | ||
30 | #include <linux/bug.h> | ||
30 | #include <linux/uaccess.h> | 31 | #include <linux/uaccess.h> |
31 | #include <linux/interrupt.h> | 32 | #include <linux/interrupt.h> |
32 | #include <linux/module.h> | 33 | #include <linux/module.h> |
@@ -238,6 +239,11 @@ asmlinkage void double_fault_c(struct pt_regs *fp) | |||
238 | 239 | ||
239 | } | 240 | } |
240 | 241 | ||
242 | static int kernel_mode_regs(struct pt_regs *regs) | ||
243 | { | ||
244 | return regs->ipend & 0xffc0; | ||
245 | } | ||
246 | |||
241 | asmlinkage void trap_c(struct pt_regs *fp) | 247 | asmlinkage void trap_c(struct pt_regs *fp) |
242 | { | 248 | { |
243 | #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON | 249 | #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON |
@@ -246,6 +252,7 @@ asmlinkage void trap_c(struct pt_regs *fp) | |||
246 | #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO | 252 | #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO |
247 | unsigned int cpu = smp_processor_id(); | 253 | unsigned int cpu = smp_processor_id(); |
248 | #endif | 254 | #endif |
255 | const char *strerror = NULL; | ||
249 | int sig = 0; | 256 | int sig = 0; |
250 | siginfo_t info; | 257 | siginfo_t info; |
251 | unsigned long trapnr = fp->seqstat & SEQSTAT_EXCAUSE; | 258 | unsigned long trapnr = fp->seqstat & SEQSTAT_EXCAUSE; |
@@ -259,27 +266,10 @@ asmlinkage void trap_c(struct pt_regs *fp) | |||
259 | * double faults if the stack has become corrupt | 266 | * double faults if the stack has become corrupt |
260 | */ | 267 | */ |
261 | 268 | ||
262 | /* If the fault was caused by a kernel thread, or interrupt handler | 269 | #ifndef CONFIG_KGDB |
263 | * we will kernel panic, so the system reboots. | 270 | /* IPEND is skipped if KGDB isn't enabled (see entry code) */ |
264 | * If KGDB is enabled, don't set this for kernel breakpoints | 271 | fp->ipend = bfin_read_IPEND(); |
265 | */ | ||
266 | |||
267 | /* TODO: check to see if we are in some sort of deferred HWERR | ||
268 | * that we should be able to recover from, not kernel panic | ||
269 | */ | ||
270 | if ((bfin_read_IPEND() & 0xFFC0) && (trapnr != VEC_STEP) | ||
271 | #ifdef CONFIG_KGDB | ||
272 | && (trapnr != VEC_EXCPT02) | ||
273 | #endif | 272 | #endif |
274 | ){ | ||
275 | console_verbose(); | ||
276 | oops_in_progress = 1; | ||
277 | } else if (current) { | ||
278 | if (current->mm == NULL) { | ||
279 | console_verbose(); | ||
280 | oops_in_progress = 1; | ||
281 | } | ||
282 | } | ||
283 | 273 | ||
284 | /* trap_c() will be called for exceptions. During exceptions | 274 | /* trap_c() will be called for exceptions. During exceptions |
285 | * processing, the pc value should be set with retx value. | 275 | * processing, the pc value should be set with retx value. |
@@ -307,15 +297,15 @@ asmlinkage void trap_c(struct pt_regs *fp) | |||
307 | sig = SIGTRAP; | 297 | sig = SIGTRAP; |
308 | CHK_DEBUGGER_TRAP_MAYBE(); | 298 | CHK_DEBUGGER_TRAP_MAYBE(); |
309 | /* Check if this is a breakpoint in kernel space */ | 299 | /* Check if this is a breakpoint in kernel space */ |
310 | if (fp->ipend & 0xffc0) | 300 | if (kernel_mode_regs(fp)) |
311 | return; | 301 | goto traps_done; |
312 | else | 302 | else |
313 | break; | 303 | break; |
314 | /* 0x03 - User Defined, userspace stack overflow */ | 304 | /* 0x03 - User Defined, userspace stack overflow */ |
315 | case VEC_EXCPT03: | 305 | case VEC_EXCPT03: |
316 | info.si_code = SEGV_STACKFLOW; | 306 | info.si_code = SEGV_STACKFLOW; |
317 | sig = SIGSEGV; | 307 | sig = SIGSEGV; |
318 | verbose_printk(KERN_NOTICE EXC_0x03(KERN_NOTICE)); | 308 | strerror = KERN_NOTICE EXC_0x03(KERN_NOTICE); |
319 | CHK_DEBUGGER_TRAP_MAYBE(); | 309 | CHK_DEBUGGER_TRAP_MAYBE(); |
320 | break; | 310 | break; |
321 | /* 0x02 - KGDB initial connection and break signal trap */ | 311 | /* 0x02 - KGDB initial connection and break signal trap */ |
@@ -324,7 +314,7 @@ asmlinkage void trap_c(struct pt_regs *fp) | |||
324 | info.si_code = TRAP_ILLTRAP; | 314 | info.si_code = TRAP_ILLTRAP; |
325 | sig = SIGTRAP; | 315 | sig = SIGTRAP; |
326 | CHK_DEBUGGER_TRAP(); | 316 | CHK_DEBUGGER_TRAP(); |
327 | return; | 317 | goto traps_done; |
328 | #endif | 318 | #endif |
329 | /* 0x04 - User Defined */ | 319 | /* 0x04 - User Defined */ |
330 | /* 0x05 - User Defined */ | 320 | /* 0x05 - User Defined */ |
@@ -344,7 +334,7 @@ asmlinkage void trap_c(struct pt_regs *fp) | |||
344 | case VEC_EXCPT04 ... VEC_EXCPT15: | 334 | case VEC_EXCPT04 ... VEC_EXCPT15: |
345 | info.si_code = ILL_ILLPARAOP; | 335 | info.si_code = ILL_ILLPARAOP; |
346 | sig = SIGILL; | 336 | sig = SIGILL; |
347 | verbose_printk(KERN_NOTICE EXC_0x04(KERN_NOTICE)); | 337 | strerror = KERN_NOTICE EXC_0x04(KERN_NOTICE); |
348 | CHK_DEBUGGER_TRAP_MAYBE(); | 338 | CHK_DEBUGGER_TRAP_MAYBE(); |
349 | break; | 339 | break; |
350 | /* 0x10 HW Single step, handled here */ | 340 | /* 0x10 HW Single step, handled here */ |
@@ -353,15 +343,15 @@ asmlinkage void trap_c(struct pt_regs *fp) | |||
353 | sig = SIGTRAP; | 343 | sig = SIGTRAP; |
354 | CHK_DEBUGGER_TRAP_MAYBE(); | 344 | CHK_DEBUGGER_TRAP_MAYBE(); |
355 | /* Check if this is a single step in kernel space */ | 345 | /* Check if this is a single step in kernel space */ |
356 | if (fp->ipend & 0xffc0) | 346 | if (kernel_mode_regs(fp)) |
357 | return; | 347 | goto traps_done; |
358 | else | 348 | else |
359 | break; | 349 | break; |
360 | /* 0x11 - Trace Buffer Full, handled here */ | 350 | /* 0x11 - Trace Buffer Full, handled here */ |
361 | case VEC_OVFLOW: | 351 | case VEC_OVFLOW: |
362 | info.si_code = TRAP_TRACEFLOW; | 352 | info.si_code = TRAP_TRACEFLOW; |
363 | sig = SIGTRAP; | 353 | sig = SIGTRAP; |
364 | verbose_printk(KERN_NOTICE EXC_0x11(KERN_NOTICE)); | 354 | strerror = KERN_NOTICE EXC_0x11(KERN_NOTICE); |
365 | CHK_DEBUGGER_TRAP_MAYBE(); | 355 | CHK_DEBUGGER_TRAP_MAYBE(); |
366 | break; | 356 | break; |
367 | /* 0x12 - Reserved, Caught by default */ | 357 | /* 0x12 - Reserved, Caught by default */ |
@@ -381,37 +371,54 @@ asmlinkage void trap_c(struct pt_regs *fp) | |||
381 | /* 0x20 - Reserved, Caught by default */ | 371 | /* 0x20 - Reserved, Caught by default */ |
382 | /* 0x21 - Undefined Instruction, handled here */ | 372 | /* 0x21 - Undefined Instruction, handled here */ |
383 | case VEC_UNDEF_I: | 373 | case VEC_UNDEF_I: |
374 | #ifdef CONFIG_BUG | ||
375 | if (kernel_mode_regs(fp)) { | ||
376 | switch (report_bug(fp->pc, fp)) { | ||
377 | case BUG_TRAP_TYPE_NONE: | ||
378 | break; | ||
379 | case BUG_TRAP_TYPE_WARN: | ||
380 | dump_bfin_trace_buffer(); | ||
381 | fp->pc += 2; | ||
382 | goto traps_done; | ||
383 | case BUG_TRAP_TYPE_BUG: | ||
384 | /* call to panic() will dump trace, and it is | ||
385 | * off at this point, so it won't be clobbered | ||
386 | */ | ||
387 | panic("BUG()"); | ||
388 | } | ||
389 | } | ||
390 | #endif | ||
384 | info.si_code = ILL_ILLOPC; | 391 | info.si_code = ILL_ILLOPC; |
385 | sig = SIGILL; | 392 | sig = SIGILL; |
386 | verbose_printk(KERN_NOTICE EXC_0x21(KERN_NOTICE)); | 393 | strerror = KERN_NOTICE EXC_0x21(KERN_NOTICE); |
387 | CHK_DEBUGGER_TRAP_MAYBE(); | 394 | CHK_DEBUGGER_TRAP_MAYBE(); |
388 | break; | 395 | break; |
389 | /* 0x22 - Illegal Instruction Combination, handled here */ | 396 | /* 0x22 - Illegal Instruction Combination, handled here */ |
390 | case VEC_ILGAL_I: | 397 | case VEC_ILGAL_I: |
391 | info.si_code = ILL_ILLPARAOP; | 398 | info.si_code = ILL_ILLPARAOP; |
392 | sig = SIGILL; | 399 | sig = SIGILL; |
393 | verbose_printk(KERN_NOTICE EXC_0x22(KERN_NOTICE)); | 400 | strerror = KERN_NOTICE EXC_0x22(KERN_NOTICE); |
394 | CHK_DEBUGGER_TRAP_MAYBE(); | 401 | CHK_DEBUGGER_TRAP_MAYBE(); |
395 | break; | 402 | break; |
396 | /* 0x23 - Data CPLB protection violation, handled here */ | 403 | /* 0x23 - Data CPLB protection violation, handled here */ |
397 | case VEC_CPLB_VL: | 404 | case VEC_CPLB_VL: |
398 | info.si_code = ILL_CPLB_VI; | 405 | info.si_code = ILL_CPLB_VI; |
399 | sig = SIGBUS; | 406 | sig = SIGBUS; |
400 | verbose_printk(KERN_NOTICE EXC_0x23(KERN_NOTICE)); | 407 | strerror = KERN_NOTICE EXC_0x23(KERN_NOTICE); |
401 | CHK_DEBUGGER_TRAP_MAYBE(); | 408 | CHK_DEBUGGER_TRAP_MAYBE(); |
402 | break; | 409 | break; |
403 | /* 0x24 - Data access misaligned, handled here */ | 410 | /* 0x24 - Data access misaligned, handled here */ |
404 | case VEC_MISALI_D: | 411 | case VEC_MISALI_D: |
405 | info.si_code = BUS_ADRALN; | 412 | info.si_code = BUS_ADRALN; |
406 | sig = SIGBUS; | 413 | sig = SIGBUS; |
407 | verbose_printk(KERN_NOTICE EXC_0x24(KERN_NOTICE)); | 414 | strerror = KERN_NOTICE EXC_0x24(KERN_NOTICE); |
408 | CHK_DEBUGGER_TRAP_MAYBE(); | 415 | CHK_DEBUGGER_TRAP_MAYBE(); |
409 | break; | 416 | break; |
410 | /* 0x25 - Unrecoverable Event, handled here */ | 417 | /* 0x25 - Unrecoverable Event, handled here */ |
411 | case VEC_UNCOV: | 418 | case VEC_UNCOV: |
412 | info.si_code = ILL_ILLEXCPT; | 419 | info.si_code = ILL_ILLEXCPT; |
413 | sig = SIGILL; | 420 | sig = SIGILL; |
414 | verbose_printk(KERN_NOTICE EXC_0x25(KERN_NOTICE)); | 421 | strerror = KERN_NOTICE EXC_0x25(KERN_NOTICE); |
415 | CHK_DEBUGGER_TRAP_MAYBE(); | 422 | CHK_DEBUGGER_TRAP_MAYBE(); |
416 | break; | 423 | break; |
417 | /* 0x26 - Data CPLB Miss, normal case is handled in _cplb_hdr, | 424 | /* 0x26 - Data CPLB Miss, normal case is handled in _cplb_hdr, |
@@ -419,7 +426,7 @@ asmlinkage void trap_c(struct pt_regs *fp) | |||
419 | case VEC_CPLB_M: | 426 | case VEC_CPLB_M: |
420 | info.si_code = BUS_ADRALN; | 427 | info.si_code = BUS_ADRALN; |
421 | sig = SIGBUS; | 428 | sig = SIGBUS; |
422 | verbose_printk(KERN_NOTICE EXC_0x26(KERN_NOTICE)); | 429 | strerror = KERN_NOTICE EXC_0x26(KERN_NOTICE); |
423 | break; | 430 | break; |
424 | /* 0x27 - Data CPLB Multiple Hits - Linux Trap Zero, handled here */ | 431 | /* 0x27 - Data CPLB Multiple Hits - Linux Trap Zero, handled here */ |
425 | case VEC_CPLB_MHIT: | 432 | case VEC_CPLB_MHIT: |
@@ -427,10 +434,10 @@ asmlinkage void trap_c(struct pt_regs *fp) | |||
427 | sig = SIGSEGV; | 434 | sig = SIGSEGV; |
428 | #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO | 435 | #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO |
429 | if (cpu_pda[cpu].dcplb_fault_addr < FIXED_CODE_START) | 436 | if (cpu_pda[cpu].dcplb_fault_addr < FIXED_CODE_START) |
430 | verbose_printk(KERN_NOTICE "NULL pointer access\n"); | 437 | strerror = KERN_NOTICE "NULL pointer access\n"; |
431 | else | 438 | else |
432 | #endif | 439 | #endif |
433 | verbose_printk(KERN_NOTICE EXC_0x27(KERN_NOTICE)); | 440 | strerror = KERN_NOTICE EXC_0x27(KERN_NOTICE); |
434 | CHK_DEBUGGER_TRAP_MAYBE(); | 441 | CHK_DEBUGGER_TRAP_MAYBE(); |
435 | break; | 442 | break; |
436 | /* 0x28 - Emulation Watchpoint, handled here */ | 443 | /* 0x28 - Emulation Watchpoint, handled here */ |
@@ -440,8 +447,8 @@ asmlinkage void trap_c(struct pt_regs *fp) | |||
440 | pr_debug(EXC_0x28(KERN_DEBUG)); | 447 | pr_debug(EXC_0x28(KERN_DEBUG)); |
441 | CHK_DEBUGGER_TRAP_MAYBE(); | 448 | CHK_DEBUGGER_TRAP_MAYBE(); |
442 | /* Check if this is a watchpoint in kernel space */ | 449 | /* Check if this is a watchpoint in kernel space */ |
443 | if (fp->ipend & 0xffc0) | 450 | if (kernel_mode_regs(fp)) |
444 | return; | 451 | goto traps_done; |
445 | else | 452 | else |
446 | break; | 453 | break; |
447 | #ifdef CONFIG_BF535 | 454 | #ifdef CONFIG_BF535 |
@@ -449,7 +456,7 @@ asmlinkage void trap_c(struct pt_regs *fp) | |||
449 | case VEC_ISTRU_VL: /* ADSP-BF535 only (MH) */ | 456 | case VEC_ISTRU_VL: /* ADSP-BF535 only (MH) */ |
450 | info.si_code = BUS_OPFETCH; | 457 | info.si_code = BUS_OPFETCH; |
451 | sig = SIGBUS; | 458 | sig = SIGBUS; |
452 | verbose_printk(KERN_NOTICE "BF535: VEC_ISTRU_VL\n"); | 459 | strerror = KERN_NOTICE "BF535: VEC_ISTRU_VL\n"; |
453 | CHK_DEBUGGER_TRAP_MAYBE(); | 460 | CHK_DEBUGGER_TRAP_MAYBE(); |
454 | break; | 461 | break; |
455 | #else | 462 | #else |
@@ -459,21 +466,21 @@ asmlinkage void trap_c(struct pt_regs *fp) | |||
459 | case VEC_MISALI_I: | 466 | case VEC_MISALI_I: |
460 | info.si_code = BUS_ADRALN; | 467 | info.si_code = BUS_ADRALN; |
461 | sig = SIGBUS; | 468 | sig = SIGBUS; |
462 | verbose_printk(KERN_NOTICE EXC_0x2A(KERN_NOTICE)); | 469 | strerror = KERN_NOTICE EXC_0x2A(KERN_NOTICE); |
463 | CHK_DEBUGGER_TRAP_MAYBE(); | 470 | CHK_DEBUGGER_TRAP_MAYBE(); |
464 | break; | 471 | break; |
465 | /* 0x2B - Instruction CPLB protection violation, handled here */ | 472 | /* 0x2B - Instruction CPLB protection violation, handled here */ |
466 | case VEC_CPLB_I_VL: | 473 | case VEC_CPLB_I_VL: |
467 | info.si_code = ILL_CPLB_VI; | 474 | info.si_code = ILL_CPLB_VI; |
468 | sig = SIGBUS; | 475 | sig = SIGBUS; |
469 | verbose_printk(KERN_NOTICE EXC_0x2B(KERN_NOTICE)); | 476 | strerror = KERN_NOTICE EXC_0x2B(KERN_NOTICE); |
470 | CHK_DEBUGGER_TRAP_MAYBE(); | 477 | CHK_DEBUGGER_TRAP_MAYBE(); |
471 | break; | 478 | break; |
472 | /* 0x2C - Instruction CPLB miss, handled in _cplb_hdr */ | 479 | /* 0x2C - Instruction CPLB miss, handled in _cplb_hdr */ |
473 | case VEC_CPLB_I_M: | 480 | case VEC_CPLB_I_M: |
474 | info.si_code = ILL_CPLB_MISS; | 481 | info.si_code = ILL_CPLB_MISS; |
475 | sig = SIGBUS; | 482 | sig = SIGBUS; |
476 | verbose_printk(KERN_NOTICE EXC_0x2C(KERN_NOTICE)); | 483 | strerror = KERN_NOTICE EXC_0x2C(KERN_NOTICE); |
477 | break; | 484 | break; |
478 | /* 0x2D - Instruction CPLB Multiple Hits, handled here */ | 485 | /* 0x2D - Instruction CPLB Multiple Hits, handled here */ |
479 | case VEC_CPLB_I_MHIT: | 486 | case VEC_CPLB_I_MHIT: |
@@ -481,17 +488,17 @@ asmlinkage void trap_c(struct pt_regs *fp) | |||
481 | sig = SIGSEGV; | 488 | sig = SIGSEGV; |
482 | #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO | 489 | #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO |
483 | if (cpu_pda[cpu].icplb_fault_addr < FIXED_CODE_START) | 490 | if (cpu_pda[cpu].icplb_fault_addr < FIXED_CODE_START) |
484 | verbose_printk(KERN_NOTICE "Jump to NULL address\n"); | 491 | strerror = KERN_NOTICE "Jump to NULL address\n"; |
485 | else | 492 | else |
486 | #endif | 493 | #endif |
487 | verbose_printk(KERN_NOTICE EXC_0x2D(KERN_NOTICE)); | 494 | strerror = KERN_NOTICE EXC_0x2D(KERN_NOTICE); |
488 | CHK_DEBUGGER_TRAP_MAYBE(); | 495 | CHK_DEBUGGER_TRAP_MAYBE(); |
489 | break; | 496 | break; |
490 | /* 0x2E - Illegal use of Supervisor Resource, handled here */ | 497 | /* 0x2E - Illegal use of Supervisor Resource, handled here */ |
491 | case VEC_ILL_RES: | 498 | case VEC_ILL_RES: |
492 | info.si_code = ILL_PRVOPC; | 499 | info.si_code = ILL_PRVOPC; |
493 | sig = SIGILL; | 500 | sig = SIGILL; |
494 | verbose_printk(KERN_NOTICE EXC_0x2E(KERN_NOTICE)); | 501 | strerror = KERN_NOTICE EXC_0x2E(KERN_NOTICE); |
495 | CHK_DEBUGGER_TRAP_MAYBE(); | 502 | CHK_DEBUGGER_TRAP_MAYBE(); |
496 | break; | 503 | break; |
497 | /* 0x2F - Reserved, Caught by default */ | 504 | /* 0x2F - Reserved, Caught by default */ |
@@ -519,17 +526,17 @@ asmlinkage void trap_c(struct pt_regs *fp) | |||
519 | case (SEQSTAT_HWERRCAUSE_SYSTEM_MMR): | 526 | case (SEQSTAT_HWERRCAUSE_SYSTEM_MMR): |
520 | info.si_code = BUS_ADRALN; | 527 | info.si_code = BUS_ADRALN; |
521 | sig = SIGBUS; | 528 | sig = SIGBUS; |
522 | verbose_printk(KERN_NOTICE HWC_x2(KERN_NOTICE)); | 529 | strerror = KERN_NOTICE HWC_x2(KERN_NOTICE); |
523 | break; | 530 | break; |
524 | /* External Memory Addressing Error */ | 531 | /* External Memory Addressing Error */ |
525 | case (SEQSTAT_HWERRCAUSE_EXTERN_ADDR): | 532 | case (SEQSTAT_HWERRCAUSE_EXTERN_ADDR): |
526 | info.si_code = BUS_ADRERR; | 533 | info.si_code = BUS_ADRERR; |
527 | sig = SIGBUS; | 534 | sig = SIGBUS; |
528 | verbose_printk(KERN_NOTICE HWC_x3(KERN_NOTICE)); | 535 | strerror = KERN_NOTICE HWC_x3(KERN_NOTICE); |
529 | break; | 536 | break; |
530 | /* Performance Monitor Overflow */ | 537 | /* Performance Monitor Overflow */ |
531 | case (SEQSTAT_HWERRCAUSE_PERF_FLOW): | 538 | case (SEQSTAT_HWERRCAUSE_PERF_FLOW): |
532 | verbose_printk(KERN_NOTICE HWC_x12(KERN_NOTICE)); | 539 | strerror = KERN_NOTICE HWC_x12(KERN_NOTICE); |
533 | break; | 540 | break; |
534 | /* RAISE 5 instruction */ | 541 | /* RAISE 5 instruction */ |
535 | case (SEQSTAT_HWERRCAUSE_RAISE_5): | 542 | case (SEQSTAT_HWERRCAUSE_RAISE_5): |
@@ -546,7 +553,6 @@ asmlinkage void trap_c(struct pt_regs *fp) | |||
546 | * if we get here we hit a reserved one, so panic | 553 | * if we get here we hit a reserved one, so panic |
547 | */ | 554 | */ |
548 | default: | 555 | default: |
549 | oops_in_progress = 1; | ||
550 | info.si_code = ILL_ILLPARAOP; | 556 | info.si_code = ILL_ILLPARAOP; |
551 | sig = SIGILL; | 557 | sig = SIGILL; |
552 | verbose_printk(KERN_EMERG "Caught Unhandled Exception, code = %08lx\n", | 558 | verbose_printk(KERN_EMERG "Caught Unhandled Exception, code = %08lx\n", |
@@ -557,6 +563,16 @@ asmlinkage void trap_c(struct pt_regs *fp) | |||
557 | 563 | ||
558 | BUG_ON(sig == 0); | 564 | BUG_ON(sig == 0); |
559 | 565 | ||
566 | /* If the fault was caused by a kernel thread, or interrupt handler | ||
567 | * we will kernel panic, so the system reboots. | ||
568 | */ | ||
569 | if (kernel_mode_regs(fp) || (current && !current->mm)) { | ||
570 | console_verbose(); | ||
571 | oops_in_progress = 1; | ||
572 | if (strerror) | ||
573 | verbose_printk(strerror); | ||
574 | } | ||
575 | |||
560 | if (sig != SIGTRAP) { | 576 | if (sig != SIGTRAP) { |
561 | dump_bfin_process(fp); | 577 | dump_bfin_process(fp); |
562 | dump_bfin_mem(fp); | 578 | dump_bfin_mem(fp); |
@@ -606,8 +622,8 @@ asmlinkage void trap_c(struct pt_regs *fp) | |||
606 | if (ANOMALY_05000461 && trapnr == VEC_HWERR && !access_ok(VERIFY_READ, fp->pc, 8)) | 622 | if (ANOMALY_05000461 && trapnr == VEC_HWERR && !access_ok(VERIFY_READ, fp->pc, 8)) |
607 | fp->pc = SAFE_USER_INSTRUCTION; | 623 | fp->pc = SAFE_USER_INSTRUCTION; |
608 | 624 | ||
625 | traps_done: | ||
609 | trace_buffer_restore(j); | 626 | trace_buffer_restore(j); |
610 | return; | ||
611 | } | 627 | } |
612 | 628 | ||
613 | /* Typical exception handling routines */ | 629 | /* Typical exception handling routines */ |
@@ -792,6 +808,18 @@ void dump_bfin_trace_buffer(void) | |||
792 | } | 808 | } |
793 | EXPORT_SYMBOL(dump_bfin_trace_buffer); | 809 | EXPORT_SYMBOL(dump_bfin_trace_buffer); |
794 | 810 | ||
811 | #ifdef CONFIG_BUG | ||
812 | int is_valid_bugaddr(unsigned long addr) | ||
813 | { | ||
814 | unsigned short opcode; | ||
815 | |||
816 | if (!get_instruction(&opcode, (unsigned short *)addr)) | ||
817 | return 0; | ||
818 | |||
819 | return opcode == BFIN_BUG_OPCODE; | ||
820 | } | ||
821 | #endif | ||
822 | |||
795 | /* | 823 | /* |
796 | * Checks to see if the address pointed to is either a | 824 | * Checks to see if the address pointed to is either a |
797 | * 16-bit CALL instruction, or a 32-bit CALL instruction | 825 | * 16-bit CALL instruction, or a 32-bit CALL instruction |
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S index 8b67167cb4f4..6ac307ca0d80 100644 --- a/arch/blackfin/kernel/vmlinux.lds.S +++ b/arch/blackfin/kernel/vmlinux.lds.S | |||
@@ -54,6 +54,7 @@ SECTIONS | |||
54 | SCHED_TEXT | 54 | SCHED_TEXT |
55 | #endif | 55 | #endif |
56 | LOCK_TEXT | 56 | LOCK_TEXT |
57 | IRQENTRY_TEXT | ||
57 | KPROBES_TEXT | 58 | KPROBES_TEXT |
58 | *(.text.*) | 59 | *(.text.*) |
59 | *(.fixup) | 60 | *(.fixup) |
@@ -166,6 +167,20 @@ SECTIONS | |||
166 | } | 167 | } |
167 | PERCPU(4) | 168 | PERCPU(4) |
168 | SECURITY_INIT | 169 | SECURITY_INIT |
170 | |||
171 | /* we have to discard exit text and such at runtime, not link time, to | ||
172 | * handle embedded cross-section references (alt instructions, bug | ||
173 | * table, eh_frame, etc...) | ||
174 | */ | ||
175 | .exit.text : | ||
176 | { | ||
177 | EXIT_TEXT | ||
178 | } | ||
179 | .exit.data : | ||
180 | { | ||
181 | EXIT_DATA | ||
182 | } | ||
183 | |||
169 | .init.ramfs : | 184 | .init.ramfs : |
170 | { | 185 | { |
171 | . = ALIGN(4); | 186 | . = ALIGN(4); |
@@ -264,8 +279,6 @@ SECTIONS | |||
264 | 279 | ||
265 | /DISCARD/ : | 280 | /DISCARD/ : |
266 | { | 281 | { |
267 | EXIT_TEXT | ||
268 | EXIT_DATA | ||
269 | *(.exitcall.exit) | 282 | *(.exitcall.exit) |
270 | } | 283 | } |
271 | } | 284 | } |
diff --git a/arch/blackfin/lib/checksum.c b/arch/blackfin/lib/checksum.c index 762a7f02970a..cd605e7d8518 100644 --- a/arch/blackfin/lib/checksum.c +++ b/arch/blackfin/lib/checksum.c | |||
@@ -116,6 +116,7 @@ __sum16 ip_compute_csum(const void *buff, int len) | |||
116 | { | 116 | { |
117 | return (__force __sum16)~do_csum(buff, len); | 117 | return (__force __sum16)~do_csum(buff, len); |
118 | } | 118 | } |
119 | EXPORT_SYMBOL(ip_compute_csum); | ||
119 | 120 | ||
120 | /* | 121 | /* |
121 | * copy from fs while checksumming, otherwise like csum_partial | 122 | * copy from fs while checksumming, otherwise like csum_partial |
@@ -130,6 +131,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst, | |||
130 | memcpy(dst, (__force void *)src, len); | 131 | memcpy(dst, (__force void *)src, len); |
131 | return csum_partial(dst, len, sum); | 132 | return csum_partial(dst, len, sum); |
132 | } | 133 | } |
134 | EXPORT_SYMBOL(csum_partial_copy_from_user); | ||
133 | 135 | ||
134 | /* | 136 | /* |
135 | * copy from ds while checksumming, otherwise like csum_partial | 137 | * copy from ds while checksumming, otherwise like csum_partial |
diff --git a/arch/blackfin/mach-bf518/boards/ezbrd.c b/arch/blackfin/mach-bf518/boards/ezbrd.c index 62bba09bcce6..1382f0382359 100644 --- a/arch/blackfin/mach-bf518/boards/ezbrd.c +++ b/arch/blackfin/mach-bf518/boards/ezbrd.c | |||
@@ -246,7 +246,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { | |||
246 | .modalias = "m25p80", /* Name of spi_driver for this device */ | 246 | .modalias = "m25p80", /* Name of spi_driver for this device */ |
247 | .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ | 247 | .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ |
248 | .bus_num = 0, /* Framework bus number */ | 248 | .bus_num = 0, /* Framework bus number */ |
249 | .chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1*/ | 249 | .chip_select = 2, /* On BF518F-EZBRD it's SPI0_SSEL2 */ |
250 | .platform_data = &bfin_spi_flash_data, | 250 | .platform_data = &bfin_spi_flash_data, |
251 | .controller_data = &spi_flash_chip_info, | 251 | .controller_data = &spi_flash_chip_info, |
252 | .mode = SPI_MODE_3, | 252 | .mode = SPI_MODE_3, |
@@ -369,6 +369,11 @@ static struct resource bfin_spi0_resource[] = { | |||
369 | [1] = { | 369 | [1] = { |
370 | .start = CH_SPI0, | 370 | .start = CH_SPI0, |
371 | .end = CH_SPI0, | 371 | .end = CH_SPI0, |
372 | .flags = IORESOURCE_DMA, | ||
373 | }, | ||
374 | [2] = { | ||
375 | .start = IRQ_SPI0, | ||
376 | .end = IRQ_SPI0, | ||
372 | .flags = IORESOURCE_IRQ, | 377 | .flags = IORESOURCE_IRQ, |
373 | }, | 378 | }, |
374 | }; | 379 | }; |
@@ -399,6 +404,11 @@ static struct resource bfin_spi1_resource[] = { | |||
399 | [1] = { | 404 | [1] = { |
400 | .start = CH_SPI1, | 405 | .start = CH_SPI1, |
401 | .end = CH_SPI1, | 406 | .end = CH_SPI1, |
407 | .flags = IORESOURCE_DMA, | ||
408 | }, | ||
409 | [2] = { | ||
410 | .start = IRQ_SPI1, | ||
411 | .end = IRQ_SPI1, | ||
402 | .flags = IORESOURCE_IRQ, | 412 | .flags = IORESOURCE_IRQ, |
403 | }, | 413 | }, |
404 | }; | 414 | }; |
diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c index 6d6f9effa0bb..1eaf27ff722e 100644 --- a/arch/blackfin/mach-bf527/boards/cm_bf527.c +++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c | |||
@@ -664,6 +664,11 @@ static struct resource bfin_spi0_resource[] = { | |||
664 | [1] = { | 664 | [1] = { |
665 | .start = CH_SPI, | 665 | .start = CH_SPI, |
666 | .end = CH_SPI, | 666 | .end = CH_SPI, |
667 | .flags = IORESOURCE_DMA, | ||
668 | }, | ||
669 | [2] = { | ||
670 | .start = IRQ_SPI, | ||
671 | .end = IRQ_SPI, | ||
667 | .flags = IORESOURCE_IRQ, | 672 | .flags = IORESOURCE_IRQ, |
668 | }, | 673 | }, |
669 | }; | 674 | }; |
diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c index 1435c5d38cd5..9f9c0005dcf1 100644 --- a/arch/blackfin/mach-bf527/boards/ezbrd.c +++ b/arch/blackfin/mach-bf527/boards/ezbrd.c | |||
@@ -467,6 +467,11 @@ static struct resource bfin_spi0_resource[] = { | |||
467 | [1] = { | 467 | [1] = { |
468 | .start = CH_SPI, | 468 | .start = CH_SPI, |
469 | .end = CH_SPI, | 469 | .end = CH_SPI, |
470 | .flags = IORESOURCE_DMA, | ||
471 | }, | ||
472 | [2] = { | ||
473 | .start = IRQ_SPI, | ||
474 | .end = IRQ_SPI, | ||
470 | .flags = IORESOURCE_IRQ, | 475 | .flags = IORESOURCE_IRQ, |
471 | }, | 476 | }, |
472 | }; | 477 | }; |
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c index 147edd1eb1ad..3e5b7db6b065 100644 --- a/arch/blackfin/mach-bf527/boards/ezkit.c +++ b/arch/blackfin/mach-bf527/boards/ezkit.c | |||
@@ -723,6 +723,11 @@ static struct resource bfin_spi0_resource[] = { | |||
723 | [1] = { | 723 | [1] = { |
724 | .start = CH_SPI, | 724 | .start = CH_SPI, |
725 | .end = CH_SPI, | 725 | .end = CH_SPI, |
726 | .flags = IORESOURCE_DMA, | ||
727 | }, | ||
728 | [2] = { | ||
729 | .start = IRQ_SPI, | ||
730 | .end = IRQ_SPI, | ||
726 | .flags = IORESOURCE_IRQ, | 731 | .flags = IORESOURCE_IRQ, |
727 | }, | 732 | }, |
728 | }; | 733 | }; |
diff --git a/arch/blackfin/mach-bf533/boards/H8606.c b/arch/blackfin/mach-bf533/boards/H8606.c index 895f213ea454..38cf8ffd6d74 100644 --- a/arch/blackfin/mach-bf533/boards/H8606.c +++ b/arch/blackfin/mach-bf533/boards/H8606.c | |||
@@ -266,6 +266,11 @@ static struct resource bfin_spi0_resource[] = { | |||
266 | [1] = { | 266 | [1] = { |
267 | .start = CH_SPI, | 267 | .start = CH_SPI, |
268 | .end = CH_SPI, | 268 | .end = CH_SPI, |
269 | .flags = IORESOURCE_DMA, | ||
270 | }, | ||
271 | [2] = { | ||
272 | .start = IRQ_SPI, | ||
273 | .end = IRQ_SPI, | ||
269 | .flags = IORESOURCE_IRQ, | 274 | .flags = IORESOURCE_IRQ, |
270 | } | 275 | } |
271 | }; | 276 | }; |
diff --git a/arch/blackfin/mach-bf533/boards/blackstamp.c b/arch/blackfin/mach-bf533/boards/blackstamp.c index 0765872a8ada..9ecdc361fa6d 100644 --- a/arch/blackfin/mach-bf533/boards/blackstamp.c +++ b/arch/blackfin/mach-bf533/boards/blackstamp.c | |||
@@ -162,6 +162,11 @@ static struct resource bfin_spi0_resource[] = { | |||
162 | [1] = { | 162 | [1] = { |
163 | .start = CH_SPI, | 163 | .start = CH_SPI, |
164 | .end = CH_SPI, | 164 | .end = CH_SPI, |
165 | .flags = IORESOURCE_DMA, | ||
166 | }, | ||
167 | [2] = { | ||
168 | .start = IRQ_SPI, | ||
169 | .end = IRQ_SPI, | ||
165 | .flags = IORESOURCE_IRQ, | 170 | .flags = IORESOURCE_IRQ, |
166 | } | 171 | } |
167 | }; | 172 | }; |
diff --git a/arch/blackfin/mach-bf533/boards/cm_bf533.c b/arch/blackfin/mach-bf533/boards/cm_bf533.c index a727e538fa28..1443e92d8b62 100644 --- a/arch/blackfin/mach-bf533/boards/cm_bf533.c +++ b/arch/blackfin/mach-bf533/boards/cm_bf533.c | |||
@@ -160,6 +160,11 @@ static struct resource bfin_spi0_resource[] = { | |||
160 | [1] = { | 160 | [1] = { |
161 | .start = CH_SPI, | 161 | .start = CH_SPI, |
162 | .end = CH_SPI, | 162 | .end = CH_SPI, |
163 | .flags = IORESOURCE_DMA, | ||
164 | }, | ||
165 | [2] = { | ||
166 | .start = IRQ_SPI, | ||
167 | .end = IRQ_SPI, | ||
163 | .flags = IORESOURCE_IRQ, | 168 | .flags = IORESOURCE_IRQ, |
164 | } | 169 | } |
165 | }; | 170 | }; |
diff --git a/arch/blackfin/mach-bf533/boards/ezkit.c b/arch/blackfin/mach-bf533/boards/ezkit.c index 842f1c9c2393..89a5ec4ca048 100644 --- a/arch/blackfin/mach-bf533/boards/ezkit.c +++ b/arch/blackfin/mach-bf533/boards/ezkit.c | |||
@@ -196,6 +196,11 @@ static struct resource bfin_spi0_resource[] = { | |||
196 | [1] = { | 196 | [1] = { |
197 | .start = CH_SPI, | 197 | .start = CH_SPI, |
198 | .end = CH_SPI, | 198 | .end = CH_SPI, |
199 | .flags = IORESOURCE_DMA, | ||
200 | }, | ||
201 | [2] = { | ||
202 | .start = IRQ_SPI, | ||
203 | .end = IRQ_SPI, | ||
199 | .flags = IORESOURCE_IRQ, | 204 | .flags = IORESOURCE_IRQ, |
200 | } | 205 | } |
201 | }; | 206 | }; |
diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c index e19c565ade16..a68ade8a3ca2 100644 --- a/arch/blackfin/mach-bf533/boards/stamp.c +++ b/arch/blackfin/mach-bf533/boards/stamp.c | |||
@@ -299,6 +299,11 @@ static struct resource bfin_spi0_resource[] = { | |||
299 | [1] = { | 299 | [1] = { |
300 | .start = CH_SPI, | 300 | .start = CH_SPI, |
301 | .end = CH_SPI, | 301 | .end = CH_SPI, |
302 | .flags = IORESOURCE_DMA, | ||
303 | }, | ||
304 | [2] = { | ||
305 | .start = IRQ_SPI, | ||
306 | .end = IRQ_SPI, | ||
302 | .flags = IORESOURCE_IRQ, | 307 | .flags = IORESOURCE_IRQ, |
303 | } | 308 | } |
304 | }; | 309 | }; |
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537.c b/arch/blackfin/mach-bf537/boards/cm_bf537.c index 4fee19673127..2a87d1cfcd06 100644 --- a/arch/blackfin/mach-bf537/boards/cm_bf537.c +++ b/arch/blackfin/mach-bf537/boards/cm_bf537.c | |||
@@ -182,8 +182,13 @@ static struct resource bfin_spi0_resource[] = { | |||
182 | [1] = { | 182 | [1] = { |
183 | .start = CH_SPI, | 183 | .start = CH_SPI, |
184 | .end = CH_SPI, | 184 | .end = CH_SPI, |
185 | .flags = IORESOURCE_DMA, | ||
186 | }, | ||
187 | [2] = { | ||
188 | .start = IRQ_SPI, | ||
189 | .end = IRQ_SPI, | ||
185 | .flags = IORESOURCE_IRQ, | 190 | .flags = IORESOURCE_IRQ, |
186 | } | 191 | }, |
187 | }; | 192 | }; |
188 | 193 | ||
189 | /* SPI controller data */ | 194 | /* SPI controller data */ |
diff --git a/arch/blackfin/mach-bf537/boards/minotaur.c b/arch/blackfin/mach-bf537/boards/minotaur.c index 3c159819e555..399f81da7b93 100644 --- a/arch/blackfin/mach-bf537/boards/minotaur.c +++ b/arch/blackfin/mach-bf537/boards/minotaur.c | |||
@@ -184,6 +184,11 @@ static struct resource bfin_spi0_resource[] = { | |||
184 | [1] = { | 184 | [1] = { |
185 | .start = CH_SPI, | 185 | .start = CH_SPI, |
186 | .end = CH_SPI, | 186 | .end = CH_SPI, |
187 | .flags = IORESOURCE_DMA, | ||
188 | }, | ||
189 | [2] = { | ||
190 | .start = IRQ_SPI, | ||
191 | .end = IRQ_SPI, | ||
187 | .flags = IORESOURCE_IRQ, | 192 | .flags = IORESOURCE_IRQ, |
188 | }, | 193 | }, |
189 | }; | 194 | }; |
diff --git a/arch/blackfin/mach-bf537/boards/pnav10.c b/arch/blackfin/mach-bf537/boards/pnav10.c index 26707ce39f29..838240f151f5 100644 --- a/arch/blackfin/mach-bf537/boards/pnav10.c +++ b/arch/blackfin/mach-bf537/boards/pnav10.c | |||
@@ -398,8 +398,13 @@ static struct resource bfin_spi0_resource[] = { | |||
398 | [1] = { | 398 | [1] = { |
399 | .start = CH_SPI, | 399 | .start = CH_SPI, |
400 | .end = CH_SPI, | 400 | .end = CH_SPI, |
401 | .flags = IORESOURCE_DMA, | ||
402 | }, | ||
403 | [2] = { | ||
404 | .start = IRQ_SPI, | ||
405 | .end = IRQ_SPI, | ||
401 | .flags = IORESOURCE_IRQ, | 406 | .flags = IORESOURCE_IRQ, |
402 | } | 407 | }, |
403 | }; | 408 | }; |
404 | 409 | ||
405 | /* SPI controller data */ | 410 | /* SPI controller data */ |
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c index dfb5036f8a6b..ff7228caa7da 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ b/arch/blackfin/mach-bf537/boards/stamp.c | |||
@@ -1345,7 +1345,7 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = { | |||
1345 | #if defined(CONFIG_PMIC_ADP5520) || defined(CONFIG_PMIC_ADP5520_MODULE) | 1345 | #if defined(CONFIG_PMIC_ADP5520) || defined(CONFIG_PMIC_ADP5520_MODULE) |
1346 | { | 1346 | { |
1347 | I2C_BOARD_INFO("pmic-adp5520", 0x32), | 1347 | I2C_BOARD_INFO("pmic-adp5520", 0x32), |
1348 | .irq = IRQ_PF7, | 1348 | .irq = IRQ_PG0, |
1349 | .platform_data = (void *)&adp5520_pdev_data, | 1349 | .platform_data = (void *)&adp5520_pdev_data, |
1350 | }, | 1350 | }, |
1351 | #endif | 1351 | #endif |
diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c index 280574591201..e523e6e610d0 100644 --- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c +++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c | |||
@@ -182,6 +182,11 @@ static struct resource bfin_spi0_resource[] = { | |||
182 | [1] = { | 182 | [1] = { |
183 | .start = CH_SPI, | 183 | .start = CH_SPI, |
184 | .end = CH_SPI, | 184 | .end = CH_SPI, |
185 | .flags = IORESOURCE_DMA, | ||
186 | }, | ||
187 | [2] = { | ||
188 | .start = IRQ_SPI, | ||
189 | .end = IRQ_SPI, | ||
185 | .flags = IORESOURCE_IRQ, | 190 | .flags = IORESOURCE_IRQ, |
186 | } | 191 | } |
187 | }; | 192 | }; |
diff --git a/arch/blackfin/mach-bf538/boards/ezkit.c b/arch/blackfin/mach-bf538/boards/ezkit.c index e37cb9378884..57695b4c3c09 100644 --- a/arch/blackfin/mach-bf538/boards/ezkit.c +++ b/arch/blackfin/mach-bf538/boards/ezkit.c | |||
@@ -352,6 +352,11 @@ static struct resource bfin_spi0_resource[] = { | |||
352 | [1] = { | 352 | [1] = { |
353 | .start = CH_SPI0, | 353 | .start = CH_SPI0, |
354 | .end = CH_SPI0, | 354 | .end = CH_SPI0, |
355 | .flags = IORESOURCE_DMA, | ||
356 | }, | ||
357 | [2] = { | ||
358 | .start = IRQ_SPI0, | ||
359 | .end = IRQ_SPI0, | ||
355 | .flags = IORESOURCE_IRQ, | 360 | .flags = IORESOURCE_IRQ, |
356 | } | 361 | } |
357 | }; | 362 | }; |
@@ -366,6 +371,11 @@ static struct resource bfin_spi1_resource[] = { | |||
366 | [1] = { | 371 | [1] = { |
367 | .start = CH_SPI1, | 372 | .start = CH_SPI1, |
368 | .end = CH_SPI1, | 373 | .end = CH_SPI1, |
374 | .flags = IORESOURCE_DMA, | ||
375 | }, | ||
376 | [2] = { | ||
377 | .start = IRQ_SPI1, | ||
378 | .end = IRQ_SPI1, | ||
369 | .flags = IORESOURCE_IRQ, | 379 | .flags = IORESOURCE_IRQ, |
370 | } | 380 | } |
371 | }; | 381 | }; |
diff --git a/arch/blackfin/mach-bf548/boards/cm_bf548.c b/arch/blackfin/mach-bf548/boards/cm_bf548.c index f53ad682530b..f5a3c30a41bd 100644 --- a/arch/blackfin/mach-bf548/boards/cm_bf548.c +++ b/arch/blackfin/mach-bf548/boards/cm_bf548.c | |||
@@ -612,6 +612,11 @@ static struct resource bfin_spi0_resource[] = { | |||
612 | [1] = { | 612 | [1] = { |
613 | .start = CH_SPI0, | 613 | .start = CH_SPI0, |
614 | .end = CH_SPI0, | 614 | .end = CH_SPI0, |
615 | .flags = IORESOURCE_DMA, | ||
616 | }, | ||
617 | [2] = { | ||
618 | .start = IRQ_SPI0, | ||
619 | .end = IRQ_SPI0, | ||
615 | .flags = IORESOURCE_IRQ, | 620 | .flags = IORESOURCE_IRQ, |
616 | } | 621 | } |
617 | }; | 622 | }; |
@@ -626,6 +631,11 @@ static struct resource bfin_spi1_resource[] = { | |||
626 | [1] = { | 631 | [1] = { |
627 | .start = CH_SPI1, | 632 | .start = CH_SPI1, |
628 | .end = CH_SPI1, | 633 | .end = CH_SPI1, |
634 | .flags = IORESOURCE_DMA, | ||
635 | }, | ||
636 | [2] = { | ||
637 | .start = IRQ_SPI1, | ||
638 | .end = IRQ_SPI1, | ||
629 | .flags = IORESOURCE_IRQ, | 639 | .flags = IORESOURCE_IRQ, |
630 | } | 640 | } |
631 | }; | 641 | }; |
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c index add5a17452ce..805a57b5e650 100644 --- a/arch/blackfin/mach-bf548/boards/ezkit.c +++ b/arch/blackfin/mach-bf548/boards/ezkit.c | |||
@@ -396,6 +396,8 @@ static struct platform_device bfin_sir3_device = { | |||
396 | #endif | 396 | #endif |
397 | 397 | ||
398 | #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) | 398 | #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) |
399 | #include <linux/smsc911x.h> | ||
400 | |||
399 | static struct resource smsc911x_resources[] = { | 401 | static struct resource smsc911x_resources[] = { |
400 | { | 402 | { |
401 | .name = "smsc911x-memory", | 403 | .name = "smsc911x-memory", |
@@ -409,11 +411,22 @@ static struct resource smsc911x_resources[] = { | |||
409 | .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL, | 411 | .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL, |
410 | }, | 412 | }, |
411 | }; | 413 | }; |
414 | |||
415 | static struct smsc911x_platform_config smsc911x_config = { | ||
416 | .flags = SMSC911X_USE_32BIT, | ||
417 | .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, | ||
418 | .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN, | ||
419 | .phy_interface = PHY_INTERFACE_MODE_MII, | ||
420 | }; | ||
421 | |||
412 | static struct platform_device smsc911x_device = { | 422 | static struct platform_device smsc911x_device = { |
413 | .name = "smsc911x", | 423 | .name = "smsc911x", |
414 | .id = 0, | 424 | .id = 0, |
415 | .num_resources = ARRAY_SIZE(smsc911x_resources), | 425 | .num_resources = ARRAY_SIZE(smsc911x_resources), |
416 | .resource = smsc911x_resources, | 426 | .resource = smsc911x_resources, |
427 | .dev = { | ||
428 | .platform_data = &smsc911x_config, | ||
429 | }, | ||
417 | }; | 430 | }; |
418 | #endif | 431 | #endif |
419 | 432 | ||
@@ -741,6 +754,11 @@ static struct resource bfin_spi0_resource[] = { | |||
741 | [1] = { | 754 | [1] = { |
742 | .start = CH_SPI0, | 755 | .start = CH_SPI0, |
743 | .end = CH_SPI0, | 756 | .end = CH_SPI0, |
757 | .flags = IORESOURCE_DMA, | ||
758 | }, | ||
759 | [2] = { | ||
760 | .start = IRQ_SPI0, | ||
761 | .end = IRQ_SPI0, | ||
744 | .flags = IORESOURCE_IRQ, | 762 | .flags = IORESOURCE_IRQ, |
745 | } | 763 | } |
746 | }; | 764 | }; |
@@ -755,6 +773,11 @@ static struct resource bfin_spi1_resource[] = { | |||
755 | [1] = { | 773 | [1] = { |
756 | .start = CH_SPI1, | 774 | .start = CH_SPI1, |
757 | .end = CH_SPI1, | 775 | .end = CH_SPI1, |
776 | .flags = IORESOURCE_DMA, | ||
777 | }, | ||
778 | [2] = { | ||
779 | .start = IRQ_SPI1, | ||
780 | .end = IRQ_SPI1, | ||
758 | .flags = IORESOURCE_IRQ, | 781 | .flags = IORESOURCE_IRQ, |
759 | } | 782 | } |
760 | }; | 783 | }; |
diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c index 0dd9685e5d53..0c9d72c5f5ba 100644 --- a/arch/blackfin/mach-bf561/boards/cm_bf561.c +++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c | |||
@@ -177,8 +177,13 @@ static struct resource bfin_spi0_resource[] = { | |||
177 | [1] = { | 177 | [1] = { |
178 | .start = CH_SPI, | 178 | .start = CH_SPI, |
179 | .end = CH_SPI, | 179 | .end = CH_SPI, |
180 | .flags = IORESOURCE_DMA, | ||
181 | }, | ||
182 | [2] = { | ||
183 | .start = IRQ_SPI, | ||
184 | .end = IRQ_SPI, | ||
180 | .flags = IORESOURCE_IRQ, | 185 | .flags = IORESOURCE_IRQ, |
181 | } | 186 | }, |
182 | }; | 187 | }; |
183 | 188 | ||
184 | /* SPI controller data */ | 189 | /* SPI controller data */ |
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c index 0e2178a1aec5..b5ef7ff7b7bd 100644 --- a/arch/blackfin/mach-bf561/boards/ezkit.c +++ b/arch/blackfin/mach-bf561/boards/ezkit.c | |||
@@ -304,6 +304,11 @@ static struct resource bfin_spi0_resource[] = { | |||
304 | [1] = { | 304 | [1] = { |
305 | .start = CH_SPI, | 305 | .start = CH_SPI, |
306 | .end = CH_SPI, | 306 | .end = CH_SPI, |
307 | .flags = IORESOURCE_DMA, | ||
308 | }, | ||
309 | [2] = { | ||
310 | .start = IRQ_SPI, | ||
311 | .end = IRQ_SPI, | ||
307 | .flags = IORESOURCE_IRQ, | 312 | .flags = IORESOURCE_IRQ, |
308 | } | 313 | } |
309 | }; | 314 | }; |
diff --git a/arch/blackfin/mach-common/cache-c.c b/arch/blackfin/mach-common/cache-c.c index e6ab1f815123..b59ce3cb3807 100644 --- a/arch/blackfin/mach-common/cache-c.c +++ b/arch/blackfin/mach-common/cache-c.c | |||
@@ -16,9 +16,21 @@ | |||
16 | void blackfin_invalidate_entire_dcache(void) | 16 | void blackfin_invalidate_entire_dcache(void) |
17 | { | 17 | { |
18 | u32 dmem = bfin_read_DMEM_CONTROL(); | 18 | u32 dmem = bfin_read_DMEM_CONTROL(); |
19 | SSYNC(); | ||
20 | bfin_write_DMEM_CONTROL(dmem & ~0xc); | 19 | bfin_write_DMEM_CONTROL(dmem & ~0xc); |
21 | SSYNC(); | 20 | SSYNC(); |
22 | bfin_write_DMEM_CONTROL(dmem); | 21 | bfin_write_DMEM_CONTROL(dmem); |
23 | SSYNC(); | 22 | SSYNC(); |
24 | } | 23 | } |
24 | |||
25 | /* Invalidate the Entire Instruction cache by | ||
26 | * clearing IMC bit | ||
27 | */ | ||
28 | void blackfin_invalidate_entire_icache(void) | ||
29 | { | ||
30 | u32 imem = bfin_read_IMEM_CONTROL(); | ||
31 | bfin_write_IMEM_CONTROL(imem & ~0x4); | ||
32 | SSYNC(); | ||
33 | bfin_write_IMEM_CONTROL(imem); | ||
34 | SSYNC(); | ||
35 | } | ||
36 | |||
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S index da0558ad1b1a..31fa313e81cf 100644 --- a/arch/blackfin/mach-common/entry.S +++ b/arch/blackfin/mach-common/entry.S | |||
@@ -42,6 +42,7 @@ | |||
42 | #include <asm/thread_info.h> /* TIF_NEED_RESCHED */ | 42 | #include <asm/thread_info.h> /* TIF_NEED_RESCHED */ |
43 | #include <asm/asm-offsets.h> | 43 | #include <asm/asm-offsets.h> |
44 | #include <asm/trace.h> | 44 | #include <asm/trace.h> |
45 | #include <asm/traps.h> | ||
45 | 46 | ||
46 | #include <asm/context.S> | 47 | #include <asm/context.S> |
47 | 48 | ||
@@ -84,13 +85,15 @@ ENTRY(_ex_workaround_261) | |||
84 | if !cc jump _bfin_return_from_exception; | 85 | if !cc jump _bfin_return_from_exception; |
85 | /* fall through */ | 86 | /* fall through */ |
86 | R7 = P4; | 87 | R7 = P4; |
87 | R6 = 0x26; /* Data CPLB Miss */ | 88 | R6 = VEC_CPLB_M; /* Data CPLB Miss */ |
88 | cc = R6 == R7; | 89 | cc = R6 == R7; |
89 | if cc jump _ex_dcplb_miss (BP); | 90 | if cc jump _ex_dcplb_miss (BP); |
90 | R6 = 0x23; /* Data CPLB Miss */ | 91 | #ifdef CONFIG_MPU |
92 | R6 = VEC_CPLB_VL; /* Data CPLB Violation */ | ||
91 | cc = R6 == R7; | 93 | cc = R6 == R7; |
92 | if cc jump _ex_dcplb_viol (BP); | 94 | if cc jump _ex_dcplb_viol (BP); |
93 | /* Handle 0x23 Data CPLB Protection Violation | 95 | #endif |
96 | /* Handle Data CPLB Protection Violation | ||
94 | * and Data CPLB Multiple Hits - Linux Trap Zero | 97 | * and Data CPLB Multiple Hits - Linux Trap Zero |
95 | */ | 98 | */ |
96 | jump _ex_trap_c; | 99 | jump _ex_trap_c; |
@@ -270,7 +273,7 @@ ENTRY(_bfin_return_from_exception) | |||
270 | r6.l = lo(SEQSTAT_EXCAUSE); | 273 | r6.l = lo(SEQSTAT_EXCAUSE); |
271 | r6.h = hi(SEQSTAT_EXCAUSE); | 274 | r6.h = hi(SEQSTAT_EXCAUSE); |
272 | r7 = r7 & r6; | 275 | r7 = r7 & r6; |
273 | r6 = 0x25; | 276 | r6 = VEC_UNCOV; |
274 | CC = R7 == R6; | 277 | CC = R7 == R6; |
275 | if CC JUMP _double_fault; | 278 | if CC JUMP _double_fault; |
276 | #endif | 279 | #endif |
@@ -1605,6 +1608,7 @@ ENTRY(_sys_call_table) | |||
1605 | .long _sys_inotify_init1 /* 365 */ | 1608 | .long _sys_inotify_init1 /* 365 */ |
1606 | .long _sys_preadv | 1609 | .long _sys_preadv |
1607 | .long _sys_pwritev | 1610 | .long _sys_pwritev |
1611 | .long _sys_rt_tgsigqueueinfo | ||
1608 | 1612 | ||
1609 | .rept NR_syscalls-(.-_sys_call_table)/4 | 1613 | .rept NR_syscalls-(.-_sys_call_table)/4 |
1610 | .long _sys_ni_syscall | 1614 | .long _sys_ni_syscall |
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c index 3b8ebaee77f2..61840059dfac 100644 --- a/arch/blackfin/mach-common/smp.c +++ b/arch/blackfin/mach-common/smp.c | |||
@@ -144,7 +144,7 @@ static void ipi_call_function(unsigned int cpu, struct ipi_message *msg) | |||
144 | 144 | ||
145 | static irqreturn_t ipi_handler(int irq, void *dev_instance) | 145 | static irqreturn_t ipi_handler(int irq, void *dev_instance) |
146 | { | 146 | { |
147 | struct ipi_message *msg, *mg; | 147 | struct ipi_message *msg; |
148 | struct ipi_message_queue *msg_queue; | 148 | struct ipi_message_queue *msg_queue; |
149 | unsigned int cpu = smp_processor_id(); | 149 | unsigned int cpu = smp_processor_id(); |
150 | 150 | ||
@@ -154,7 +154,8 @@ static irqreturn_t ipi_handler(int irq, void *dev_instance) | |||
154 | msg_queue->count++; | 154 | msg_queue->count++; |
155 | 155 | ||
156 | spin_lock(&msg_queue->lock); | 156 | spin_lock(&msg_queue->lock); |
157 | list_for_each_entry_safe(msg, mg, &msg_queue->head, list) { | 157 | while (!list_empty(&msg_queue->head)) { |
158 | msg = list_entry(msg_queue->head.next, typeof(*msg), list); | ||
158 | list_del(&msg->list); | 159 | list_del(&msg->list); |
159 | switch (msg->type) { | 160 | switch (msg->type) { |
160 | case BFIN_IPI_RESCHEDULE: | 161 | case BFIN_IPI_RESCHEDULE: |
@@ -221,7 +222,7 @@ int smp_call_function(void (*func)(void *info), void *info, int wait) | |||
221 | for_each_cpu_mask(cpu, callmap) { | 222 | for_each_cpu_mask(cpu, callmap) { |
222 | msg_queue = &per_cpu(ipi_msg_queue, cpu); | 223 | msg_queue = &per_cpu(ipi_msg_queue, cpu); |
223 | spin_lock_irqsave(&msg_queue->lock, flags); | 224 | spin_lock_irqsave(&msg_queue->lock, flags); |
224 | list_add(&msg->list, &msg_queue->head); | 225 | list_add_tail(&msg->list, &msg_queue->head); |
225 | spin_unlock_irqrestore(&msg_queue->lock, flags); | 226 | spin_unlock_irqrestore(&msg_queue->lock, flags); |
226 | platform_send_ipi_cpu(cpu); | 227 | platform_send_ipi_cpu(cpu); |
227 | } | 228 | } |
@@ -261,7 +262,7 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, | |||
261 | 262 | ||
262 | msg_queue = &per_cpu(ipi_msg_queue, cpu); | 263 | msg_queue = &per_cpu(ipi_msg_queue, cpu); |
263 | spin_lock_irqsave(&msg_queue->lock, flags); | 264 | spin_lock_irqsave(&msg_queue->lock, flags); |
264 | list_add(&msg->list, &msg_queue->head); | 265 | list_add_tail(&msg->list, &msg_queue->head); |
265 | spin_unlock_irqrestore(&msg_queue->lock, flags); | 266 | spin_unlock_irqrestore(&msg_queue->lock, flags); |
266 | platform_send_ipi_cpu(cpu); | 267 | platform_send_ipi_cpu(cpu); |
267 | 268 | ||
@@ -292,7 +293,7 @@ void smp_send_reschedule(int cpu) | |||
292 | 293 | ||
293 | msg_queue = &per_cpu(ipi_msg_queue, cpu); | 294 | msg_queue = &per_cpu(ipi_msg_queue, cpu); |
294 | spin_lock_irqsave(&msg_queue->lock, flags); | 295 | spin_lock_irqsave(&msg_queue->lock, flags); |
295 | list_add(&msg->list, &msg_queue->head); | 296 | list_add_tail(&msg->list, &msg_queue->head); |
296 | spin_unlock_irqrestore(&msg_queue->lock, flags); | 297 | spin_unlock_irqrestore(&msg_queue->lock, flags); |
297 | platform_send_ipi_cpu(cpu); | 298 | platform_send_ipi_cpu(cpu); |
298 | 299 | ||
@@ -320,7 +321,7 @@ void smp_send_stop(void) | |||
320 | for_each_cpu_mask(cpu, callmap) { | 321 | for_each_cpu_mask(cpu, callmap) { |
321 | msg_queue = &per_cpu(ipi_msg_queue, cpu); | 322 | msg_queue = &per_cpu(ipi_msg_queue, cpu); |
322 | spin_lock_irqsave(&msg_queue->lock, flags); | 323 | spin_lock_irqsave(&msg_queue->lock, flags); |
323 | list_add(&msg->list, &msg_queue->head); | 324 | list_add_tail(&msg->list, &msg_queue->head); |
324 | spin_unlock_irqrestore(&msg_queue->lock, flags); | 325 | spin_unlock_irqrestore(&msg_queue->lock, flags); |
325 | platform_send_ipi_cpu(cpu); | 326 | platform_send_ipi_cpu(cpu); |
326 | } | 327 | } |
@@ -468,6 +469,17 @@ void smp_icache_flush_range_others(unsigned long start, unsigned long end) | |||
468 | } | 469 | } |
469 | EXPORT_SYMBOL_GPL(smp_icache_flush_range_others); | 470 | EXPORT_SYMBOL_GPL(smp_icache_flush_range_others); |
470 | 471 | ||
472 | #ifdef __ARCH_SYNC_CORE_ICACHE | ||
473 | void resync_core_icache(void) | ||
474 | { | ||
475 | unsigned int cpu = get_cpu(); | ||
476 | blackfin_invalidate_entire_icache(); | ||
477 | ++per_cpu(cpu_data, cpu).icache_invld_count; | ||
478 | put_cpu(); | ||
479 | } | ||
480 | EXPORT_SYMBOL(resync_core_icache); | ||
481 | #endif | ||
482 | |||
471 | #ifdef __ARCH_SYNC_CORE_DCACHE | 483 | #ifdef __ARCH_SYNC_CORE_DCACHE |
472 | unsigned long barrier_mask __attribute__ ((__section__(".l2.bss"))); | 484 | unsigned long barrier_mask __attribute__ ((__section__(".l2.bss"))); |
473 | 485 | ||
diff --git a/arch/ia64/mm/extable.c b/arch/ia64/mm/extable.c index e95d5ad9285d..c99a41e29fe8 100644 --- a/arch/ia64/mm/extable.c +++ b/arch/ia64/mm/extable.c | |||
@@ -8,7 +8,7 @@ | |||
8 | #include <linux/sort.h> | 8 | #include <linux/sort.h> |
9 | 9 | ||
10 | #include <asm/uaccess.h> | 10 | #include <asm/uaccess.h> |
11 | #include <asm/module.h> | 11 | #include <linux/module.h> |
12 | 12 | ||
13 | static int cmp_ex(const void *a, const void *b) | 13 | static int cmp_ex(const void *a, const void *b) |
14 | { | 14 | { |
@@ -55,7 +55,7 @@ void sort_extable (struct exception_table_entry *start, | |||
55 | 55 | ||
56 | static inline unsigned long ex_to_addr(const struct exception_table_entry *x) | 56 | static inline unsigned long ex_to_addr(const struct exception_table_entry *x) |
57 | { | 57 | { |
58 | return (unsigned long)&x->insn + x->insn; | 58 | return (unsigned long)&x->addr + x->addr; |
59 | } | 59 | } |
60 | 60 | ||
61 | #ifdef CONFIG_MODULES | 61 | #ifdef CONFIG_MODULES |
diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig index 783da855a2e3..d6d35b2e5fe8 100644 --- a/arch/mips/configs/bigsur_defconfig +++ b/arch/mips/configs/bigsur_defconfig | |||
@@ -963,7 +963,7 @@ CONFIG_EEPROM_LEGACY=y | |||
963 | CONFIG_SENSORS_PCF8574=y | 963 | CONFIG_SENSORS_PCF8574=y |
964 | # CONFIG_PCF8575 is not set | 964 | # CONFIG_PCF8575 is not set |
965 | CONFIG_SENSORS_PCF8591=y | 965 | CONFIG_SENSORS_PCF8591=y |
966 | CONFIG_SENSORS_MAX6875=y | 966 | CONFIG_EEPROM_MAX6875=y |
967 | # CONFIG_SENSORS_TSL2550 is not set | 967 | # CONFIG_SENSORS_TSL2550 is not set |
968 | CONFIG_I2C_DEBUG_CORE=y | 968 | CONFIG_I2C_DEBUG_CORE=y |
969 | CONFIG_I2C_DEBUG_ALGO=y | 969 | CONFIG_I2C_DEBUG_ALGO=y |
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig index 8426d3b9501c..fadb351d249b 100644 --- a/arch/mips/configs/mtx1_defconfig +++ b/arch/mips/configs/mtx1_defconfig | |||
@@ -1849,7 +1849,7 @@ CONFIG_EEPROM_LEGACY=m | |||
1849 | CONFIG_SENSORS_PCF8574=m | 1849 | CONFIG_SENSORS_PCF8574=m |
1850 | CONFIG_SENSORS_PCA9539=m | 1850 | CONFIG_SENSORS_PCA9539=m |
1851 | CONFIG_SENSORS_PCF8591=m | 1851 | CONFIG_SENSORS_PCF8591=m |
1852 | CONFIG_SENSORS_MAX6875=m | 1852 | CONFIG_EEPROM_MAX6875=m |
1853 | # CONFIG_SENSORS_TSL2550 is not set | 1853 | # CONFIG_SENSORS_TSL2550 is not set |
1854 | # CONFIG_I2C_DEBUG_CORE is not set | 1854 | # CONFIG_I2C_DEBUG_CORE is not set |
1855 | # CONFIG_I2C_DEBUG_ALGO is not set | 1855 | # CONFIG_I2C_DEBUG_ALGO is not set |
diff --git a/arch/mips/include/asm/errno.h b/arch/mips/include/asm/errno.h index 3c0d840e4577..a0efc73819e4 100644 --- a/arch/mips/include/asm/errno.h +++ b/arch/mips/include/asm/errno.h | |||
@@ -119,6 +119,8 @@ | |||
119 | #define EOWNERDEAD 165 /* Owner died */ | 119 | #define EOWNERDEAD 165 /* Owner died */ |
120 | #define ENOTRECOVERABLE 166 /* State not recoverable */ | 120 | #define ENOTRECOVERABLE 166 /* State not recoverable */ |
121 | 121 | ||
122 | #define ERFKILL 167 /* Operation not possible due to RF-kill */ | ||
123 | |||
122 | #define EDQUOT 1133 /* Quota exceeded */ | 124 | #define EDQUOT 1133 /* Quota exceeded */ |
123 | 125 | ||
124 | #ifdef __KERNEL__ | 126 | #ifdef __KERNEL__ |
diff --git a/arch/parisc/include/asm/errno.h b/arch/parisc/include/asm/errno.h index e2f3ddc796be..9992abdd782d 100644 --- a/arch/parisc/include/asm/errno.h +++ b/arch/parisc/include/asm/errno.h | |||
@@ -120,5 +120,6 @@ | |||
120 | #define EOWNERDEAD 254 /* Owner died */ | 120 | #define EOWNERDEAD 254 /* Owner died */ |
121 | #define ENOTRECOVERABLE 255 /* State not recoverable */ | 121 | #define ENOTRECOVERABLE 255 /* State not recoverable */ |
122 | 122 | ||
123 | #define ERFKILL 256 /* Operation not possible due to RF-kill */ | ||
123 | 124 | ||
124 | #endif | 125 | #endif |
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index 7d044dfd9236..12dc7c409616 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig | |||
@@ -1808,7 +1808,7 @@ CONFIG_PCF8575=m | |||
1808 | CONFIG_SENSORS_PCA9539=m | 1808 | CONFIG_SENSORS_PCA9539=m |
1809 | CONFIG_SENSORS_PCF8591=m | 1809 | CONFIG_SENSORS_PCF8591=m |
1810 | # CONFIG_TPS65010 is not set | 1810 | # CONFIG_TPS65010 is not set |
1811 | CONFIG_SENSORS_MAX6875=m | 1811 | CONFIG_EEPROM_MAX6875=m |
1812 | CONFIG_SENSORS_TSL2550=m | 1812 | CONFIG_SENSORS_TSL2550=m |
1813 | CONFIG_MCU_MPC8349EMITX=m | 1813 | CONFIG_MCU_MPC8349EMITX=m |
1814 | # CONFIG_I2C_DEBUG_CORE is not set | 1814 | # CONFIG_I2C_DEBUG_CORE is not set |
diff --git a/arch/powerpc/include/asm/qe.h b/arch/powerpc/include/asm/qe.h index e0faf332c9c9..157c5ca581c8 100644 --- a/arch/powerpc/include/asm/qe.h +++ b/arch/powerpc/include/asm/qe.h | |||
@@ -675,6 +675,8 @@ struct ucc_slow_pram { | |||
675 | #define UCC_GETH_UPSMR_RMM 0x00001000 | 675 | #define UCC_GETH_UPSMR_RMM 0x00001000 |
676 | #define UCC_GETH_UPSMR_CAM 0x00000400 | 676 | #define UCC_GETH_UPSMR_CAM 0x00000400 |
677 | #define UCC_GETH_UPSMR_BRO 0x00000200 | 677 | #define UCC_GETH_UPSMR_BRO 0x00000200 |
678 | #define UCC_GETH_UPSMR_SMM 0x00000080 | ||
679 | #define UCC_GETH_UPSMR_SGMM 0x00000020 | ||
678 | 680 | ||
679 | /* UCC Transmit On Demand Register (UTODR) */ | 681 | /* UCC Transmit On Demand Register (UTODR) */ |
680 | #define UCC_SLOW_TOD 0x8000 | 682 | #define UCC_SLOW_TOD 0x8000 |
diff --git a/arch/powerpc/platforms/82xx/ep8248e.c b/arch/powerpc/platforms/82xx/ep8248e.c index 0eb6d7f62241..51fcae41f08a 100644 --- a/arch/powerpc/platforms/82xx/ep8248e.c +++ b/arch/powerpc/platforms/82xx/ep8248e.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/interrupt.h> | 14 | #include <linux/interrupt.h> |
15 | #include <linux/fsl_devices.h> | 15 | #include <linux/fsl_devices.h> |
16 | #include <linux/mdio-bitbang.h> | 16 | #include <linux/mdio-bitbang.h> |
17 | #include <linux/of_mdio.h> | ||
17 | #include <linux/of_platform.h> | 18 | #include <linux/of_platform.h> |
18 | 19 | ||
19 | #include <asm/io.h> | 20 | #include <asm/io.h> |
@@ -115,7 +116,7 @@ static int __devinit ep8248e_mdio_probe(struct of_device *ofdev, | |||
115 | struct mii_bus *bus; | 116 | struct mii_bus *bus; |
116 | struct resource res; | 117 | struct resource res; |
117 | struct device_node *node; | 118 | struct device_node *node; |
118 | int ret, i; | 119 | int ret; |
119 | 120 | ||
120 | node = of_get_parent(ofdev->node); | 121 | node = of_get_parent(ofdev->node); |
121 | of_node_put(node); | 122 | of_node_put(node); |
@@ -130,17 +131,13 @@ static int __devinit ep8248e_mdio_probe(struct of_device *ofdev, | |||
130 | if (!bus) | 131 | if (!bus) |
131 | return -ENOMEM; | 132 | return -ENOMEM; |
132 | 133 | ||
133 | bus->phy_mask = 0; | ||
134 | bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); | 134 | bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); |
135 | 135 | ||
136 | for (i = 0; i < PHY_MAX_ADDR; i++) | ||
137 | bus->irq[i] = -1; | ||
138 | |||
139 | bus->name = "ep8248e-mdio-bitbang"; | 136 | bus->name = "ep8248e-mdio-bitbang"; |
140 | bus->parent = &ofdev->dev; | 137 | bus->parent = &ofdev->dev; |
141 | snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start); | 138 | snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start); |
142 | 139 | ||
143 | return mdiobus_register(bus); | 140 | return of_mdiobus_register(bus, ofdev->node); |
144 | } | 141 | } |
145 | 142 | ||
146 | static int ep8248e_mdio_remove(struct of_device *ofdev) | 143 | static int ep8248e_mdio_remove(struct of_device *ofdev) |
diff --git a/arch/powerpc/platforms/pasemi/gpio_mdio.c b/arch/powerpc/platforms/pasemi/gpio_mdio.c index 75cc165d5bee..3bf546797cbb 100644 --- a/arch/powerpc/platforms/pasemi/gpio_mdio.c +++ b/arch/powerpc/platforms/pasemi/gpio_mdio.c | |||
@@ -29,7 +29,7 @@ | |||
29 | #include <linux/ioport.h> | 29 | #include <linux/ioport.h> |
30 | #include <linux/interrupt.h> | 30 | #include <linux/interrupt.h> |
31 | #include <linux/phy.h> | 31 | #include <linux/phy.h> |
32 | #include <linux/platform_device.h> | 32 | #include <linux/of_mdio.h> |
33 | #include <linux/of_platform.h> | 33 | #include <linux/of_platform.h> |
34 | 34 | ||
35 | #define DELAY 1 | 35 | #define DELAY 1 |
@@ -39,6 +39,7 @@ static void __iomem *gpio_regs; | |||
39 | struct gpio_priv { | 39 | struct gpio_priv { |
40 | int mdc_pin; | 40 | int mdc_pin; |
41 | int mdio_pin; | 41 | int mdio_pin; |
42 | int mdio_irqs[PHY_MAX_ADDR]; | ||
42 | }; | 43 | }; |
43 | 44 | ||
44 | #define MDC_PIN(bus) (((struct gpio_priv *)bus->priv)->mdc_pin) | 45 | #define MDC_PIN(bus) (((struct gpio_priv *)bus->priv)->mdc_pin) |
@@ -218,12 +219,11 @@ static int __devinit gpio_mdio_probe(struct of_device *ofdev, | |||
218 | const struct of_device_id *match) | 219 | const struct of_device_id *match) |
219 | { | 220 | { |
220 | struct device *dev = &ofdev->dev; | 221 | struct device *dev = &ofdev->dev; |
221 | struct device_node *phy_dn, *np = ofdev->node; | 222 | struct device_node *np = ofdev->node; |
222 | struct mii_bus *new_bus; | 223 | struct mii_bus *new_bus; |
223 | struct gpio_priv *priv; | 224 | struct gpio_priv *priv; |
224 | const unsigned int *prop; | 225 | const unsigned int *prop; |
225 | int err; | 226 | int err; |
226 | int i; | ||
227 | 227 | ||
228 | err = -ENOMEM; | 228 | err = -ENOMEM; |
229 | priv = kzalloc(sizeof(struct gpio_priv), GFP_KERNEL); | 229 | priv = kzalloc(sizeof(struct gpio_priv), GFP_KERNEL); |
@@ -244,27 +244,7 @@ static int __devinit gpio_mdio_probe(struct of_device *ofdev, | |||
244 | snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", *prop); | 244 | snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", *prop); |
245 | new_bus->priv = priv; | 245 | new_bus->priv = priv; |
246 | 246 | ||
247 | new_bus->phy_mask = 0; | 247 | new_bus->irq = priv->mdio_irqs; |
248 | |||
249 | new_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); | ||
250 | |||
251 | if (!new_bus->irq) | ||
252 | goto out_free_bus; | ||
253 | |||
254 | for (i = 0; i < PHY_MAX_ADDR; i++) | ||
255 | new_bus->irq[i] = NO_IRQ; | ||
256 | |||
257 | for (phy_dn = of_get_next_child(np, NULL); | ||
258 | phy_dn != NULL; | ||
259 | phy_dn = of_get_next_child(np, phy_dn)) { | ||
260 | const unsigned int *ip, *regp; | ||
261 | |||
262 | ip = of_get_property(phy_dn, "interrupts", NULL); | ||
263 | regp = of_get_property(phy_dn, "reg", NULL); | ||
264 | if (!ip || !regp || *regp >= PHY_MAX_ADDR) | ||
265 | continue; | ||
266 | new_bus->irq[*regp] = irq_create_mapping(NULL, *ip); | ||
267 | } | ||
268 | 248 | ||
269 | prop = of_get_property(np, "mdc-pin", NULL); | 249 | prop = of_get_property(np, "mdc-pin", NULL); |
270 | priv->mdc_pin = *prop; | 250 | priv->mdc_pin = *prop; |
@@ -275,7 +255,7 @@ static int __devinit gpio_mdio_probe(struct of_device *ofdev, | |||
275 | new_bus->parent = dev; | 255 | new_bus->parent = dev; |
276 | dev_set_drvdata(dev, new_bus); | 256 | dev_set_drvdata(dev, new_bus); |
277 | 257 | ||
278 | err = mdiobus_register(new_bus); | 258 | err = of_mdiobus_register(new_bus, np); |
279 | 259 | ||
280 | if (err != 0) { | 260 | if (err != 0) { |
281 | printk(KERN_ERR "%s: Cannot register as MDIO bus, err %d\n", | 261 | printk(KERN_ERR "%s: Cannot register as MDIO bus, err %d\n", |
@@ -286,8 +266,6 @@ static int __devinit gpio_mdio_probe(struct of_device *ofdev, | |||
286 | return 0; | 266 | return 0; |
287 | 267 | ||
288 | out_free_irq: | 268 | out_free_irq: |
289 | kfree(new_bus->irq); | ||
290 | out_free_bus: | ||
291 | kfree(new_bus); | 269 | kfree(new_bus); |
292 | out_free_priv: | 270 | out_free_priv: |
293 | kfree(priv); | 271 | kfree(priv); |
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 99dc3ded6b49..a14dba0e4d67 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -348,6 +348,9 @@ config ARCH_ENABLE_MEMORY_HOTPLUG | |||
348 | config ARCH_ENABLE_MEMORY_HOTREMOVE | 348 | config ARCH_ENABLE_MEMORY_HOTREMOVE |
349 | def_bool y | 349 | def_bool y |
350 | 350 | ||
351 | config ARCH_HIBERNATION_POSSIBLE | ||
352 | def_bool y if 64BIT | ||
353 | |||
351 | source "mm/Kconfig" | 354 | source "mm/Kconfig" |
352 | 355 | ||
353 | comment "I/O subsystem configuration" | 356 | comment "I/O subsystem configuration" |
@@ -592,6 +595,12 @@ config SECCOMP | |||
592 | 595 | ||
593 | endmenu | 596 | endmenu |
594 | 597 | ||
598 | menu "Power Management" | ||
599 | |||
600 | source "kernel/power/Kconfig" | ||
601 | |||
602 | endmenu | ||
603 | |||
595 | source "net/Kconfig" | 604 | source "net/Kconfig" |
596 | 605 | ||
597 | config PCMCIA | 606 | config PCMCIA |
diff --git a/arch/s390/Makefile b/arch/s390/Makefile index 578c61f15a4b..0ff387cebf88 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile | |||
@@ -88,7 +88,9 @@ LDFLAGS_vmlinux := -e start | |||
88 | head-y := arch/s390/kernel/head.o arch/s390/kernel/init_task.o | 88 | head-y := arch/s390/kernel/head.o arch/s390/kernel/init_task.o |
89 | 89 | ||
90 | core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \ | 90 | core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \ |
91 | arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/ | 91 | arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/ \ |
92 | arch/s390/power/ | ||
93 | |||
92 | libs-y += arch/s390/lib/ | 94 | libs-y += arch/s390/lib/ |
93 | drivers-y += drivers/s390/ | 95 | drivers-y += drivers/s390/ |
94 | drivers-$(CONFIG_MATHEMU) += arch/s390/math-emu/ | 96 | drivers-$(CONFIG_MATHEMU) += arch/s390/math-emu/ |
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index 1dfc7100c7ee..264528e4f58d 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * Exports appldata_register_ops() and appldata_unregister_ops() for the | 5 | * Exports appldata_register_ops() and appldata_unregister_ops() for the |
6 | * data gathering modules. | 6 | * data gathering modules. |
7 | * | 7 | * |
8 | * Copyright IBM Corp. 2003, 2008 | 8 | * Copyright IBM Corp. 2003, 2009 |
9 | * | 9 | * |
10 | * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> | 10 | * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> |
11 | */ | 11 | */ |
@@ -26,6 +26,8 @@ | |||
26 | #include <linux/notifier.h> | 26 | #include <linux/notifier.h> |
27 | #include <linux/cpu.h> | 27 | #include <linux/cpu.h> |
28 | #include <linux/workqueue.h> | 28 | #include <linux/workqueue.h> |
29 | #include <linux/suspend.h> | ||
30 | #include <linux/platform_device.h> | ||
29 | #include <asm/appldata.h> | 31 | #include <asm/appldata.h> |
30 | #include <asm/timer.h> | 32 | #include <asm/timer.h> |
31 | #include <asm/uaccess.h> | 33 | #include <asm/uaccess.h> |
@@ -41,6 +43,9 @@ | |||
41 | 43 | ||
42 | #define TOD_MICRO 0x01000 /* nr. of TOD clock units | 44 | #define TOD_MICRO 0x01000 /* nr. of TOD clock units |
43 | for 1 microsecond */ | 45 | for 1 microsecond */ |
46 | |||
47 | static struct platform_device *appldata_pdev; | ||
48 | |||
44 | /* | 49 | /* |
45 | * /proc entries (sysctl) | 50 | * /proc entries (sysctl) |
46 | */ | 51 | */ |
@@ -86,6 +91,7 @@ static atomic_t appldata_expire_count = ATOMIC_INIT(0); | |||
86 | static DEFINE_SPINLOCK(appldata_timer_lock); | 91 | static DEFINE_SPINLOCK(appldata_timer_lock); |
87 | static int appldata_interval = APPLDATA_CPU_INTERVAL; | 92 | static int appldata_interval = APPLDATA_CPU_INTERVAL; |
88 | static int appldata_timer_active; | 93 | static int appldata_timer_active; |
94 | static int appldata_timer_suspended = 0; | ||
89 | 95 | ||
90 | /* | 96 | /* |
91 | * Work queue | 97 | * Work queue |
@@ -475,6 +481,93 @@ void appldata_unregister_ops(struct appldata_ops *ops) | |||
475 | /********************** module-ops management <END> **************************/ | 481 | /********************** module-ops management <END> **************************/ |
476 | 482 | ||
477 | 483 | ||
484 | /**************************** suspend / resume *******************************/ | ||
485 | static int appldata_freeze(struct device *dev) | ||
486 | { | ||
487 | struct appldata_ops *ops; | ||
488 | int rc; | ||
489 | struct list_head *lh; | ||
490 | |||
491 | get_online_cpus(); | ||
492 | spin_lock(&appldata_timer_lock); | ||
493 | if (appldata_timer_active) { | ||
494 | __appldata_vtimer_setup(APPLDATA_DEL_TIMER); | ||
495 | appldata_timer_suspended = 1; | ||
496 | } | ||
497 | spin_unlock(&appldata_timer_lock); | ||
498 | put_online_cpus(); | ||
499 | |||
500 | mutex_lock(&appldata_ops_mutex); | ||
501 | list_for_each(lh, &appldata_ops_list) { | ||
502 | ops = list_entry(lh, struct appldata_ops, list); | ||
503 | if (ops->active == 1) { | ||
504 | rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC, | ||
505 | (unsigned long) ops->data, ops->size, | ||
506 | ops->mod_lvl); | ||
507 | if (rc != 0) | ||
508 | pr_err("Stopping the data collection for %s " | ||
509 | "failed with rc=%d\n", ops->name, rc); | ||
510 | } | ||
511 | } | ||
512 | mutex_unlock(&appldata_ops_mutex); | ||
513 | return 0; | ||
514 | } | ||
515 | |||
516 | static int appldata_restore(struct device *dev) | ||
517 | { | ||
518 | struct appldata_ops *ops; | ||
519 | int rc; | ||
520 | struct list_head *lh; | ||
521 | |||
522 | get_online_cpus(); | ||
523 | spin_lock(&appldata_timer_lock); | ||
524 | if (appldata_timer_suspended) { | ||
525 | __appldata_vtimer_setup(APPLDATA_ADD_TIMER); | ||
526 | appldata_timer_suspended = 0; | ||
527 | } | ||
528 | spin_unlock(&appldata_timer_lock); | ||
529 | put_online_cpus(); | ||
530 | |||
531 | mutex_lock(&appldata_ops_mutex); | ||
532 | list_for_each(lh, &appldata_ops_list) { | ||
533 | ops = list_entry(lh, struct appldata_ops, list); | ||
534 | if (ops->active == 1) { | ||
535 | ops->callback(ops->data); // init record | ||
536 | rc = appldata_diag(ops->record_nr, | ||
537 | APPLDATA_START_INTERVAL_REC, | ||
538 | (unsigned long) ops->data, ops->size, | ||
539 | ops->mod_lvl); | ||
540 | if (rc != 0) { | ||
541 | pr_err("Starting the data collection for %s " | ||
542 | "failed with rc=%d\n", ops->name, rc); | ||
543 | } | ||
544 | } | ||
545 | } | ||
546 | mutex_unlock(&appldata_ops_mutex); | ||
547 | return 0; | ||
548 | } | ||
549 | |||
550 | static int appldata_thaw(struct device *dev) | ||
551 | { | ||
552 | return appldata_restore(dev); | ||
553 | } | ||
554 | |||
555 | static struct dev_pm_ops appldata_pm_ops = { | ||
556 | .freeze = appldata_freeze, | ||
557 | .thaw = appldata_thaw, | ||
558 | .restore = appldata_restore, | ||
559 | }; | ||
560 | |||
561 | static struct platform_driver appldata_pdrv = { | ||
562 | .driver = { | ||
563 | .name = "appldata", | ||
564 | .owner = THIS_MODULE, | ||
565 | .pm = &appldata_pm_ops, | ||
566 | }, | ||
567 | }; | ||
568 | /************************* suspend / resume <END> ****************************/ | ||
569 | |||
570 | |||
478 | /******************************* init / exit *********************************/ | 571 | /******************************* init / exit *********************************/ |
479 | 572 | ||
480 | static void __cpuinit appldata_online_cpu(int cpu) | 573 | static void __cpuinit appldata_online_cpu(int cpu) |
@@ -531,11 +624,23 @@ static struct notifier_block __cpuinitdata appldata_nb = { | |||
531 | */ | 624 | */ |
532 | static int __init appldata_init(void) | 625 | static int __init appldata_init(void) |
533 | { | 626 | { |
534 | int i; | 627 | int i, rc; |
628 | |||
629 | rc = platform_driver_register(&appldata_pdrv); | ||
630 | if (rc) | ||
631 | return rc; | ||
535 | 632 | ||
633 | appldata_pdev = platform_device_register_simple("appldata", -1, NULL, | ||
634 | 0); | ||
635 | if (IS_ERR(appldata_pdev)) { | ||
636 | rc = PTR_ERR(appldata_pdev); | ||
637 | goto out_driver; | ||
638 | } | ||
536 | appldata_wq = create_singlethread_workqueue("appldata"); | 639 | appldata_wq = create_singlethread_workqueue("appldata"); |
537 | if (!appldata_wq) | 640 | if (!appldata_wq) { |
538 | return -ENOMEM; | 641 | rc = -ENOMEM; |
642 | goto out_device; | ||
643 | } | ||
539 | 644 | ||
540 | get_online_cpus(); | 645 | get_online_cpus(); |
541 | for_each_online_cpu(i) | 646 | for_each_online_cpu(i) |
@@ -547,6 +652,12 @@ static int __init appldata_init(void) | |||
547 | 652 | ||
548 | appldata_sysctl_header = register_sysctl_table(appldata_dir_table); | 653 | appldata_sysctl_header = register_sysctl_table(appldata_dir_table); |
549 | return 0; | 654 | return 0; |
655 | |||
656 | out_device: | ||
657 | platform_device_unregister(appldata_pdev); | ||
658 | out_driver: | ||
659 | platform_driver_unregister(&appldata_pdrv); | ||
660 | return rc; | ||
550 | } | 661 | } |
551 | 662 | ||
552 | __initcall(appldata_init); | 663 | __initcall(appldata_init); |
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h index ba007d8df941..2a5419551176 100644 --- a/arch/s390/include/asm/ccwdev.h +++ b/arch/s390/include/asm/ccwdev.h | |||
@@ -1,11 +1,9 @@ | |||
1 | /* | 1 | /* |
2 | * include/asm-s390/ccwdev.h | 2 | * Copyright IBM Corp. 2002, 2009 |
3 | * include/asm-s390x/ccwdev.h | ||
4 | * | 3 | * |
5 | * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation | 4 | * Author(s): Arnd Bergmann <arndb@de.ibm.com> |
6 | * Author(s): Arnd Bergmann <arndb@de.ibm.com> | ||
7 | * | 5 | * |
8 | * Interface for CCW device drivers | 6 | * Interface for CCW device drivers |
9 | */ | 7 | */ |
10 | #ifndef _S390_CCWDEV_H_ | 8 | #ifndef _S390_CCWDEV_H_ |
11 | #define _S390_CCWDEV_H_ | 9 | #define _S390_CCWDEV_H_ |
@@ -104,6 +102,11 @@ struct ccw_device { | |||
104 | * @set_offline: called when setting device offline | 102 | * @set_offline: called when setting device offline |
105 | * @notify: notify driver of device state changes | 103 | * @notify: notify driver of device state changes |
106 | * @shutdown: called at device shutdown | 104 | * @shutdown: called at device shutdown |
105 | * @prepare: prepare for pm state transition | ||
106 | * @complete: undo work done in @prepare | ||
107 | * @freeze: callback for freezing during hibernation snapshotting | ||
108 | * @thaw: undo work done in @freeze | ||
109 | * @restore: callback for restoring after hibernation | ||
107 | * @driver: embedded device driver structure | 110 | * @driver: embedded device driver structure |
108 | * @name: device driver name | 111 | * @name: device driver name |
109 | */ | 112 | */ |
@@ -116,6 +119,11 @@ struct ccw_driver { | |||
116 | int (*set_offline) (struct ccw_device *); | 119 | int (*set_offline) (struct ccw_device *); |
117 | int (*notify) (struct ccw_device *, int); | 120 | int (*notify) (struct ccw_device *, int); |
118 | void (*shutdown) (struct ccw_device *); | 121 | void (*shutdown) (struct ccw_device *); |
122 | int (*prepare) (struct ccw_device *); | ||
123 | void (*complete) (struct ccw_device *); | ||
124 | int (*freeze)(struct ccw_device *); | ||
125 | int (*thaw) (struct ccw_device *); | ||
126 | int (*restore)(struct ccw_device *); | ||
119 | struct device_driver driver; | 127 | struct device_driver driver; |
120 | char *name; | 128 | char *name; |
121 | }; | 129 | }; |
@@ -184,6 +192,7 @@ extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *); | |||
184 | #define to_ccwdrv(n) container_of(n, struct ccw_driver, driver) | 192 | #define to_ccwdrv(n) container_of(n, struct ccw_driver, driver) |
185 | 193 | ||
186 | extern struct ccw_device *ccw_device_probe_console(void); | 194 | extern struct ccw_device *ccw_device_probe_console(void); |
195 | extern int ccw_device_force_console(void); | ||
187 | 196 | ||
188 | // FIXME: these have to go | 197 | // FIXME: these have to go |
189 | extern int _ccw_device_get_subchannel_number(struct ccw_device *); | 198 | extern int _ccw_device_get_subchannel_number(struct ccw_device *); |
diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h index a27f68985a79..c79c1e787b86 100644 --- a/arch/s390/include/asm/ccwgroup.h +++ b/arch/s390/include/asm/ccwgroup.h | |||
@@ -38,6 +38,11 @@ struct ccwgroup_device { | |||
38 | * @set_online: function called when device is set online | 38 | * @set_online: function called when device is set online |
39 | * @set_offline: function called when device is set offline | 39 | * @set_offline: function called when device is set offline |
40 | * @shutdown: function called when device is shut down | 40 | * @shutdown: function called when device is shut down |
41 | * @prepare: prepare for pm state transition | ||
42 | * @complete: undo work done in @prepare | ||
43 | * @freeze: callback for freezing during hibernation snapshotting | ||
44 | * @thaw: undo work done in @freeze | ||
45 | * @restore: callback for restoring after hibernation | ||
41 | * @driver: embedded driver structure | 46 | * @driver: embedded driver structure |
42 | */ | 47 | */ |
43 | struct ccwgroup_driver { | 48 | struct ccwgroup_driver { |
@@ -51,6 +56,11 @@ struct ccwgroup_driver { | |||
51 | int (*set_online) (struct ccwgroup_device *); | 56 | int (*set_online) (struct ccwgroup_device *); |
52 | int (*set_offline) (struct ccwgroup_device *); | 57 | int (*set_offline) (struct ccwgroup_device *); |
53 | void (*shutdown)(struct ccwgroup_device *); | 58 | void (*shutdown)(struct ccwgroup_device *); |
59 | int (*prepare) (struct ccwgroup_device *); | ||
60 | void (*complete) (struct ccwgroup_device *); | ||
61 | int (*freeze)(struct ccwgroup_device *); | ||
62 | int (*thaw) (struct ccwgroup_device *); | ||
63 | int (*restore)(struct ccwgroup_device *); | ||
54 | 64 | ||
55 | struct device_driver driver; | 65 | struct device_driver driver; |
56 | }; | 66 | }; |
diff --git a/arch/s390/include/asm/suspend.h b/arch/s390/include/asm/suspend.h new file mode 100644 index 000000000000..dc75c616eafe --- /dev/null +++ b/arch/s390/include/asm/suspend.h | |||
@@ -0,0 +1,10 @@ | |||
1 | #ifndef __ASM_S390_SUSPEND_H | ||
2 | #define __ASM_S390_SUSPEND_H | ||
3 | |||
4 | static inline int arch_prepare_suspend(void) | ||
5 | { | ||
6 | return 0; | ||
7 | } | ||
8 | |||
9 | #endif | ||
10 | |||
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h index 3a8b26eb1f2e..4fb83c1cdb77 100644 --- a/arch/s390/include/asm/system.h +++ b/arch/s390/include/asm/system.h | |||
@@ -1,11 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * include/asm-s390/system.h | 2 | * Copyright IBM Corp. 1999, 2009 |
3 | * | 3 | * |
4 | * S390 version | 4 | * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> |
5 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | ||
7 | * | ||
8 | * Derived from "include/asm-i386/system.h" | ||
9 | */ | 5 | */ |
10 | 6 | ||
11 | #ifndef __ASM_SYSTEM_H | 7 | #ifndef __ASM_SYSTEM_H |
@@ -469,6 +465,20 @@ extern psw_t sysc_restore_trace_psw; | |||
469 | extern psw_t io_restore_trace_psw; | 465 | extern psw_t io_restore_trace_psw; |
470 | #endif | 466 | #endif |
471 | 467 | ||
468 | static inline int tprot(unsigned long addr) | ||
469 | { | ||
470 | int rc = -EFAULT; | ||
471 | |||
472 | asm volatile( | ||
473 | " tprot 0(%1),0\n" | ||
474 | "0: ipm %0\n" | ||
475 | " srl %0,28\n" | ||
476 | "1:\n" | ||
477 | EX_TABLE(0b,1b) | ||
478 | : "+d" (rc) : "a" (addr) : "cc"); | ||
479 | return rc; | ||
480 | } | ||
481 | |||
472 | #endif /* __KERNEL__ */ | 482 | #endif /* __KERNEL__ */ |
473 | 483 | ||
474 | #endif | 484 | #endif |
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index fb263736826c..f9b144049dc9 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * arch/s390/kernel/early.c | 2 | * arch/s390/kernel/early.c |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 2007 | 4 | * Copyright IBM Corp. 2007, 2009 |
5 | * Author(s): Hongjie Yang <hongjie@us.ibm.com>, | 5 | * Author(s): Hongjie Yang <hongjie@us.ibm.com>, |
6 | * Heiko Carstens <heiko.carstens@de.ibm.com> | 6 | * Heiko Carstens <heiko.carstens@de.ibm.com> |
7 | */ | 7 | */ |
@@ -210,7 +210,7 @@ static noinline __init void detect_machine_type(void) | |||
210 | machine_flags |= MACHINE_FLAG_VM; | 210 | machine_flags |= MACHINE_FLAG_VM; |
211 | } | 211 | } |
212 | 212 | ||
213 | static __init void early_pgm_check_handler(void) | 213 | static void early_pgm_check_handler(void) |
214 | { | 214 | { |
215 | unsigned long addr; | 215 | unsigned long addr; |
216 | const struct exception_table_entry *fixup; | 216 | const struct exception_table_entry *fixup; |
@@ -222,7 +222,7 @@ static __init void early_pgm_check_handler(void) | |||
222 | S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE; | 222 | S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE; |
223 | } | 223 | } |
224 | 224 | ||
225 | static noinline __init void setup_lowcore_early(void) | 225 | void setup_lowcore_early(void) |
226 | { | 226 | { |
227 | psw_t psw; | 227 | psw_t psw; |
228 | 228 | ||
diff --git a/arch/s390/kernel/mem_detect.c b/arch/s390/kernel/mem_detect.c index 9872999c66d1..559af0d07878 100644 --- a/arch/s390/kernel/mem_detect.c +++ b/arch/s390/kernel/mem_detect.c | |||
@@ -1,6 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright IBM Corp. 2008 | 2 | * Copyright IBM Corp. 2008, 2009 |
3 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> | 3 | * |
4 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> | ||
4 | */ | 5 | */ |
5 | 6 | ||
6 | #include <linux/kernel.h> | 7 | #include <linux/kernel.h> |
@@ -9,20 +10,6 @@ | |||
9 | #include <asm/sclp.h> | 10 | #include <asm/sclp.h> |
10 | #include <asm/setup.h> | 11 | #include <asm/setup.h> |
11 | 12 | ||
12 | static inline int tprot(unsigned long addr) | ||
13 | { | ||
14 | int rc = -EFAULT; | ||
15 | |||
16 | asm volatile( | ||
17 | " tprot 0(%1),0\n" | ||
18 | "0: ipm %0\n" | ||
19 | " srl %0,28\n" | ||
20 | "1:\n" | ||
21 | EX_TABLE(0b,1b) | ||
22 | : "+d" (rc) : "a" (addr) : "cc"); | ||
23 | return rc; | ||
24 | } | ||
25 | |||
26 | #define ADDR2G (1ULL << 31) | 13 | #define ADDR2G (1ULL << 31) |
27 | 14 | ||
28 | static void find_memory_chunks(struct mem_chunk chunk[]) | 15 | static void find_memory_chunks(struct mem_chunk chunk[]) |
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index cc8c484984e3..fd8e3111a4e8 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * arch/s390/kernel/smp.c | 2 | * arch/s390/kernel/smp.c |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 1999,2007 | 4 | * Copyright IBM Corp. 1999, 2009 |
5 | * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), | 5 | * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), |
6 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | 6 | * Martin Schwidefsky (schwidefsky@de.ibm.com) |
7 | * Heiko Carstens (heiko.carstens@de.ibm.com) | 7 | * Heiko Carstens (heiko.carstens@de.ibm.com) |
@@ -1031,6 +1031,42 @@ out: | |||
1031 | static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show, | 1031 | static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show, |
1032 | dispatching_store); | 1032 | dispatching_store); |
1033 | 1033 | ||
1034 | /* | ||
1035 | * If the resume kernel runs on another cpu than the suspended kernel, | ||
1036 | * we have to switch the cpu IDs in the logical map. | ||
1037 | */ | ||
1038 | void smp_switch_boot_cpu_in_resume(u32 resume_phys_cpu_id, | ||
1039 | struct _lowcore *suspend_lowcore) | ||
1040 | { | ||
1041 | int cpu, suspend_cpu_id, resume_cpu_id; | ||
1042 | u32 suspend_phys_cpu_id; | ||
1043 | |||
1044 | suspend_phys_cpu_id = __cpu_logical_map[suspend_lowcore->cpu_nr]; | ||
1045 | suspend_cpu_id = suspend_lowcore->cpu_nr; | ||
1046 | |||
1047 | for_each_present_cpu(cpu) { | ||
1048 | if (__cpu_logical_map[cpu] == resume_phys_cpu_id) { | ||
1049 | resume_cpu_id = cpu; | ||
1050 | goto found; | ||
1051 | } | ||
1052 | } | ||
1053 | panic("Could not find resume cpu in logical map.\n"); | ||
1054 | |||
1055 | found: | ||
1056 | printk("Resume cpu ID: %i/%i\n", resume_phys_cpu_id, resume_cpu_id); | ||
1057 | printk("Suspend cpu ID: %i/%i\n", suspend_phys_cpu_id, suspend_cpu_id); | ||
1058 | |||
1059 | __cpu_logical_map[resume_cpu_id] = suspend_phys_cpu_id; | ||
1060 | __cpu_logical_map[suspend_cpu_id] = resume_phys_cpu_id; | ||
1061 | |||
1062 | lowcore_ptr[suspend_cpu_id]->cpu_addr = resume_phys_cpu_id; | ||
1063 | } | ||
1064 | |||
1065 | u32 smp_get_phys_cpu_id(void) | ||
1066 | { | ||
1067 | return __cpu_logical_map[smp_processor_id()]; | ||
1068 | } | ||
1069 | |||
1034 | static int __init topology_init(void) | 1070 | static int __init topology_init(void) |
1035 | { | 1071 | { |
1036 | int cpu; | 1072 | int cpu; |
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 4ca8e826bf30..565667207985 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c | |||
@@ -313,3 +313,22 @@ int s390_enable_sie(void) | |||
313 | return 0; | 313 | return 0; |
314 | } | 314 | } |
315 | EXPORT_SYMBOL_GPL(s390_enable_sie); | 315 | EXPORT_SYMBOL_GPL(s390_enable_sie); |
316 | |||
317 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
318 | #ifdef CONFIG_HIBERNATION | ||
319 | bool kernel_page_present(struct page *page) | ||
320 | { | ||
321 | unsigned long addr; | ||
322 | int cc; | ||
323 | |||
324 | addr = page_to_phys(page); | ||
325 | asm("lra %1,0(%1)\n" | ||
326 | "ipm %0\n" | ||
327 | "srl %0,28" | ||
328 | :"=d"(cc),"+a"(addr)::"cc"); | ||
329 | return cc == 0; | ||
330 | } | ||
331 | |||
332 | #endif /* CONFIG_HIBERNATION */ | ||
333 | #endif /* CONFIG_DEBUG_PAGEALLOC */ | ||
334 | |||
diff --git a/arch/s390/power/Makefile b/arch/s390/power/Makefile new file mode 100644 index 000000000000..973bb45a8fec --- /dev/null +++ b/arch/s390/power/Makefile | |||
@@ -0,0 +1,8 @@ | |||
1 | # | ||
2 | # Makefile for s390 PM support | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_HIBERNATION) += suspend.o | ||
6 | obj-$(CONFIG_HIBERNATION) += swsusp.o | ||
7 | obj-$(CONFIG_HIBERNATION) += swsusp_64.o | ||
8 | obj-$(CONFIG_HIBERNATION) += swsusp_asm64.o | ||
diff --git a/arch/s390/power/suspend.c b/arch/s390/power/suspend.c new file mode 100644 index 000000000000..b3351eceebbe --- /dev/null +++ b/arch/s390/power/suspend.c | |||
@@ -0,0 +1,40 @@ | |||
1 | /* | ||
2 | * Suspend support specific for s390. | ||
3 | * | ||
4 | * Copyright IBM Corp. 2009 | ||
5 | * | ||
6 | * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com> | ||
7 | */ | ||
8 | |||
9 | #include <linux/mm.h> | ||
10 | #include <linux/suspend.h> | ||
11 | #include <linux/reboot.h> | ||
12 | #include <linux/pfn.h> | ||
13 | #include <asm/sections.h> | ||
14 | #include <asm/ipl.h> | ||
15 | |||
16 | /* | ||
17 | * References to section boundaries | ||
18 | */ | ||
19 | extern const void __nosave_begin, __nosave_end; | ||
20 | |||
21 | /* | ||
22 | * check if given pfn is in the 'nosave' or in the read only NSS section | ||
23 | */ | ||
24 | int pfn_is_nosave(unsigned long pfn) | ||
25 | { | ||
26 | unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT; | ||
27 | unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) | ||
28 | >> PAGE_SHIFT; | ||
29 | unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1; | ||
30 | unsigned long stext_pfn = PFN_DOWN(__pa(&_stext)); | ||
31 | |||
32 | if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn) | ||
33 | return 1; | ||
34 | if (pfn >= stext_pfn && pfn <= eshared_pfn) { | ||
35 | if (ipl_info.type == IPL_TYPE_NSS) | ||
36 | return 1; | ||
37 | } else if ((tprot(pfn * PAGE_SIZE) && pfn > 0)) | ||
38 | return 1; | ||
39 | return 0; | ||
40 | } | ||
diff --git a/arch/s390/power/swsusp.c b/arch/s390/power/swsusp.c new file mode 100644 index 000000000000..e6a4fe9f5f24 --- /dev/null +++ b/arch/s390/power/swsusp.c | |||
@@ -0,0 +1,30 @@ | |||
1 | /* | ||
2 | * Support for suspend and resume on s390 | ||
3 | * | ||
4 | * Copyright IBM Corp. 2009 | ||
5 | * | ||
6 | * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com> | ||
7 | * | ||
8 | */ | ||
9 | |||
10 | |||
11 | /* | ||
12 | * save CPU registers before creating a hibernation image and before | ||
13 | * restoring the memory state from it | ||
14 | */ | ||
15 | void save_processor_state(void) | ||
16 | { | ||
17 | /* implentation contained in the | ||
18 | * swsusp_arch_suspend function | ||
19 | */ | ||
20 | } | ||
21 | |||
22 | /* | ||
23 | * restore the contents of CPU registers | ||
24 | */ | ||
25 | void restore_processor_state(void) | ||
26 | { | ||
27 | /* implentation contained in the | ||
28 | * swsusp_arch_resume function | ||
29 | */ | ||
30 | } | ||
diff --git a/arch/s390/power/swsusp_64.c b/arch/s390/power/swsusp_64.c new file mode 100644 index 000000000000..9516a517d72f --- /dev/null +++ b/arch/s390/power/swsusp_64.c | |||
@@ -0,0 +1,17 @@ | |||
1 | /* | ||
2 | * Support for suspend and resume on s390 | ||
3 | * | ||
4 | * Copyright IBM Corp. 2009 | ||
5 | * | ||
6 | * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com> | ||
7 | * | ||
8 | */ | ||
9 | |||
10 | #include <asm/system.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | |||
13 | void do_after_copyback(void) | ||
14 | { | ||
15 | mb(); | ||
16 | } | ||
17 | |||
diff --git a/arch/s390/power/swsusp_asm64.S b/arch/s390/power/swsusp_asm64.S new file mode 100644 index 000000000000..3c74e7d827c9 --- /dev/null +++ b/arch/s390/power/swsusp_asm64.S | |||
@@ -0,0 +1,199 @@ | |||
1 | /* | ||
2 | * S390 64-bit swsusp implementation | ||
3 | * | ||
4 | * Copyright IBM Corp. 2009 | ||
5 | * | ||
6 | * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com> | ||
7 | * Michael Holzheu <holzheu@linux.vnet.ibm.com> | ||
8 | */ | ||
9 | |||
10 | #include <asm/page.h> | ||
11 | #include <asm/ptrace.h> | ||
12 | #include <asm/asm-offsets.h> | ||
13 | |||
14 | /* | ||
15 | * Save register context in absolute 0 lowcore and call swsusp_save() to | ||
16 | * create in-memory kernel image. The context is saved in the designated | ||
17 | * "store status" memory locations (see POP). | ||
18 | * We return from this function twice. The first time during the suspend to | ||
19 | * disk process. The second time via the swsusp_arch_resume() function | ||
20 | * (see below) in the resume process. | ||
21 | * This function runs with disabled interrupts. | ||
22 | */ | ||
23 | .section .text | ||
24 | .align 2 | ||
25 | .globl swsusp_arch_suspend | ||
26 | swsusp_arch_suspend: | ||
27 | stmg %r6,%r15,__SF_GPRS(%r15) | ||
28 | lgr %r1,%r15 | ||
29 | aghi %r15,-STACK_FRAME_OVERHEAD | ||
30 | stg %r1,__SF_BACKCHAIN(%r15) | ||
31 | |||
32 | /* Deactivate DAT */ | ||
33 | stnsm __SF_EMPTY(%r15),0xfb | ||
34 | |||
35 | /* Switch off lowcore protection */ | ||
36 | stctg %c0,%c0,__SF_EMPTY(%r15) | ||
37 | ni __SF_EMPTY+4(%r15),0xef | ||
38 | lctlg %c0,%c0,__SF_EMPTY(%r15) | ||
39 | |||
40 | /* Store prefix register on stack */ | ||
41 | stpx __SF_EMPTY(%r15) | ||
42 | |||
43 | /* Setup base register for lowcore (absolute 0) */ | ||
44 | llgf %r1,__SF_EMPTY(%r15) | ||
45 | |||
46 | /* Get pointer to save area */ | ||
47 | aghi %r1,0x1000 | ||
48 | |||
49 | /* Store registers */ | ||
50 | mvc 0x318(4,%r1),__SF_EMPTY(%r15) /* move prefix to lowcore */ | ||
51 | stfpc 0x31c(%r1) /* store fpu control */ | ||
52 | std 0,0x200(%r1) /* store f0 */ | ||
53 | std 1,0x208(%r1) /* store f1 */ | ||
54 | std 2,0x210(%r1) /* store f2 */ | ||
55 | std 3,0x218(%r1) /* store f3 */ | ||
56 | std 4,0x220(%r1) /* store f4 */ | ||
57 | std 5,0x228(%r1) /* store f5 */ | ||
58 | std 6,0x230(%r1) /* store f6 */ | ||
59 | std 7,0x238(%r1) /* store f7 */ | ||
60 | std 8,0x240(%r1) /* store f8 */ | ||
61 | std 9,0x248(%r1) /* store f9 */ | ||
62 | std 10,0x250(%r1) /* store f10 */ | ||
63 | std 11,0x258(%r1) /* store f11 */ | ||
64 | std 12,0x260(%r1) /* store f12 */ | ||
65 | std 13,0x268(%r1) /* store f13 */ | ||
66 | std 14,0x270(%r1) /* store f14 */ | ||
67 | std 15,0x278(%r1) /* store f15 */ | ||
68 | stam %a0,%a15,0x340(%r1) /* store access registers */ | ||
69 | stctg %c0,%c15,0x380(%r1) /* store control registers */ | ||
70 | stmg %r0,%r15,0x280(%r1) /* store general registers */ | ||
71 | |||
72 | stpt 0x328(%r1) /* store timer */ | ||
73 | stckc 0x330(%r1) /* store clock comparator */ | ||
74 | |||
75 | /* Activate DAT */ | ||
76 | stosm __SF_EMPTY(%r15),0x04 | ||
77 | |||
78 | /* Set prefix page to zero */ | ||
79 | xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15) | ||
80 | spx __SF_EMPTY(%r15) | ||
81 | |||
82 | /* Setup lowcore */ | ||
83 | brasl %r14,setup_lowcore_early | ||
84 | |||
85 | /* Save image */ | ||
86 | brasl %r14,swsusp_save | ||
87 | |||
88 | /* Switch on lowcore protection */ | ||
89 | stctg %c0,%c0,__SF_EMPTY(%r15) | ||
90 | oi __SF_EMPTY+4(%r15),0x10 | ||
91 | lctlg %c0,%c0,__SF_EMPTY(%r15) | ||
92 | |||
93 | /* Restore prefix register and return */ | ||
94 | lghi %r1,0x1000 | ||
95 | spx 0x318(%r1) | ||
96 | lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) | ||
97 | lghi %r2,0 | ||
98 | br %r14 | ||
99 | |||
100 | /* | ||
101 | * Restore saved memory image to correct place and restore register context. | ||
102 | * Then we return to the function that called swsusp_arch_suspend(). | ||
103 | * swsusp_arch_resume() runs with disabled interrupts. | ||
104 | */ | ||
105 | .globl swsusp_arch_resume | ||
106 | swsusp_arch_resume: | ||
107 | stmg %r6,%r15,__SF_GPRS(%r15) | ||
108 | lgr %r1,%r15 | ||
109 | aghi %r15,-STACK_FRAME_OVERHEAD | ||
110 | stg %r1,__SF_BACKCHAIN(%r15) | ||
111 | |||
112 | /* Save boot cpu number */ | ||
113 | brasl %r14,smp_get_phys_cpu_id | ||
114 | lgr %r10,%r2 | ||
115 | |||
116 | /* Deactivate DAT */ | ||
117 | stnsm __SF_EMPTY(%r15),0xfb | ||
118 | |||
119 | /* Switch off lowcore protection */ | ||
120 | stctg %c0,%c0,__SF_EMPTY(%r15) | ||
121 | ni __SF_EMPTY+4(%r15),0xef | ||
122 | lctlg %c0,%c0,__SF_EMPTY(%r15) | ||
123 | |||
124 | /* Set prefix page to zero */ | ||
125 | xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15) | ||
126 | spx __SF_EMPTY(%r15) | ||
127 | |||
128 | /* Restore saved image */ | ||
129 | larl %r1,restore_pblist | ||
130 | lg %r1,0(%r1) | ||
131 | ltgr %r1,%r1 | ||
132 | jz 2f | ||
133 | 0: | ||
134 | lg %r2,8(%r1) | ||
135 | lg %r4,0(%r1) | ||
136 | lghi %r3,PAGE_SIZE | ||
137 | lghi %r5,PAGE_SIZE | ||
138 | 1: | ||
139 | mvcle %r2,%r4,0 | ||
140 | jo 1b | ||
141 | lg %r1,16(%r1) | ||
142 | ltgr %r1,%r1 | ||
143 | jnz 0b | ||
144 | 2: | ||
145 | ptlb /* flush tlb */ | ||
146 | |||
147 | /* Restore registers */ | ||
148 | lghi %r13,0x1000 /* %r13 = pointer to save area */ | ||
149 | |||
150 | spt 0x328(%r13) /* reprogram timer */ | ||
151 | //sckc 0x330(%r13) /* set clock comparator */ | ||
152 | |||
153 | lctlg %c0,%c15,0x380(%r13) /* load control registers */ | ||
154 | lam %a0,%a15,0x340(%r13) /* load access registers */ | ||
155 | |||
156 | lfpc 0x31c(%r13) /* load fpu control */ | ||
157 | ld 0,0x200(%r13) /* load f0 */ | ||
158 | ld 1,0x208(%r13) /* load f1 */ | ||
159 | ld 2,0x210(%r13) /* load f2 */ | ||
160 | ld 3,0x218(%r13) /* load f3 */ | ||
161 | ld 4,0x220(%r13) /* load f4 */ | ||
162 | ld 5,0x228(%r13) /* load f5 */ | ||
163 | ld 6,0x230(%r13) /* load f6 */ | ||
164 | ld 7,0x238(%r13) /* load f7 */ | ||
165 | ld 8,0x240(%r13) /* load f8 */ | ||
166 | ld 9,0x248(%r13) /* load f9 */ | ||
167 | ld 10,0x250(%r13) /* load f10 */ | ||
168 | ld 11,0x258(%r13) /* load f11 */ | ||
169 | ld 12,0x260(%r13) /* load f12 */ | ||
170 | ld 13,0x268(%r13) /* load f13 */ | ||
171 | ld 14,0x270(%r13) /* load f14 */ | ||
172 | ld 15,0x278(%r13) /* load f15 */ | ||
173 | |||
174 | /* Load old stack */ | ||
175 | lg %r15,0x2f8(%r13) | ||
176 | |||
177 | /* Pointer to save area */ | ||
178 | lghi %r13,0x1000 | ||
179 | |||
180 | /* Switch CPUs */ | ||
181 | lgr %r2,%r10 /* get cpu id */ | ||
182 | llgf %r3,0x318(%r13) | ||
183 | brasl %r14,smp_switch_boot_cpu_in_resume | ||
184 | |||
185 | /* Restore prefix register */ | ||
186 | spx 0x318(%r13) | ||
187 | |||
188 | /* Switch on lowcore protection */ | ||
189 | stctg %c0,%c0,__SF_EMPTY(%r15) | ||
190 | oi __SF_EMPTY+4(%r15),0x10 | ||
191 | lctlg %c0,%c0,__SF_EMPTY(%r15) | ||
192 | |||
193 | /* Activate DAT */ | ||
194 | stosm __SF_EMPTY(%r15),0x04 | ||
195 | |||
196 | /* Return 0 */ | ||
197 | lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) | ||
198 | lghi %r2,0 | ||
199 | br %r14 | ||
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index cc12cd48bbc5..3f8b6a92eabd 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig | |||
@@ -37,6 +37,8 @@ config SPARC64 | |||
37 | select HAVE_KPROBES | 37 | select HAVE_KPROBES |
38 | select HAVE_LMB | 38 | select HAVE_LMB |
39 | select HAVE_SYSCALL_WRAPPERS | 39 | select HAVE_SYSCALL_WRAPPERS |
40 | select HAVE_DYNAMIC_FTRACE | ||
41 | select HAVE_FTRACE_MCOUNT_RECORD | ||
40 | select USE_GENERIC_SMP_HELPERS if SMP | 42 | select USE_GENERIC_SMP_HELPERS if SMP |
41 | select RTC_DRV_CMOS | 43 | select RTC_DRV_CMOS |
42 | select RTC_DRV_BQ4802 | 44 | select RTC_DRV_BQ4802 |
@@ -93,6 +95,9 @@ config AUDIT_ARCH | |||
93 | config HAVE_SETUP_PER_CPU_AREA | 95 | config HAVE_SETUP_PER_CPU_AREA |
94 | def_bool y if SPARC64 | 96 | def_bool y if SPARC64 |
95 | 97 | ||
98 | config HAVE_DYNAMIC_PER_CPU_AREA | ||
99 | def_bool y if SPARC64 | ||
100 | |||
96 | config GENERIC_HARDIRQS_NO__DO_IRQ | 101 | config GENERIC_HARDIRQS_NO__DO_IRQ |
97 | bool | 102 | bool |
98 | def_bool y if SPARC64 | 103 | def_bool y if SPARC64 |
diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig index b5d63bd8716e..0123a4c596ce 100644 --- a/arch/sparc/configs/sparc64_defconfig +++ b/arch/sparc/configs/sparc64_defconfig | |||
@@ -1,7 +1,7 @@ | |||
1 | # | 1 | # |
2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
3 | # Linux kernel version: 2.6.30-rc2 | 3 | # Linux kernel version: 2.6.30 |
4 | # Fri Apr 17 02:03:07 2009 | 4 | # Tue Jun 16 04:59:36 2009 |
5 | # | 5 | # |
6 | CONFIG_64BIT=y | 6 | CONFIG_64BIT=y |
7 | CONFIG_SPARC=y | 7 | CONFIG_SPARC=y |
@@ -19,6 +19,7 @@ CONFIG_LOCKDEP_SUPPORT=y | |||
19 | CONFIG_HAVE_LATENCYTOP_SUPPORT=y | 19 | CONFIG_HAVE_LATENCYTOP_SUPPORT=y |
20 | CONFIG_AUDIT_ARCH=y | 20 | CONFIG_AUDIT_ARCH=y |
21 | CONFIG_HAVE_SETUP_PER_CPU_AREA=y | 21 | CONFIG_HAVE_SETUP_PER_CPU_AREA=y |
22 | CONFIG_HAVE_DYNAMIC_PER_CPU_AREA=y | ||
22 | CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y | 23 | CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y |
23 | CONFIG_MMU=y | 24 | CONFIG_MMU=y |
24 | CONFIG_ARCH_NO_VIRT_TO_BUS=y | 25 | CONFIG_ARCH_NO_VIRT_TO_BUS=y |
@@ -82,7 +83,6 @@ CONFIG_SYSCTL_SYSCALL=y | |||
82 | CONFIG_KALLSYMS=y | 83 | CONFIG_KALLSYMS=y |
83 | # CONFIG_KALLSYMS_ALL is not set | 84 | # CONFIG_KALLSYMS_ALL is not set |
84 | # CONFIG_KALLSYMS_EXTRA_PASS is not set | 85 | # CONFIG_KALLSYMS_EXTRA_PASS is not set |
85 | # CONFIG_STRIP_ASM_SYMS is not set | ||
86 | CONFIG_HOTPLUG=y | 86 | CONFIG_HOTPLUG=y |
87 | CONFIG_PRINTK=y | 87 | CONFIG_PRINTK=y |
88 | CONFIG_BUG=y | 88 | CONFIG_BUG=y |
@@ -95,16 +95,21 @@ CONFIG_TIMERFD=y | |||
95 | CONFIG_EVENTFD=y | 95 | CONFIG_EVENTFD=y |
96 | CONFIG_SHMEM=y | 96 | CONFIG_SHMEM=y |
97 | CONFIG_AIO=y | 97 | CONFIG_AIO=y |
98 | |||
99 | # | ||
100 | # Performance Counters | ||
101 | # | ||
98 | CONFIG_VM_EVENT_COUNTERS=y | 102 | CONFIG_VM_EVENT_COUNTERS=y |
99 | CONFIG_PCI_QUIRKS=y | 103 | CONFIG_PCI_QUIRKS=y |
100 | CONFIG_SLUB_DEBUG=y | 104 | CONFIG_SLUB_DEBUG=y |
105 | # CONFIG_STRIP_ASM_SYMS is not set | ||
101 | # CONFIG_COMPAT_BRK is not set | 106 | # CONFIG_COMPAT_BRK is not set |
102 | # CONFIG_SLAB is not set | 107 | # CONFIG_SLAB is not set |
103 | CONFIG_SLUB=y | 108 | CONFIG_SLUB=y |
104 | # CONFIG_SLOB is not set | 109 | # CONFIG_SLOB is not set |
105 | CONFIG_PROFILING=y | 110 | CONFIG_PROFILING=y |
106 | CONFIG_TRACEPOINTS=y | 111 | CONFIG_TRACEPOINTS=y |
107 | # CONFIG_MARKERS is not set | 112 | CONFIG_MARKERS=y |
108 | CONFIG_OPROFILE=m | 113 | CONFIG_OPROFILE=m |
109 | CONFIG_HAVE_OPROFILE=y | 114 | CONFIG_HAVE_OPROFILE=y |
110 | CONFIG_KPROBES=y | 115 | CONFIG_KPROBES=y |
@@ -202,6 +207,7 @@ CONFIG_NR_QUICK=1 | |||
202 | CONFIG_UNEVICTABLE_LRU=y | 207 | CONFIG_UNEVICTABLE_LRU=y |
203 | CONFIG_HAVE_MLOCK=y | 208 | CONFIG_HAVE_MLOCK=y |
204 | CONFIG_HAVE_MLOCKED_PAGE_BIT=y | 209 | CONFIG_HAVE_MLOCKED_PAGE_BIT=y |
210 | CONFIG_DEFAULT_MMAP_MIN_ADDR=8192 | ||
205 | CONFIG_SCHED_SMT=y | 211 | CONFIG_SCHED_SMT=y |
206 | CONFIG_SCHED_MC=y | 212 | CONFIG_SCHED_MC=y |
207 | # CONFIG_PREEMPT_NONE is not set | 213 | # CONFIG_PREEMPT_NONE is not set |
@@ -321,6 +327,7 @@ CONFIG_VLAN_8021Q=m | |||
321 | # CONFIG_ECONET is not set | 327 | # CONFIG_ECONET is not set |
322 | # CONFIG_WAN_ROUTER is not set | 328 | # CONFIG_WAN_ROUTER is not set |
323 | # CONFIG_PHONET is not set | 329 | # CONFIG_PHONET is not set |
330 | # CONFIG_IEEE802154 is not set | ||
324 | # CONFIG_NET_SCHED is not set | 331 | # CONFIG_NET_SCHED is not set |
325 | # CONFIG_DCB is not set | 332 | # CONFIG_DCB is not set |
326 | 333 | ||
@@ -340,7 +347,11 @@ CONFIG_WIRELESS=y | |||
340 | CONFIG_WIRELESS_OLD_REGULATORY=y | 347 | CONFIG_WIRELESS_OLD_REGULATORY=y |
341 | # CONFIG_WIRELESS_EXT is not set | 348 | # CONFIG_WIRELESS_EXT is not set |
342 | # CONFIG_LIB80211 is not set | 349 | # CONFIG_LIB80211 is not set |
343 | # CONFIG_MAC80211 is not set | 350 | |
351 | # | ||
352 | # CFG80211 needs to be enabled for MAC80211 | ||
353 | # | ||
354 | CONFIG_MAC80211_DEFAULT_PS_VALUE=0 | ||
344 | # CONFIG_WIMAX is not set | 355 | # CONFIG_WIMAX is not set |
345 | # CONFIG_RFKILL is not set | 356 | # CONFIG_RFKILL is not set |
346 | # CONFIG_NET_9P is not set | 357 | # CONFIG_NET_9P is not set |
@@ -364,6 +375,7 @@ CONFIG_EXTRA_FIRMWARE="" | |||
364 | CONFIG_CONNECTOR=m | 375 | CONFIG_CONNECTOR=m |
365 | # CONFIG_MTD is not set | 376 | # CONFIG_MTD is not set |
366 | CONFIG_OF_DEVICE=y | 377 | CONFIG_OF_DEVICE=y |
378 | CONFIG_OF_MDIO=m | ||
367 | # CONFIG_PARPORT is not set | 379 | # CONFIG_PARPORT is not set |
368 | CONFIG_BLK_DEV=y | 380 | CONFIG_BLK_DEV=y |
369 | # CONFIG_BLK_DEV_FD is not set | 381 | # CONFIG_BLK_DEV_FD is not set |
@@ -399,6 +411,7 @@ CONFIG_MISC_DEVICES=y | |||
399 | # CONFIG_EEPROM_AT24 is not set | 411 | # CONFIG_EEPROM_AT24 is not set |
400 | # CONFIG_EEPROM_LEGACY is not set | 412 | # CONFIG_EEPROM_LEGACY is not set |
401 | # CONFIG_EEPROM_93CX6 is not set | 413 | # CONFIG_EEPROM_93CX6 is not set |
414 | # CONFIG_CB710_CORE is not set | ||
402 | CONFIG_HAVE_IDE=y | 415 | CONFIG_HAVE_IDE=y |
403 | CONFIG_IDE=y | 416 | CONFIG_IDE=y |
404 | 417 | ||
@@ -477,10 +490,6 @@ CONFIG_BLK_DEV_SR=m | |||
477 | CONFIG_BLK_DEV_SR_VENDOR=y | 490 | CONFIG_BLK_DEV_SR_VENDOR=y |
478 | CONFIG_CHR_DEV_SG=m | 491 | CONFIG_CHR_DEV_SG=m |
479 | # CONFIG_CHR_DEV_SCH is not set | 492 | # CONFIG_CHR_DEV_SCH is not set |
480 | |||
481 | # | ||
482 | # Some SCSI devices (e.g. CD jukebox) support multiple LUNs | ||
483 | # | ||
484 | CONFIG_SCSI_MULTI_LUN=y | 493 | CONFIG_SCSI_MULTI_LUN=y |
485 | CONFIG_SCSI_CONSTANTS=y | 494 | CONFIG_SCSI_CONSTANTS=y |
486 | # CONFIG_SCSI_LOGGING is not set | 495 | # CONFIG_SCSI_LOGGING is not set |
@@ -499,6 +508,7 @@ CONFIG_SCSI_FC_ATTRS=y | |||
499 | CONFIG_SCSI_LOWLEVEL=y | 508 | CONFIG_SCSI_LOWLEVEL=y |
500 | # CONFIG_ISCSI_TCP is not set | 509 | # CONFIG_ISCSI_TCP is not set |
501 | # CONFIG_SCSI_CXGB3_ISCSI is not set | 510 | # CONFIG_SCSI_CXGB3_ISCSI is not set |
511 | # CONFIG_SCSI_BNX2_ISCSI is not set | ||
502 | # CONFIG_BLK_DEV_3W_XXXX_RAID is not set | 512 | # CONFIG_BLK_DEV_3W_XXXX_RAID is not set |
503 | # CONFIG_SCSI_3W_9XXX is not set | 513 | # CONFIG_SCSI_3W_9XXX is not set |
504 | # CONFIG_SCSI_ACARD is not set | 514 | # CONFIG_SCSI_ACARD is not set |
@@ -507,6 +517,7 @@ CONFIG_SCSI_LOWLEVEL=y | |||
507 | # CONFIG_SCSI_AIC7XXX_OLD is not set | 517 | # CONFIG_SCSI_AIC7XXX_OLD is not set |
508 | # CONFIG_SCSI_AIC79XX is not set | 518 | # CONFIG_SCSI_AIC79XX is not set |
509 | # CONFIG_SCSI_AIC94XX is not set | 519 | # CONFIG_SCSI_AIC94XX is not set |
520 | # CONFIG_SCSI_MVSAS is not set | ||
510 | # CONFIG_SCSI_ARCMSR is not set | 521 | # CONFIG_SCSI_ARCMSR is not set |
511 | # CONFIG_MEGARAID_NEWGEN is not set | 522 | # CONFIG_MEGARAID_NEWGEN is not set |
512 | # CONFIG_MEGARAID_LEGACY is not set | 523 | # CONFIG_MEGARAID_LEGACY is not set |
@@ -521,7 +532,6 @@ CONFIG_SCSI_LOWLEVEL=y | |||
521 | # CONFIG_SCSI_IPS is not set | 532 | # CONFIG_SCSI_IPS is not set |
522 | # CONFIG_SCSI_INITIO is not set | 533 | # CONFIG_SCSI_INITIO is not set |
523 | # CONFIG_SCSI_INIA100 is not set | 534 | # CONFIG_SCSI_INIA100 is not set |
524 | # CONFIG_SCSI_MVSAS is not set | ||
525 | # CONFIG_SCSI_STEX is not set | 535 | # CONFIG_SCSI_STEX is not set |
526 | # CONFIG_SCSI_SYM53C8XX_2 is not set | 536 | # CONFIG_SCSI_SYM53C8XX_2 is not set |
527 | # CONFIG_SCSI_QLOGIC_1280 is not set | 537 | # CONFIG_SCSI_QLOGIC_1280 is not set |
@@ -569,7 +579,6 @@ CONFIG_DM_ZERO=m | |||
569 | # CONFIG_IEEE1394 is not set | 579 | # CONFIG_IEEE1394 is not set |
570 | # CONFIG_I2O is not set | 580 | # CONFIG_I2O is not set |
571 | CONFIG_NETDEVICES=y | 581 | CONFIG_NETDEVICES=y |
572 | CONFIG_COMPAT_NET_DEV_OPS=y | ||
573 | # CONFIG_DUMMY is not set | 582 | # CONFIG_DUMMY is not set |
574 | # CONFIG_BONDING is not set | 583 | # CONFIG_BONDING is not set |
575 | # CONFIG_MACVLAN is not set | 584 | # CONFIG_MACVLAN is not set |
@@ -635,6 +644,7 @@ CONFIG_NET_PCI=y | |||
635 | # CONFIG_SMSC9420 is not set | 644 | # CONFIG_SMSC9420 is not set |
636 | # CONFIG_SUNDANCE is not set | 645 | # CONFIG_SUNDANCE is not set |
637 | # CONFIG_TLAN is not set | 646 | # CONFIG_TLAN is not set |
647 | # CONFIG_KS8842 is not set | ||
638 | # CONFIG_VIA_RHINE is not set | 648 | # CONFIG_VIA_RHINE is not set |
639 | # CONFIG_SC92031 is not set | 649 | # CONFIG_SC92031 is not set |
640 | # CONFIG_ATL2 is not set | 650 | # CONFIG_ATL2 is not set |
@@ -1127,6 +1137,11 @@ CONFIG_SND_VERBOSE_PROCFS=y | |||
1127 | # CONFIG_SND_VERBOSE_PRINTK is not set | 1137 | # CONFIG_SND_VERBOSE_PRINTK is not set |
1128 | # CONFIG_SND_DEBUG is not set | 1138 | # CONFIG_SND_DEBUG is not set |
1129 | CONFIG_SND_VMASTER=y | 1139 | CONFIG_SND_VMASTER=y |
1140 | CONFIG_SND_RAWMIDI_SEQ=m | ||
1141 | # CONFIG_SND_OPL3_LIB_SEQ is not set | ||
1142 | # CONFIG_SND_OPL4_LIB_SEQ is not set | ||
1143 | # CONFIG_SND_SBAWE_SEQ is not set | ||
1144 | # CONFIG_SND_EMU10K1_SEQ is not set | ||
1130 | CONFIG_SND_MPU401_UART=m | 1145 | CONFIG_SND_MPU401_UART=m |
1131 | CONFIG_SND_AC97_CODEC=m | 1146 | CONFIG_SND_AC97_CODEC=m |
1132 | CONFIG_SND_DRIVERS=y | 1147 | CONFIG_SND_DRIVERS=y |
@@ -1153,6 +1168,7 @@ CONFIG_SND_ALI5451=m | |||
1153 | # CONFIG_SND_OXYGEN is not set | 1168 | # CONFIG_SND_OXYGEN is not set |
1154 | # CONFIG_SND_CS4281 is not set | 1169 | # CONFIG_SND_CS4281 is not set |
1155 | # CONFIG_SND_CS46XX is not set | 1170 | # CONFIG_SND_CS46XX is not set |
1171 | # CONFIG_SND_CTXFI is not set | ||
1156 | # CONFIG_SND_DARLA20 is not set | 1172 | # CONFIG_SND_DARLA20 is not set |
1157 | # CONFIG_SND_GINA20 is not set | 1173 | # CONFIG_SND_GINA20 is not set |
1158 | # CONFIG_SND_LAYLA20 is not set | 1174 | # CONFIG_SND_LAYLA20 is not set |
@@ -1183,6 +1199,7 @@ CONFIG_SND_ALI5451=m | |||
1183 | # CONFIG_SND_INTEL8X0 is not set | 1199 | # CONFIG_SND_INTEL8X0 is not set |
1184 | # CONFIG_SND_INTEL8X0M is not set | 1200 | # CONFIG_SND_INTEL8X0M is not set |
1185 | # CONFIG_SND_KORG1212 is not set | 1201 | # CONFIG_SND_KORG1212 is not set |
1202 | # CONFIG_SND_LX6464ES is not set | ||
1186 | # CONFIG_SND_MAESTRO3 is not set | 1203 | # CONFIG_SND_MAESTRO3 is not set |
1187 | # CONFIG_SND_MIXART is not set | 1204 | # CONFIG_SND_MIXART is not set |
1188 | # CONFIG_SND_NM256 is not set | 1205 | # CONFIG_SND_NM256 is not set |
@@ -1229,6 +1246,7 @@ CONFIG_HID_BELKIN=y | |||
1229 | CONFIG_HID_CHERRY=y | 1246 | CONFIG_HID_CHERRY=y |
1230 | CONFIG_HID_CHICONY=y | 1247 | CONFIG_HID_CHICONY=y |
1231 | CONFIG_HID_CYPRESS=y | 1248 | CONFIG_HID_CYPRESS=y |
1249 | CONFIG_HID_DRAGONRISE=y | ||
1232 | # CONFIG_DRAGONRISE_FF is not set | 1250 | # CONFIG_DRAGONRISE_FF is not set |
1233 | CONFIG_HID_EZKEY=y | 1251 | CONFIG_HID_EZKEY=y |
1234 | CONFIG_HID_KYE=y | 1252 | CONFIG_HID_KYE=y |
@@ -1246,9 +1264,14 @@ CONFIG_HID_PETALYNX=y | |||
1246 | CONFIG_HID_SAMSUNG=y | 1264 | CONFIG_HID_SAMSUNG=y |
1247 | CONFIG_HID_SONY=y | 1265 | CONFIG_HID_SONY=y |
1248 | CONFIG_HID_SUNPLUS=y | 1266 | CONFIG_HID_SUNPLUS=y |
1267 | CONFIG_HID_GREENASIA=y | ||
1249 | # CONFIG_GREENASIA_FF is not set | 1268 | # CONFIG_GREENASIA_FF is not set |
1269 | CONFIG_HID_SMARTJOYPLUS=y | ||
1270 | # CONFIG_SMARTJOYPLUS_FF is not set | ||
1250 | CONFIG_HID_TOPSEED=y | 1271 | CONFIG_HID_TOPSEED=y |
1272 | CONFIG_HID_THRUSTMASTER=y | ||
1251 | # CONFIG_THRUSTMASTER_FF is not set | 1273 | # CONFIG_THRUSTMASTER_FF is not set |
1274 | CONFIG_HID_ZEROPLUS=y | ||
1252 | # CONFIG_ZEROPLUS_FF is not set | 1275 | # CONFIG_ZEROPLUS_FF is not set |
1253 | CONFIG_USB_SUPPORT=y | 1276 | CONFIG_USB_SUPPORT=y |
1254 | CONFIG_USB_ARCH_HAS_HCD=y | 1277 | CONFIG_USB_ARCH_HAS_HCD=y |
@@ -1462,6 +1485,7 @@ CONFIG_FILE_LOCKING=y | |||
1462 | # CONFIG_GFS2_FS is not set | 1485 | # CONFIG_GFS2_FS is not set |
1463 | # CONFIG_OCFS2_FS is not set | 1486 | # CONFIG_OCFS2_FS is not set |
1464 | # CONFIG_BTRFS_FS is not set | 1487 | # CONFIG_BTRFS_FS is not set |
1488 | CONFIG_FSNOTIFY=y | ||
1465 | CONFIG_DNOTIFY=y | 1489 | CONFIG_DNOTIFY=y |
1466 | CONFIG_INOTIFY=y | 1490 | CONFIG_INOTIFY=y |
1467 | CONFIG_INOTIFY_USER=y | 1491 | CONFIG_INOTIFY_USER=y |
@@ -1636,25 +1660,28 @@ CONFIG_SYSCTL_SYSCALL_CHECK=y | |||
1636 | # CONFIG_DEBUG_PAGEALLOC is not set | 1660 | # CONFIG_DEBUG_PAGEALLOC is not set |
1637 | CONFIG_NOP_TRACER=y | 1661 | CONFIG_NOP_TRACER=y |
1638 | CONFIG_HAVE_FUNCTION_TRACER=y | 1662 | CONFIG_HAVE_FUNCTION_TRACER=y |
1663 | CONFIG_HAVE_DYNAMIC_FTRACE=y | ||
1664 | CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y | ||
1639 | CONFIG_RING_BUFFER=y | 1665 | CONFIG_RING_BUFFER=y |
1666 | CONFIG_EVENT_TRACING=y | ||
1667 | CONFIG_CONTEXT_SWITCH_TRACER=y | ||
1640 | CONFIG_TRACING=y | 1668 | CONFIG_TRACING=y |
1669 | CONFIG_GENERIC_TRACER=y | ||
1641 | CONFIG_TRACING_SUPPORT=y | 1670 | CONFIG_TRACING_SUPPORT=y |
1642 | 1671 | CONFIG_FTRACE=y | |
1643 | # | ||
1644 | # Tracers | ||
1645 | # | ||
1646 | # CONFIG_FUNCTION_TRACER is not set | 1672 | # CONFIG_FUNCTION_TRACER is not set |
1647 | # CONFIG_IRQSOFF_TRACER is not set | 1673 | # CONFIG_IRQSOFF_TRACER is not set |
1648 | # CONFIG_SCHED_TRACER is not set | 1674 | # CONFIG_SCHED_TRACER is not set |
1649 | # CONFIG_CONTEXT_SWITCH_TRACER is not set | ||
1650 | # CONFIG_EVENT_TRACER is not set | ||
1651 | # CONFIG_BOOT_TRACER is not set | 1675 | # CONFIG_BOOT_TRACER is not set |
1652 | # CONFIG_TRACE_BRANCH_PROFILING is not set | 1676 | CONFIG_BRANCH_PROFILE_NONE=y |
1677 | # CONFIG_PROFILE_ANNOTATED_BRANCHES is not set | ||
1678 | # CONFIG_PROFILE_ALL_BRANCHES is not set | ||
1653 | # CONFIG_STACK_TRACER is not set | 1679 | # CONFIG_STACK_TRACER is not set |
1654 | # CONFIG_KMEMTRACE is not set | 1680 | # CONFIG_KMEMTRACE is not set |
1655 | # CONFIG_WORKQUEUE_TRACER is not set | 1681 | # CONFIG_WORKQUEUE_TRACER is not set |
1656 | CONFIG_BLK_DEV_IO_TRACE=y | 1682 | CONFIG_BLK_DEV_IO_TRACE=y |
1657 | # CONFIG_FTRACE_STARTUP_TEST is not set | 1683 | # CONFIG_FTRACE_STARTUP_TEST is not set |
1684 | # CONFIG_RING_BUFFER_BENCHMARK is not set | ||
1658 | # CONFIG_DYNAMIC_DEBUG is not set | 1685 | # CONFIG_DYNAMIC_DEBUG is not set |
1659 | # CONFIG_SAMPLES is not set | 1686 | # CONFIG_SAMPLES is not set |
1660 | CONFIG_HAVE_ARCH_KGDB=y | 1687 | CONFIG_HAVE_ARCH_KGDB=y |
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h index a11b89ee9ef8..926397d345ff 100644 --- a/arch/sparc/include/asm/cpudata_64.h +++ b/arch/sparc/include/asm/cpudata_64.h | |||
@@ -6,9 +6,6 @@ | |||
6 | #ifndef _SPARC64_CPUDATA_H | 6 | #ifndef _SPARC64_CPUDATA_H |
7 | #define _SPARC64_CPUDATA_H | 7 | #define _SPARC64_CPUDATA_H |
8 | 8 | ||
9 | #include <asm/hypervisor.h> | ||
10 | #include <asm/asi.h> | ||
11 | |||
12 | #ifndef __ASSEMBLY__ | 9 | #ifndef __ASSEMBLY__ |
13 | 10 | ||
14 | #include <linux/percpu.h> | 11 | #include <linux/percpu.h> |
@@ -38,202 +35,10 @@ DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data); | |||
38 | #define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu)) | 35 | #define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu)) |
39 | #define local_cpu_data() __get_cpu_var(__cpu_data) | 36 | #define local_cpu_data() __get_cpu_var(__cpu_data) |
40 | 37 | ||
41 | /* Trap handling code needs to get at a few critical values upon | ||
42 | * trap entry and to process TSB misses. These cannot be in the | ||
43 | * per_cpu() area as we really need to lock them into the TLB and | ||
44 | * thus make them part of the main kernel image. As a result we | ||
45 | * try to make this as small as possible. | ||
46 | * | ||
47 | * This is padded out and aligned to 64-bytes to avoid false sharing | ||
48 | * on SMP. | ||
49 | */ | ||
50 | |||
51 | /* If you modify the size of this structure, please update | ||
52 | * TRAP_BLOCK_SZ_SHIFT below. | ||
53 | */ | ||
54 | struct thread_info; | ||
55 | struct trap_per_cpu { | ||
56 | /* D-cache line 1: Basic thread information, cpu and device mondo queues */ | ||
57 | struct thread_info *thread; | ||
58 | unsigned long pgd_paddr; | ||
59 | unsigned long cpu_mondo_pa; | ||
60 | unsigned long dev_mondo_pa; | ||
61 | |||
62 | /* D-cache line 2: Error Mondo Queue and kernel buffer pointers */ | ||
63 | unsigned long resum_mondo_pa; | ||
64 | unsigned long resum_kernel_buf_pa; | ||
65 | unsigned long nonresum_mondo_pa; | ||
66 | unsigned long nonresum_kernel_buf_pa; | ||
67 | |||
68 | /* Dcache lines 3, 4, 5, and 6: Hypervisor Fault Status */ | ||
69 | struct hv_fault_status fault_info; | ||
70 | |||
71 | /* Dcache line 7: Physical addresses of CPU send mondo block and CPU list. */ | ||
72 | unsigned long cpu_mondo_block_pa; | ||
73 | unsigned long cpu_list_pa; | ||
74 | unsigned long tsb_huge; | ||
75 | unsigned long tsb_huge_temp; | ||
76 | |||
77 | /* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. */ | ||
78 | unsigned long irq_worklist_pa; | ||
79 | unsigned int cpu_mondo_qmask; | ||
80 | unsigned int dev_mondo_qmask; | ||
81 | unsigned int resum_qmask; | ||
82 | unsigned int nonresum_qmask; | ||
83 | void *hdesc; | ||
84 | } __attribute__((aligned(64))); | ||
85 | extern struct trap_per_cpu trap_block[NR_CPUS]; | ||
86 | extern void init_cur_cpu_trap(struct thread_info *); | ||
87 | extern void setup_tba(void); | ||
88 | extern int ncpus_probed; | ||
89 | extern const struct seq_operations cpuinfo_op; | 38 | extern const struct seq_operations cpuinfo_op; |
90 | 39 | ||
91 | extern unsigned long real_hard_smp_processor_id(void); | ||
92 | |||
93 | struct cpuid_patch_entry { | ||
94 | unsigned int addr; | ||
95 | unsigned int cheetah_safari[4]; | ||
96 | unsigned int cheetah_jbus[4]; | ||
97 | unsigned int starfire[4]; | ||
98 | unsigned int sun4v[4]; | ||
99 | }; | ||
100 | extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end; | ||
101 | |||
102 | struct sun4v_1insn_patch_entry { | ||
103 | unsigned int addr; | ||
104 | unsigned int insn; | ||
105 | }; | ||
106 | extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch, | ||
107 | __sun4v_1insn_patch_end; | ||
108 | |||
109 | struct sun4v_2insn_patch_entry { | ||
110 | unsigned int addr; | ||
111 | unsigned int insns[2]; | ||
112 | }; | ||
113 | extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch, | ||
114 | __sun4v_2insn_patch_end; | ||
115 | |||
116 | #endif /* !(__ASSEMBLY__) */ | 40 | #endif /* !(__ASSEMBLY__) */ |
117 | 41 | ||
118 | #define TRAP_PER_CPU_THREAD 0x00 | 42 | #include <asm/trap_block.h> |
119 | #define TRAP_PER_CPU_PGD_PADDR 0x08 | ||
120 | #define TRAP_PER_CPU_CPU_MONDO_PA 0x10 | ||
121 | #define TRAP_PER_CPU_DEV_MONDO_PA 0x18 | ||
122 | #define TRAP_PER_CPU_RESUM_MONDO_PA 0x20 | ||
123 | #define TRAP_PER_CPU_RESUM_KBUF_PA 0x28 | ||
124 | #define TRAP_PER_CPU_NONRESUM_MONDO_PA 0x30 | ||
125 | #define TRAP_PER_CPU_NONRESUM_KBUF_PA 0x38 | ||
126 | #define TRAP_PER_CPU_FAULT_INFO 0x40 | ||
127 | #define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA 0xc0 | ||
128 | #define TRAP_PER_CPU_CPU_LIST_PA 0xc8 | ||
129 | #define TRAP_PER_CPU_TSB_HUGE 0xd0 | ||
130 | #define TRAP_PER_CPU_TSB_HUGE_TEMP 0xd8 | ||
131 | #define TRAP_PER_CPU_IRQ_WORKLIST_PA 0xe0 | ||
132 | #define TRAP_PER_CPU_CPU_MONDO_QMASK 0xe8 | ||
133 | #define TRAP_PER_CPU_DEV_MONDO_QMASK 0xec | ||
134 | #define TRAP_PER_CPU_RESUM_QMASK 0xf0 | ||
135 | #define TRAP_PER_CPU_NONRESUM_QMASK 0xf4 | ||
136 | |||
137 | #define TRAP_BLOCK_SZ_SHIFT 8 | ||
138 | |||
139 | #include <asm/scratchpad.h> | ||
140 | |||
141 | #define __GET_CPUID(REG) \ | ||
142 | /* Spitfire implementation (default). */ \ | ||
143 | 661: ldxa [%g0] ASI_UPA_CONFIG, REG; \ | ||
144 | srlx REG, 17, REG; \ | ||
145 | and REG, 0x1f, REG; \ | ||
146 | nop; \ | ||
147 | .section .cpuid_patch, "ax"; \ | ||
148 | /* Instruction location. */ \ | ||
149 | .word 661b; \ | ||
150 | /* Cheetah Safari implementation. */ \ | ||
151 | ldxa [%g0] ASI_SAFARI_CONFIG, REG; \ | ||
152 | srlx REG, 17, REG; \ | ||
153 | and REG, 0x3ff, REG; \ | ||
154 | nop; \ | ||
155 | /* Cheetah JBUS implementation. */ \ | ||
156 | ldxa [%g0] ASI_JBUS_CONFIG, REG; \ | ||
157 | srlx REG, 17, REG; \ | ||
158 | and REG, 0x1f, REG; \ | ||
159 | nop; \ | ||
160 | /* Starfire implementation. */ \ | ||
161 | sethi %hi(0x1fff40000d0 >> 9), REG; \ | ||
162 | sllx REG, 9, REG; \ | ||
163 | or REG, 0xd0, REG; \ | ||
164 | lduwa [REG] ASI_PHYS_BYPASS_EC_E, REG;\ | ||
165 | /* sun4v implementation. */ \ | ||
166 | mov SCRATCHPAD_CPUID, REG; \ | ||
167 | ldxa [REG] ASI_SCRATCHPAD, REG; \ | ||
168 | nop; \ | ||
169 | nop; \ | ||
170 | .previous; | ||
171 | |||
172 | #ifdef CONFIG_SMP | ||
173 | |||
174 | #define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ | ||
175 | __GET_CPUID(TMP) \ | ||
176 | sethi %hi(trap_block), DEST; \ | ||
177 | sllx TMP, TRAP_BLOCK_SZ_SHIFT, TMP; \ | ||
178 | or DEST, %lo(trap_block), DEST; \ | ||
179 | add DEST, TMP, DEST; \ | ||
180 | |||
181 | /* Clobbers TMP, current address space PGD phys address into DEST. */ | ||
182 | #define TRAP_LOAD_PGD_PHYS(DEST, TMP) \ | ||
183 | TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ | ||
184 | ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST; | ||
185 | |||
186 | /* Clobbers TMP, loads local processor's IRQ work area into DEST. */ | ||
187 | #define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \ | ||
188 | TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ | ||
189 | add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST; | ||
190 | |||
191 | /* Clobbers TMP, loads DEST with current thread info pointer. */ | ||
192 | #define TRAP_LOAD_THREAD_REG(DEST, TMP) \ | ||
193 | TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ | ||
194 | ldx [DEST + TRAP_PER_CPU_THREAD], DEST; | ||
195 | |||
196 | /* Given the current thread info pointer in THR, load the per-cpu | ||
197 | * area base of the current processor into DEST. REG1, REG2, and REG3 are | ||
198 | * clobbered. | ||
199 | * | ||
200 | * You absolutely cannot use DEST as a temporary in this code. The | ||
201 | * reason is that traps can happen during execution, and return from | ||
202 | * trap will load the fully resolved DEST per-cpu base. This can corrupt | ||
203 | * the calculations done by the macro mid-stream. | ||
204 | */ | ||
205 | #define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3) \ | ||
206 | lduh [THR + TI_CPU], REG1; \ | ||
207 | sethi %hi(__per_cpu_shift), REG3; \ | ||
208 | sethi %hi(__per_cpu_base), REG2; \ | ||
209 | ldx [REG3 + %lo(__per_cpu_shift)], REG3; \ | ||
210 | ldx [REG2 + %lo(__per_cpu_base)], REG2; \ | ||
211 | sllx REG1, REG3, REG3; \ | ||
212 | add REG3, REG2, DEST; | ||
213 | |||
214 | #else | ||
215 | |||
216 | #define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ | ||
217 | sethi %hi(trap_block), DEST; \ | ||
218 | or DEST, %lo(trap_block), DEST; \ | ||
219 | |||
220 | /* Uniprocessor versions, we know the cpuid is zero. */ | ||
221 | #define TRAP_LOAD_PGD_PHYS(DEST, TMP) \ | ||
222 | TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ | ||
223 | ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST; | ||
224 | |||
225 | /* Clobbers TMP, loads local processor's IRQ work area into DEST. */ | ||
226 | #define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \ | ||
227 | TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ | ||
228 | add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST; | ||
229 | |||
230 | #define TRAP_LOAD_THREAD_REG(DEST, TMP) \ | ||
231 | TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ | ||
232 | ldx [DEST + TRAP_PER_CPU_THREAD], DEST; | ||
233 | |||
234 | /* No per-cpu areas on uniprocessor, so no need to load DEST. */ | ||
235 | #define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3) | ||
236 | |||
237 | #endif /* !(CONFIG_SMP) */ | ||
238 | 43 | ||
239 | #endif /* _SPARC64_CPUDATA_H */ | 44 | #endif /* _SPARC64_CPUDATA_H */ |
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index 0f4150e26619..204e4bf64438 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h | |||
@@ -1,8 +1,166 @@ | |||
1 | #ifndef ___ASM_SPARC_DMA_MAPPING_H | 1 | #ifndef ___ASM_SPARC_DMA_MAPPING_H |
2 | #define ___ASM_SPARC_DMA_MAPPING_H | 2 | #define ___ASM_SPARC_DMA_MAPPING_H |
3 | #if defined(__sparc__) && defined(__arch64__) | 3 | |
4 | #include <asm/dma-mapping_64.h> | 4 | #include <linux/scatterlist.h> |
5 | #else | 5 | #include <linux/mm.h> |
6 | #include <asm/dma-mapping_32.h> | 6 | |
7 | #endif | 7 | #define DMA_ERROR_CODE (~(dma_addr_t)0x0) |
8 | |||
9 | extern int dma_supported(struct device *dev, u64 mask); | ||
10 | extern int dma_set_mask(struct device *dev, u64 dma_mask); | ||
11 | |||
12 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | ||
13 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | ||
14 | #define dma_is_consistent(d, h) (1) | ||
15 | |||
16 | struct dma_ops { | ||
17 | void *(*alloc_coherent)(struct device *dev, size_t size, | ||
18 | dma_addr_t *dma_handle, gfp_t flag); | ||
19 | void (*free_coherent)(struct device *dev, size_t size, | ||
20 | void *cpu_addr, dma_addr_t dma_handle); | ||
21 | dma_addr_t (*map_page)(struct device *dev, struct page *page, | ||
22 | unsigned long offset, size_t size, | ||
23 | enum dma_data_direction direction); | ||
24 | void (*unmap_page)(struct device *dev, dma_addr_t dma_addr, | ||
25 | size_t size, | ||
26 | enum dma_data_direction direction); | ||
27 | int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, | ||
28 | enum dma_data_direction direction); | ||
29 | void (*unmap_sg)(struct device *dev, struct scatterlist *sg, | ||
30 | int nhwentries, | ||
31 | enum dma_data_direction direction); | ||
32 | void (*sync_single_for_cpu)(struct device *dev, | ||
33 | dma_addr_t dma_handle, size_t size, | ||
34 | enum dma_data_direction direction); | ||
35 | void (*sync_single_for_device)(struct device *dev, | ||
36 | dma_addr_t dma_handle, size_t size, | ||
37 | enum dma_data_direction direction); | ||
38 | void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg, | ||
39 | int nelems, | ||
40 | enum dma_data_direction direction); | ||
41 | void (*sync_sg_for_device)(struct device *dev, | ||
42 | struct scatterlist *sg, int nents, | ||
43 | enum dma_data_direction dir); | ||
44 | }; | ||
45 | extern const struct dma_ops *dma_ops; | ||
46 | |||
47 | static inline void *dma_alloc_coherent(struct device *dev, size_t size, | ||
48 | dma_addr_t *dma_handle, gfp_t flag) | ||
49 | { | ||
50 | return dma_ops->alloc_coherent(dev, size, dma_handle, flag); | ||
51 | } | ||
52 | |||
53 | static inline void dma_free_coherent(struct device *dev, size_t size, | ||
54 | void *cpu_addr, dma_addr_t dma_handle) | ||
55 | { | ||
56 | dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); | ||
57 | } | ||
58 | |||
59 | static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, | ||
60 | size_t size, | ||
61 | enum dma_data_direction direction) | ||
62 | { | ||
63 | return dma_ops->map_page(dev, virt_to_page(cpu_addr), | ||
64 | (unsigned long)cpu_addr & ~PAGE_MASK, size, | ||
65 | direction); | ||
66 | } | ||
67 | |||
68 | static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, | ||
69 | size_t size, | ||
70 | enum dma_data_direction direction) | ||
71 | { | ||
72 | dma_ops->unmap_page(dev, dma_addr, size, direction); | ||
73 | } | ||
74 | |||
75 | static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, | ||
76 | unsigned long offset, size_t size, | ||
77 | enum dma_data_direction direction) | ||
78 | { | ||
79 | return dma_ops->map_page(dev, page, offset, size, direction); | ||
80 | } | ||
81 | |||
82 | static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address, | ||
83 | size_t size, | ||
84 | enum dma_data_direction direction) | ||
85 | { | ||
86 | dma_ops->unmap_page(dev, dma_address, size, direction); | ||
87 | } | ||
88 | |||
89 | static inline int dma_map_sg(struct device *dev, struct scatterlist *sg, | ||
90 | int nents, enum dma_data_direction direction) | ||
91 | { | ||
92 | return dma_ops->map_sg(dev, sg, nents, direction); | ||
93 | } | ||
94 | |||
95 | static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg, | ||
96 | int nents, enum dma_data_direction direction) | ||
97 | { | ||
98 | dma_ops->unmap_sg(dev, sg, nents, direction); | ||
99 | } | ||
100 | |||
101 | static inline void dma_sync_single_for_cpu(struct device *dev, | ||
102 | dma_addr_t dma_handle, size_t size, | ||
103 | enum dma_data_direction direction) | ||
104 | { | ||
105 | dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction); | ||
106 | } | ||
107 | |||
108 | static inline void dma_sync_single_for_device(struct device *dev, | ||
109 | dma_addr_t dma_handle, | ||
110 | size_t size, | ||
111 | enum dma_data_direction direction) | ||
112 | { | ||
113 | if (dma_ops->sync_single_for_device) | ||
114 | dma_ops->sync_single_for_device(dev, dma_handle, size, | ||
115 | direction); | ||
116 | } | ||
117 | |||
118 | static inline void dma_sync_sg_for_cpu(struct device *dev, | ||
119 | struct scatterlist *sg, int nelems, | ||
120 | enum dma_data_direction direction) | ||
121 | { | ||
122 | dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction); | ||
123 | } | ||
124 | |||
125 | static inline void dma_sync_sg_for_device(struct device *dev, | ||
126 | struct scatterlist *sg, int nelems, | ||
127 | enum dma_data_direction direction) | ||
128 | { | ||
129 | if (dma_ops->sync_sg_for_device) | ||
130 | dma_ops->sync_sg_for_device(dev, sg, nelems, direction); | ||
131 | } | ||
132 | |||
133 | static inline void dma_sync_single_range_for_cpu(struct device *dev, | ||
134 | dma_addr_t dma_handle, | ||
135 | unsigned long offset, | ||
136 | size_t size, | ||
137 | enum dma_data_direction dir) | ||
138 | { | ||
139 | dma_sync_single_for_cpu(dev, dma_handle+offset, size, dir); | ||
140 | } | ||
141 | |||
142 | static inline void dma_sync_single_range_for_device(struct device *dev, | ||
143 | dma_addr_t dma_handle, | ||
144 | unsigned long offset, | ||
145 | size_t size, | ||
146 | enum dma_data_direction dir) | ||
147 | { | ||
148 | dma_sync_single_for_device(dev, dma_handle+offset, size, dir); | ||
149 | } | ||
150 | |||
151 | |||
152 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | ||
153 | { | ||
154 | return (dma_addr == DMA_ERROR_CODE); | ||
155 | } | ||
156 | |||
157 | static inline int dma_get_cache_alignment(void) | ||
158 | { | ||
159 | /* | ||
160 | * no easy way to get cache size on all processors, so return | ||
161 | * the maximum possible, to be safe | ||
162 | */ | ||
163 | return (1 << INTERNODE_CACHE_SHIFT); | ||
164 | } | ||
165 | |||
8 | #endif | 166 | #endif |
diff --git a/arch/sparc/include/asm/dma-mapping_32.h b/arch/sparc/include/asm/dma-mapping_32.h deleted file mode 100644 index 8a57ea0573e6..000000000000 --- a/arch/sparc/include/asm/dma-mapping_32.h +++ /dev/null | |||
@@ -1,60 +0,0 @@ | |||
1 | #ifndef _ASM_SPARC_DMA_MAPPING_H | ||
2 | #define _ASM_SPARC_DMA_MAPPING_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | |||
6 | struct device; | ||
7 | struct scatterlist; | ||
8 | struct page; | ||
9 | |||
10 | #define DMA_ERROR_CODE (~(dma_addr_t)0x0) | ||
11 | |||
12 | extern int dma_supported(struct device *dev, u64 mask); | ||
13 | extern int dma_set_mask(struct device *dev, u64 dma_mask); | ||
14 | extern void *dma_alloc_coherent(struct device *dev, size_t size, | ||
15 | dma_addr_t *dma_handle, gfp_t flag); | ||
16 | extern void dma_free_coherent(struct device *dev, size_t size, | ||
17 | void *cpu_addr, dma_addr_t dma_handle); | ||
18 | extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, | ||
19 | size_t size, | ||
20 | enum dma_data_direction direction); | ||
21 | extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, | ||
22 | size_t size, | ||
23 | enum dma_data_direction direction); | ||
24 | extern dma_addr_t dma_map_page(struct device *dev, struct page *page, | ||
25 | unsigned long offset, size_t size, | ||
26 | enum dma_data_direction direction); | ||
27 | extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address, | ||
28 | size_t size, enum dma_data_direction direction); | ||
29 | extern int dma_map_sg(struct device *dev, struct scatterlist *sg, | ||
30 | int nents, enum dma_data_direction direction); | ||
31 | extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg, | ||
32 | int nents, enum dma_data_direction direction); | ||
33 | extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||
34 | size_t size, | ||
35 | enum dma_data_direction direction); | ||
36 | extern void dma_sync_single_for_device(struct device *dev, | ||
37 | dma_addr_t dma_handle, | ||
38 | size_t size, | ||
39 | enum dma_data_direction direction); | ||
40 | extern void dma_sync_single_range_for_cpu(struct device *dev, | ||
41 | dma_addr_t dma_handle, | ||
42 | unsigned long offset, | ||
43 | size_t size, | ||
44 | enum dma_data_direction direction); | ||
45 | extern void dma_sync_single_range_for_device(struct device *dev, | ||
46 | dma_addr_t dma_handle, | ||
47 | unsigned long offset, size_t size, | ||
48 | enum dma_data_direction direction); | ||
49 | extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | ||
50 | int nelems, enum dma_data_direction direction); | ||
51 | extern void dma_sync_sg_for_device(struct device *dev, | ||
52 | struct scatterlist *sg, int nelems, | ||
53 | enum dma_data_direction direction); | ||
54 | extern int dma_mapping_error(struct device *dev, dma_addr_t dma_addr); | ||
55 | extern int dma_get_cache_alignment(void); | ||
56 | |||
57 | #define dma_alloc_noncoherent dma_alloc_coherent | ||
58 | #define dma_free_noncoherent dma_free_coherent | ||
59 | |||
60 | #endif /* _ASM_SPARC_DMA_MAPPING_H */ | ||
diff --git a/arch/sparc/include/asm/dma-mapping_64.h b/arch/sparc/include/asm/dma-mapping_64.h deleted file mode 100644 index bfa64f9702d5..000000000000 --- a/arch/sparc/include/asm/dma-mapping_64.h +++ /dev/null | |||
@@ -1,154 +0,0 @@ | |||
1 | #ifndef _ASM_SPARC64_DMA_MAPPING_H | ||
2 | #define _ASM_SPARC64_DMA_MAPPING_H | ||
3 | |||
4 | #include <linux/scatterlist.h> | ||
5 | #include <linux/mm.h> | ||
6 | |||
7 | #define DMA_ERROR_CODE (~(dma_addr_t)0x0) | ||
8 | |||
9 | struct dma_ops { | ||
10 | void *(*alloc_coherent)(struct device *dev, size_t size, | ||
11 | dma_addr_t *dma_handle, gfp_t flag); | ||
12 | void (*free_coherent)(struct device *dev, size_t size, | ||
13 | void *cpu_addr, dma_addr_t dma_handle); | ||
14 | dma_addr_t (*map_single)(struct device *dev, void *cpu_addr, | ||
15 | size_t size, | ||
16 | enum dma_data_direction direction); | ||
17 | void (*unmap_single)(struct device *dev, dma_addr_t dma_addr, | ||
18 | size_t size, | ||
19 | enum dma_data_direction direction); | ||
20 | int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, | ||
21 | enum dma_data_direction direction); | ||
22 | void (*unmap_sg)(struct device *dev, struct scatterlist *sg, | ||
23 | int nhwentries, | ||
24 | enum dma_data_direction direction); | ||
25 | void (*sync_single_for_cpu)(struct device *dev, | ||
26 | dma_addr_t dma_handle, size_t size, | ||
27 | enum dma_data_direction direction); | ||
28 | void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg, | ||
29 | int nelems, | ||
30 | enum dma_data_direction direction); | ||
31 | }; | ||
32 | extern const struct dma_ops *dma_ops; | ||
33 | |||
34 | extern int dma_supported(struct device *dev, u64 mask); | ||
35 | extern int dma_set_mask(struct device *dev, u64 dma_mask); | ||
36 | |||
37 | static inline void *dma_alloc_coherent(struct device *dev, size_t size, | ||
38 | dma_addr_t *dma_handle, gfp_t flag) | ||
39 | { | ||
40 | return dma_ops->alloc_coherent(dev, size, dma_handle, flag); | ||
41 | } | ||
42 | |||
43 | static inline void dma_free_coherent(struct device *dev, size_t size, | ||
44 | void *cpu_addr, dma_addr_t dma_handle) | ||
45 | { | ||
46 | dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); | ||
47 | } | ||
48 | |||
49 | static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, | ||
50 | size_t size, | ||
51 | enum dma_data_direction direction) | ||
52 | { | ||
53 | return dma_ops->map_single(dev, cpu_addr, size, direction); | ||
54 | } | ||
55 | |||
56 | static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, | ||
57 | size_t size, | ||
58 | enum dma_data_direction direction) | ||
59 | { | ||
60 | dma_ops->unmap_single(dev, dma_addr, size, direction); | ||
61 | } | ||
62 | |||
63 | static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, | ||
64 | unsigned long offset, size_t size, | ||
65 | enum dma_data_direction direction) | ||
66 | { | ||
67 | return dma_ops->map_single(dev, page_address(page) + offset, | ||
68 | size, direction); | ||
69 | } | ||
70 | |||
71 | static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address, | ||
72 | size_t size, | ||
73 | enum dma_data_direction direction) | ||
74 | { | ||
75 | dma_ops->unmap_single(dev, dma_address, size, direction); | ||
76 | } | ||
77 | |||
78 | static inline int dma_map_sg(struct device *dev, struct scatterlist *sg, | ||
79 | int nents, enum dma_data_direction direction) | ||
80 | { | ||
81 | return dma_ops->map_sg(dev, sg, nents, direction); | ||
82 | } | ||
83 | |||
84 | static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg, | ||
85 | int nents, enum dma_data_direction direction) | ||
86 | { | ||
87 | dma_ops->unmap_sg(dev, sg, nents, direction); | ||
88 | } | ||
89 | |||
90 | static inline void dma_sync_single_for_cpu(struct device *dev, | ||
91 | dma_addr_t dma_handle, size_t size, | ||
92 | enum dma_data_direction direction) | ||
93 | { | ||
94 | dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction); | ||
95 | } | ||
96 | |||
97 | static inline void dma_sync_single_for_device(struct device *dev, | ||
98 | dma_addr_t dma_handle, | ||
99 | size_t size, | ||
100 | enum dma_data_direction direction) | ||
101 | { | ||
102 | /* No flushing needed to sync cpu writes to the device. */ | ||
103 | } | ||
104 | |||
105 | static inline void dma_sync_single_range_for_cpu(struct device *dev, | ||
106 | dma_addr_t dma_handle, | ||
107 | unsigned long offset, | ||
108 | size_t size, | ||
109 | enum dma_data_direction direction) | ||
110 | { | ||
111 | dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction); | ||
112 | } | ||
113 | |||
114 | static inline void dma_sync_single_range_for_device(struct device *dev, | ||
115 | dma_addr_t dma_handle, | ||
116 | unsigned long offset, | ||
117 | size_t size, | ||
118 | enum dma_data_direction direction) | ||
119 | { | ||
120 | /* No flushing needed to sync cpu writes to the device. */ | ||
121 | } | ||
122 | |||
123 | |||
124 | static inline void dma_sync_sg_for_cpu(struct device *dev, | ||
125 | struct scatterlist *sg, int nelems, | ||
126 | enum dma_data_direction direction) | ||
127 | { | ||
128 | dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction); | ||
129 | } | ||
130 | |||
131 | static inline void dma_sync_sg_for_device(struct device *dev, | ||
132 | struct scatterlist *sg, int nelems, | ||
133 | enum dma_data_direction direction) | ||
134 | { | ||
135 | /* No flushing needed to sync cpu writes to the device. */ | ||
136 | } | ||
137 | |||
138 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | ||
139 | { | ||
140 | return (dma_addr == DMA_ERROR_CODE); | ||
141 | } | ||
142 | |||
143 | static inline int dma_get_cache_alignment(void) | ||
144 | { | ||
145 | /* no easy way to get cache size on all processors, so return | ||
146 | * the maximum possible, to be safe */ | ||
147 | return (1 << INTERNODE_CACHE_SHIFT); | ||
148 | } | ||
149 | |||
150 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | ||
151 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | ||
152 | #define dma_is_consistent(d, h) (1) | ||
153 | |||
154 | #endif /* _ASM_SPARC64_DMA_MAPPING_H */ | ||
diff --git a/arch/sparc/include/asm/errno.h b/arch/sparc/include/asm/errno.h index a9ef172977de..4e2bc490d714 100644 --- a/arch/sparc/include/asm/errno.h +++ b/arch/sparc/include/asm/errno.h | |||
@@ -110,4 +110,6 @@ | |||
110 | #define EOWNERDEAD 132 /* Owner died */ | 110 | #define EOWNERDEAD 132 /* Owner died */ |
111 | #define ENOTRECOVERABLE 133 /* State not recoverable */ | 111 | #define ENOTRECOVERABLE 133 /* State not recoverable */ |
112 | 112 | ||
113 | #define ERFKILL 134 /* Operation not possible due to RF-kill */ | ||
114 | |||
113 | #endif | 115 | #endif |
diff --git a/arch/sparc/include/asm/ftrace.h b/arch/sparc/include/asm/ftrace.h index d27716cd38c1..b0f18e9893db 100644 --- a/arch/sparc/include/asm/ftrace.h +++ b/arch/sparc/include/asm/ftrace.h | |||
@@ -11,4 +11,15 @@ extern void _mcount(void); | |||
11 | 11 | ||
12 | #endif | 12 | #endif |
13 | 13 | ||
14 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
15 | /* reloction of mcount call site is the same as the address */ | ||
16 | static inline unsigned long ftrace_call_adjust(unsigned long addr) | ||
17 | { | ||
18 | return addr; | ||
19 | } | ||
20 | |||
21 | struct dyn_arch_ftrace { | ||
22 | }; | ||
23 | #endif /* CONFIG_DYNAMIC_FTRACE */ | ||
24 | |||
14 | #endif /* _ASM_SPARC64_FTRACE */ | 25 | #endif /* _ASM_SPARC64_FTRACE */ |
diff --git a/arch/sparc/include/asm/mdesc.h b/arch/sparc/include/asm/mdesc.h index 1acc7272e537..9faa046713fb 100644 --- a/arch/sparc/include/asm/mdesc.h +++ b/arch/sparc/include/asm/mdesc.h | |||
@@ -71,7 +71,8 @@ struct mdesc_notifier_client { | |||
71 | 71 | ||
72 | extern void mdesc_register_notifier(struct mdesc_notifier_client *client); | 72 | extern void mdesc_register_notifier(struct mdesc_notifier_client *client); |
73 | 73 | ||
74 | extern void mdesc_fill_in_cpu_data(cpumask_t mask); | 74 | extern void mdesc_fill_in_cpu_data(cpumask_t *mask); |
75 | extern void mdesc_populate_present_mask(cpumask_t *mask); | ||
75 | 76 | ||
76 | extern void sun4v_mdesc_init(void); | 77 | extern void sun4v_mdesc_init(void); |
77 | 78 | ||
diff --git a/arch/sparc/include/asm/percpu_64.h b/arch/sparc/include/asm/percpu_64.h index bee64593023e..007aafb4ae97 100644 --- a/arch/sparc/include/asm/percpu_64.h +++ b/arch/sparc/include/asm/percpu_64.h | |||
@@ -7,20 +7,16 @@ register unsigned long __local_per_cpu_offset asm("g5"); | |||
7 | 7 | ||
8 | #ifdef CONFIG_SMP | 8 | #ifdef CONFIG_SMP |
9 | 9 | ||
10 | extern void real_setup_per_cpu_areas(void); | 10 | #include <asm/trap_block.h> |
11 | 11 | ||
12 | extern unsigned long __per_cpu_base; | ||
13 | extern unsigned long __per_cpu_shift; | ||
14 | #define __per_cpu_offset(__cpu) \ | 12 | #define __per_cpu_offset(__cpu) \ |
15 | (__per_cpu_base + ((unsigned long)(__cpu) << __per_cpu_shift)) | 13 | (trap_block[(__cpu)].__per_cpu_base) |
16 | #define per_cpu_offset(x) (__per_cpu_offset(x)) | 14 | #define per_cpu_offset(x) (__per_cpu_offset(x)) |
17 | 15 | ||
18 | #define __my_cpu_offset __local_per_cpu_offset | 16 | #define __my_cpu_offset __local_per_cpu_offset |
19 | 17 | ||
20 | #else /* ! SMP */ | 18 | #else /* ! SMP */ |
21 | 19 | ||
22 | #define real_setup_per_cpu_areas() do { } while (0) | ||
23 | |||
24 | #endif /* SMP */ | 20 | #endif /* SMP */ |
25 | 21 | ||
26 | #include <asm-generic/percpu.h> | 22 | #include <asm-generic/percpu.h> |
diff --git a/arch/sparc/include/asm/prom.h b/arch/sparc/include/asm/prom.h index 900d44714f8d..be8d7aaeb60d 100644 --- a/arch/sparc/include/asm/prom.h +++ b/arch/sparc/include/asm/prom.h | |||
@@ -86,6 +86,8 @@ extern int of_node_to_nid(struct device_node *dp); | |||
86 | #endif | 86 | #endif |
87 | 87 | ||
88 | extern void prom_build_devicetree(void); | 88 | extern void prom_build_devicetree(void); |
89 | extern void of_populate_present_mask(void); | ||
90 | extern void of_fill_in_cpu_data(void); | ||
89 | 91 | ||
90 | /* Dummy ref counting routines - to be implemented later */ | 92 | /* Dummy ref counting routines - to be implemented later */ |
91 | static inline struct device_node *of_node_get(struct device_node *node) | 93 | static inline struct device_node *of_node_get(struct device_node *node) |
diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h new file mode 100644 index 000000000000..7e26b2db6211 --- /dev/null +++ b/arch/sparc/include/asm/trap_block.h | |||
@@ -0,0 +1,207 @@ | |||
1 | #ifndef _SPARC_TRAP_BLOCK_H | ||
2 | #define _SPARC_TRAP_BLOCK_H | ||
3 | |||
4 | #include <asm/hypervisor.h> | ||
5 | #include <asm/asi.h> | ||
6 | |||
7 | #ifndef __ASSEMBLY__ | ||
8 | |||
9 | /* Trap handling code needs to get at a few critical values upon | ||
10 | * trap entry and to process TSB misses. These cannot be in the | ||
11 | * per_cpu() area as we really need to lock them into the TLB and | ||
12 | * thus make them part of the main kernel image. As a result we | ||
13 | * try to make this as small as possible. | ||
14 | * | ||
15 | * This is padded out and aligned to 64-bytes to avoid false sharing | ||
16 | * on SMP. | ||
17 | */ | ||
18 | |||
19 | /* If you modify the size of this structure, please update | ||
20 | * TRAP_BLOCK_SZ_SHIFT below. | ||
21 | */ | ||
22 | struct thread_info; | ||
23 | struct trap_per_cpu { | ||
24 | /* D-cache line 1: Basic thread information, cpu and device mondo queues */ | ||
25 | struct thread_info *thread; | ||
26 | unsigned long pgd_paddr; | ||
27 | unsigned long cpu_mondo_pa; | ||
28 | unsigned long dev_mondo_pa; | ||
29 | |||
30 | /* D-cache line 2: Error Mondo Queue and kernel buffer pointers */ | ||
31 | unsigned long resum_mondo_pa; | ||
32 | unsigned long resum_kernel_buf_pa; | ||
33 | unsigned long nonresum_mondo_pa; | ||
34 | unsigned long nonresum_kernel_buf_pa; | ||
35 | |||
36 | /* Dcache lines 3, 4, 5, and 6: Hypervisor Fault Status */ | ||
37 | struct hv_fault_status fault_info; | ||
38 | |||
39 | /* Dcache line 7: Physical addresses of CPU send mondo block and CPU list. */ | ||
40 | unsigned long cpu_mondo_block_pa; | ||
41 | unsigned long cpu_list_pa; | ||
42 | unsigned long tsb_huge; | ||
43 | unsigned long tsb_huge_temp; | ||
44 | |||
45 | /* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. */ | ||
46 | unsigned long irq_worklist_pa; | ||
47 | unsigned int cpu_mondo_qmask; | ||
48 | unsigned int dev_mondo_qmask; | ||
49 | unsigned int resum_qmask; | ||
50 | unsigned int nonresum_qmask; | ||
51 | unsigned long __per_cpu_base; | ||
52 | } __attribute__((aligned(64))); | ||
53 | extern struct trap_per_cpu trap_block[NR_CPUS]; | ||
54 | extern void init_cur_cpu_trap(struct thread_info *); | ||
55 | extern void setup_tba(void); | ||
56 | extern int ncpus_probed; | ||
57 | |||
58 | extern unsigned long real_hard_smp_processor_id(void); | ||
59 | |||
60 | struct cpuid_patch_entry { | ||
61 | unsigned int addr; | ||
62 | unsigned int cheetah_safari[4]; | ||
63 | unsigned int cheetah_jbus[4]; | ||
64 | unsigned int starfire[4]; | ||
65 | unsigned int sun4v[4]; | ||
66 | }; | ||
67 | extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end; | ||
68 | |||
69 | struct sun4v_1insn_patch_entry { | ||
70 | unsigned int addr; | ||
71 | unsigned int insn; | ||
72 | }; | ||
73 | extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch, | ||
74 | __sun4v_1insn_patch_end; | ||
75 | |||
76 | struct sun4v_2insn_patch_entry { | ||
77 | unsigned int addr; | ||
78 | unsigned int insns[2]; | ||
79 | }; | ||
80 | extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch, | ||
81 | __sun4v_2insn_patch_end; | ||
82 | |||
83 | |||
84 | #endif /* !(__ASSEMBLY__) */ | ||
85 | |||
86 | #define TRAP_PER_CPU_THREAD 0x00 | ||
87 | #define TRAP_PER_CPU_PGD_PADDR 0x08 | ||
88 | #define TRAP_PER_CPU_CPU_MONDO_PA 0x10 | ||
89 | #define TRAP_PER_CPU_DEV_MONDO_PA 0x18 | ||
90 | #define TRAP_PER_CPU_RESUM_MONDO_PA 0x20 | ||
91 | #define TRAP_PER_CPU_RESUM_KBUF_PA 0x28 | ||
92 | #define TRAP_PER_CPU_NONRESUM_MONDO_PA 0x30 | ||
93 | #define TRAP_PER_CPU_NONRESUM_KBUF_PA 0x38 | ||
94 | #define TRAP_PER_CPU_FAULT_INFO 0x40 | ||
95 | #define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA 0xc0 | ||
96 | #define TRAP_PER_CPU_CPU_LIST_PA 0xc8 | ||
97 | #define TRAP_PER_CPU_TSB_HUGE 0xd0 | ||
98 | #define TRAP_PER_CPU_TSB_HUGE_TEMP 0xd8 | ||
99 | #define TRAP_PER_CPU_IRQ_WORKLIST_PA 0xe0 | ||
100 | #define TRAP_PER_CPU_CPU_MONDO_QMASK 0xe8 | ||
101 | #define TRAP_PER_CPU_DEV_MONDO_QMASK 0xec | ||
102 | #define TRAP_PER_CPU_RESUM_QMASK 0xf0 | ||
103 | #define TRAP_PER_CPU_NONRESUM_QMASK 0xf4 | ||
104 | #define TRAP_PER_CPU_PER_CPU_BASE 0xf8 | ||
105 | |||
106 | #define TRAP_BLOCK_SZ_SHIFT 8 | ||
107 | |||
108 | #include <asm/scratchpad.h> | ||
109 | |||
110 | #define __GET_CPUID(REG) \ | ||
111 | /* Spitfire implementation (default). */ \ | ||
112 | 661: ldxa [%g0] ASI_UPA_CONFIG, REG; \ | ||
113 | srlx REG, 17, REG; \ | ||
114 | and REG, 0x1f, REG; \ | ||
115 | nop; \ | ||
116 | .section .cpuid_patch, "ax"; \ | ||
117 | /* Instruction location. */ \ | ||
118 | .word 661b; \ | ||
119 | /* Cheetah Safari implementation. */ \ | ||
120 | ldxa [%g0] ASI_SAFARI_CONFIG, REG; \ | ||
121 | srlx REG, 17, REG; \ | ||
122 | and REG, 0x3ff, REG; \ | ||
123 | nop; \ | ||
124 | /* Cheetah JBUS implementation. */ \ | ||
125 | ldxa [%g0] ASI_JBUS_CONFIG, REG; \ | ||
126 | srlx REG, 17, REG; \ | ||
127 | and REG, 0x1f, REG; \ | ||
128 | nop; \ | ||
129 | /* Starfire implementation. */ \ | ||
130 | sethi %hi(0x1fff40000d0 >> 9), REG; \ | ||
131 | sllx REG, 9, REG; \ | ||
132 | or REG, 0xd0, REG; \ | ||
133 | lduwa [REG] ASI_PHYS_BYPASS_EC_E, REG;\ | ||
134 | /* sun4v implementation. */ \ | ||
135 | mov SCRATCHPAD_CPUID, REG; \ | ||
136 | ldxa [REG] ASI_SCRATCHPAD, REG; \ | ||
137 | nop; \ | ||
138 | nop; \ | ||
139 | .previous; | ||
140 | |||
141 | #ifdef CONFIG_SMP | ||
142 | |||
143 | #define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ | ||
144 | __GET_CPUID(TMP) \ | ||
145 | sethi %hi(trap_block), DEST; \ | ||
146 | sllx TMP, TRAP_BLOCK_SZ_SHIFT, TMP; \ | ||
147 | or DEST, %lo(trap_block), DEST; \ | ||
148 | add DEST, TMP, DEST; \ | ||
149 | |||
150 | /* Clobbers TMP, current address space PGD phys address into DEST. */ | ||
151 | #define TRAP_LOAD_PGD_PHYS(DEST, TMP) \ | ||
152 | TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ | ||
153 | ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST; | ||
154 | |||
155 | /* Clobbers TMP, loads local processor's IRQ work area into DEST. */ | ||
156 | #define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \ | ||
157 | TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ | ||
158 | add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST; | ||
159 | |||
160 | /* Clobbers TMP, loads DEST with current thread info pointer. */ | ||
161 | #define TRAP_LOAD_THREAD_REG(DEST, TMP) \ | ||
162 | TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ | ||
163 | ldx [DEST + TRAP_PER_CPU_THREAD], DEST; | ||
164 | |||
165 | /* Given the current thread info pointer in THR, load the per-cpu | ||
166 | * area base of the current processor into DEST. REG1, REG2, and REG3 are | ||
167 | * clobbered. | ||
168 | * | ||
169 | * You absolutely cannot use DEST as a temporary in this code. The | ||
170 | * reason is that traps can happen during execution, and return from | ||
171 | * trap will load the fully resolved DEST per-cpu base. This can corrupt | ||
172 | * the calculations done by the macro mid-stream. | ||
173 | */ | ||
174 | #define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3) \ | ||
175 | lduh [THR + TI_CPU], REG1; \ | ||
176 | sethi %hi(trap_block), REG2; \ | ||
177 | sllx REG1, TRAP_BLOCK_SZ_SHIFT, REG1; \ | ||
178 | or REG2, %lo(trap_block), REG2; \ | ||
179 | add REG2, REG1, REG2; \ | ||
180 | ldx [REG2 + TRAP_PER_CPU_PER_CPU_BASE], DEST; | ||
181 | |||
182 | #else | ||
183 | |||
184 | #define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ | ||
185 | sethi %hi(trap_block), DEST; \ | ||
186 | or DEST, %lo(trap_block), DEST; \ | ||
187 | |||
188 | /* Uniprocessor versions, we know the cpuid is zero. */ | ||
189 | #define TRAP_LOAD_PGD_PHYS(DEST, TMP) \ | ||
190 | TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ | ||
191 | ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST; | ||
192 | |||
193 | /* Clobbers TMP, loads local processor's IRQ work area into DEST. */ | ||
194 | #define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \ | ||
195 | TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ | ||
196 | add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST; | ||
197 | |||
198 | #define TRAP_LOAD_THREAD_REG(DEST, TMP) \ | ||
199 | TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ | ||
200 | ldx [DEST + TRAP_PER_CPU_THREAD], DEST; | ||
201 | |||
202 | /* No per-cpu areas on uniprocessor, so no need to load DEST. */ | ||
203 | #define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3) | ||
204 | |||
205 | #endif /* !(CONFIG_SMP) */ | ||
206 | |||
207 | #endif /* _SPARC_TRAP_BLOCK_H */ | ||
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h index b8eb71ef3163..b2c406de7d4f 100644 --- a/arch/sparc/include/asm/unistd.h +++ b/arch/sparc/include/asm/unistd.h | |||
@@ -394,8 +394,9 @@ | |||
394 | #define __NR_accept4 323 | 394 | #define __NR_accept4 323 |
395 | #define __NR_preadv 324 | 395 | #define __NR_preadv 324 |
396 | #define __NR_pwritev 325 | 396 | #define __NR_pwritev 325 |
397 | #define __NR_rt_tgsigqueueinfo 326 | ||
397 | 398 | ||
398 | #define NR_SYSCALLS 326 | 399 | #define NR_SYSCALLS 327 |
399 | 400 | ||
400 | #ifdef __32bit_syscall_numbers__ | 401 | #ifdef __32bit_syscall_numbers__ |
401 | /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants, | 402 | /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants, |
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index 54742e58831c..475ce4696acd 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile | |||
@@ -37,6 +37,7 @@ obj-y += una_asm_$(BITS).o | |||
37 | obj-$(CONFIG_SPARC32) += muldiv.o | 37 | obj-$(CONFIG_SPARC32) += muldiv.o |
38 | obj-y += prom_common.o | 38 | obj-y += prom_common.o |
39 | obj-y += prom_$(BITS).o | 39 | obj-y += prom_$(BITS).o |
40 | obj-y += of_device_common.o | ||
40 | obj-y += of_device_$(BITS).o | 41 | obj-y += of_device_$(BITS).o |
41 | obj-$(CONFIG_SPARC64) += prom_irqtrans.o | 42 | obj-$(CONFIG_SPARC64) += prom_irqtrans.o |
42 | 43 | ||
@@ -54,6 +55,7 @@ obj-$(CONFIG_SPARC64) += sstate.o | |||
54 | obj-$(CONFIG_SPARC64) += mdesc.o | 55 | obj-$(CONFIG_SPARC64) += mdesc.o |
55 | obj-$(CONFIG_SPARC64) += pcr.o | 56 | obj-$(CONFIG_SPARC64) += pcr.o |
56 | obj-$(CONFIG_SPARC64) += nmi.o | 57 | obj-$(CONFIG_SPARC64) += nmi.o |
58 | obj-$(CONFIG_SPARC64_SMP) += cpumap.o | ||
57 | 59 | ||
58 | # sparc32 do not use GENERIC_HARDIRQS but uses the generic devres implementation | 60 | # sparc32 do not use GENERIC_HARDIRQS but uses the generic devres implementation |
59 | obj-$(CONFIG_SPARC32) += devres.o | 61 | obj-$(CONFIG_SPARC32) += devres.o |
diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c new file mode 100644 index 000000000000..7430ed080b23 --- /dev/null +++ b/arch/sparc/kernel/cpumap.c | |||
@@ -0,0 +1,431 @@ | |||
1 | /* cpumap.c: used for optimizing CPU assignment | ||
2 | * | ||
3 | * Copyright (C) 2009 Hong H. Pham <hong.pham@windriver.com> | ||
4 | */ | ||
5 | |||
6 | #include <linux/module.h> | ||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/init.h> | ||
9 | #include <linux/cpumask.h> | ||
10 | #include <linux/spinlock.h> | ||
11 | #include <asm/cpudata.h> | ||
12 | #include "cpumap.h" | ||
13 | |||
14 | |||
15 | enum { | ||
16 | CPUINFO_LVL_ROOT = 0, | ||
17 | CPUINFO_LVL_NODE, | ||
18 | CPUINFO_LVL_CORE, | ||
19 | CPUINFO_LVL_PROC, | ||
20 | CPUINFO_LVL_MAX, | ||
21 | }; | ||
22 | |||
23 | enum { | ||
24 | ROVER_NO_OP = 0, | ||
25 | /* Increment rover every time level is visited */ | ||
26 | ROVER_INC_ON_VISIT = 1 << 0, | ||
27 | /* Increment parent's rover every time rover wraps around */ | ||
28 | ROVER_INC_PARENT_ON_LOOP = 1 << 1, | ||
29 | }; | ||
30 | |||
31 | struct cpuinfo_node { | ||
32 | int id; | ||
33 | int level; | ||
34 | int num_cpus; /* Number of CPUs in this hierarchy */ | ||
35 | int parent_index; | ||
36 | int child_start; /* Array index of the first child node */ | ||
37 | int child_end; /* Array index of the last child node */ | ||
38 | int rover; /* Child node iterator */ | ||
39 | }; | ||
40 | |||
41 | struct cpuinfo_level { | ||
42 | int start_index; /* Index of first node of a level in a cpuinfo tree */ | ||
43 | int end_index; /* Index of last node of a level in a cpuinfo tree */ | ||
44 | int num_nodes; /* Number of nodes in a level in a cpuinfo tree */ | ||
45 | }; | ||
46 | |||
47 | struct cpuinfo_tree { | ||
48 | int total_nodes; | ||
49 | |||
50 | /* Offsets into nodes[] for each level of the tree */ | ||
51 | struct cpuinfo_level level[CPUINFO_LVL_MAX]; | ||
52 | struct cpuinfo_node nodes[0]; | ||
53 | }; | ||
54 | |||
55 | |||
56 | static struct cpuinfo_tree *cpuinfo_tree; | ||
57 | |||
58 | static u16 cpu_distribution_map[NR_CPUS]; | ||
59 | static DEFINE_SPINLOCK(cpu_map_lock); | ||
60 | |||
61 | |||
62 | /* Niagara optimized cpuinfo tree traversal. */ | ||
63 | static const int niagara_iterate_method[] = { | ||
64 | [CPUINFO_LVL_ROOT] = ROVER_NO_OP, | ||
65 | |||
66 | /* Strands (or virtual CPUs) within a core may not run concurrently | ||
67 | * on the Niagara, as instruction pipeline(s) are shared. Distribute | ||
68 | * work to strands in different cores first for better concurrency. | ||
69 | * Go to next NUMA node when all cores are used. | ||
70 | */ | ||
71 | [CPUINFO_LVL_NODE] = ROVER_INC_ON_VISIT|ROVER_INC_PARENT_ON_LOOP, | ||
72 | |||
73 | /* Strands are grouped together by proc_id in cpuinfo_sparc, i.e. | ||
74 | * a proc_id represents an instruction pipeline. Distribute work to | ||
75 | * strands in different proc_id groups if the core has multiple | ||
76 | * instruction pipelines (e.g. the Niagara 2/2+ has two). | ||
77 | */ | ||
78 | [CPUINFO_LVL_CORE] = ROVER_INC_ON_VISIT, | ||
79 | |||
80 | /* Pick the next strand in the proc_id group. */ | ||
81 | [CPUINFO_LVL_PROC] = ROVER_INC_ON_VISIT, | ||
82 | }; | ||
83 | |||
84 | /* Generic cpuinfo tree traversal. Distribute work round robin across NUMA | ||
85 | * nodes. | ||
86 | */ | ||
87 | static const int generic_iterate_method[] = { | ||
88 | [CPUINFO_LVL_ROOT] = ROVER_INC_ON_VISIT, | ||
89 | [CPUINFO_LVL_NODE] = ROVER_NO_OP, | ||
90 | [CPUINFO_LVL_CORE] = ROVER_INC_PARENT_ON_LOOP, | ||
91 | [CPUINFO_LVL_PROC] = ROVER_INC_ON_VISIT|ROVER_INC_PARENT_ON_LOOP, | ||
92 | }; | ||
93 | |||
94 | |||
95 | static int cpuinfo_id(int cpu, int level) | ||
96 | { | ||
97 | int id; | ||
98 | |||
99 | switch (level) { | ||
100 | case CPUINFO_LVL_ROOT: | ||
101 | id = 0; | ||
102 | break; | ||
103 | case CPUINFO_LVL_NODE: | ||
104 | id = cpu_to_node(cpu); | ||
105 | break; | ||
106 | case CPUINFO_LVL_CORE: | ||
107 | id = cpu_data(cpu).core_id; | ||
108 | break; | ||
109 | case CPUINFO_LVL_PROC: | ||
110 | id = cpu_data(cpu).proc_id; | ||
111 | break; | ||
112 | default: | ||
113 | id = -EINVAL; | ||
114 | } | ||
115 | return id; | ||
116 | } | ||
117 | |||
118 | /* | ||
119 | * Enumerate the CPU information in __cpu_data to determine the start index, | ||
120 | * end index, and number of nodes for each level in the cpuinfo tree. The | ||
121 | * total number of cpuinfo nodes required to build the tree is returned. | ||
122 | */ | ||
123 | static int enumerate_cpuinfo_nodes(struct cpuinfo_level *tree_level) | ||
124 | { | ||
125 | int prev_id[CPUINFO_LVL_MAX]; | ||
126 | int i, n, num_nodes; | ||
127 | |||
128 | for (i = CPUINFO_LVL_ROOT; i < CPUINFO_LVL_MAX; i++) { | ||
129 | struct cpuinfo_level *lv = &tree_level[i]; | ||
130 | |||
131 | prev_id[i] = -1; | ||
132 | lv->start_index = lv->end_index = lv->num_nodes = 0; | ||
133 | } | ||
134 | |||
135 | num_nodes = 1; /* Include the root node */ | ||
136 | |||
137 | for (i = 0; i < num_possible_cpus(); i++) { | ||
138 | if (!cpu_online(i)) | ||
139 | continue; | ||
140 | |||
141 | n = cpuinfo_id(i, CPUINFO_LVL_NODE); | ||
142 | if (n > prev_id[CPUINFO_LVL_NODE]) { | ||
143 | tree_level[CPUINFO_LVL_NODE].num_nodes++; | ||
144 | prev_id[CPUINFO_LVL_NODE] = n; | ||
145 | num_nodes++; | ||
146 | } | ||
147 | n = cpuinfo_id(i, CPUINFO_LVL_CORE); | ||
148 | if (n > prev_id[CPUINFO_LVL_CORE]) { | ||
149 | tree_level[CPUINFO_LVL_CORE].num_nodes++; | ||
150 | prev_id[CPUINFO_LVL_CORE] = n; | ||
151 | num_nodes++; | ||
152 | } | ||
153 | n = cpuinfo_id(i, CPUINFO_LVL_PROC); | ||
154 | if (n > prev_id[CPUINFO_LVL_PROC]) { | ||
155 | tree_level[CPUINFO_LVL_PROC].num_nodes++; | ||
156 | prev_id[CPUINFO_LVL_PROC] = n; | ||
157 | num_nodes++; | ||
158 | } | ||
159 | } | ||
160 | |||
161 | tree_level[CPUINFO_LVL_ROOT].num_nodes = 1; | ||
162 | |||
163 | n = tree_level[CPUINFO_LVL_NODE].num_nodes; | ||
164 | tree_level[CPUINFO_LVL_NODE].start_index = 1; | ||
165 | tree_level[CPUINFO_LVL_NODE].end_index = n; | ||
166 | |||
167 | n++; | ||
168 | tree_level[CPUINFO_LVL_CORE].start_index = n; | ||
169 | n += tree_level[CPUINFO_LVL_CORE].num_nodes; | ||
170 | tree_level[CPUINFO_LVL_CORE].end_index = n - 1; | ||
171 | |||
172 | tree_level[CPUINFO_LVL_PROC].start_index = n; | ||
173 | n += tree_level[CPUINFO_LVL_PROC].num_nodes; | ||
174 | tree_level[CPUINFO_LVL_PROC].end_index = n - 1; | ||
175 | |||
176 | return num_nodes; | ||
177 | } | ||
178 | |||
179 | /* Build a tree representation of the CPU hierarchy using the per CPU | ||
180 | * information in __cpu_data. Entries in __cpu_data[0..NR_CPUS] are | ||
181 | * assumed to be sorted in ascending order based on node, core_id, and | ||
182 | * proc_id (in order of significance). | ||
183 | */ | ||
184 | static struct cpuinfo_tree *build_cpuinfo_tree(void) | ||
185 | { | ||
186 | struct cpuinfo_tree *new_tree; | ||
187 | struct cpuinfo_node *node; | ||
188 | struct cpuinfo_level tmp_level[CPUINFO_LVL_MAX]; | ||
189 | int num_cpus[CPUINFO_LVL_MAX]; | ||
190 | int level_rover[CPUINFO_LVL_MAX]; | ||
191 | int prev_id[CPUINFO_LVL_MAX]; | ||
192 | int n, id, cpu, prev_cpu, last_cpu, level; | ||
193 | |||
194 | n = enumerate_cpuinfo_nodes(tmp_level); | ||
195 | |||
196 | new_tree = kzalloc(sizeof(struct cpuinfo_tree) + | ||
197 | (sizeof(struct cpuinfo_node) * n), GFP_ATOMIC); | ||
198 | if (!new_tree) | ||
199 | return NULL; | ||
200 | |||
201 | new_tree->total_nodes = n; | ||
202 | memcpy(&new_tree->level, tmp_level, sizeof(tmp_level)); | ||
203 | |||
204 | prev_cpu = cpu = first_cpu(cpu_online_map); | ||
205 | |||
206 | /* Initialize all levels in the tree with the first CPU */ | ||
207 | for (level = CPUINFO_LVL_PROC; level >= CPUINFO_LVL_ROOT; level--) { | ||
208 | n = new_tree->level[level].start_index; | ||
209 | |||
210 | level_rover[level] = n; | ||
211 | node = &new_tree->nodes[n]; | ||
212 | |||
213 | id = cpuinfo_id(cpu, level); | ||
214 | if (unlikely(id < 0)) { | ||
215 | kfree(new_tree); | ||
216 | return NULL; | ||
217 | } | ||
218 | node->id = id; | ||
219 | node->level = level; | ||
220 | node->num_cpus = 1; | ||
221 | |||
222 | node->parent_index = (level > CPUINFO_LVL_ROOT) | ||
223 | ? new_tree->level[level - 1].start_index : -1; | ||
224 | |||
225 | node->child_start = node->child_end = node->rover = | ||
226 | (level == CPUINFO_LVL_PROC) | ||
227 | ? cpu : new_tree->level[level + 1].start_index; | ||
228 | |||
229 | prev_id[level] = node->id; | ||
230 | num_cpus[level] = 1; | ||
231 | } | ||
232 | |||
233 | for (last_cpu = (num_possible_cpus() - 1); last_cpu >= 0; last_cpu--) { | ||
234 | if (cpu_online(last_cpu)) | ||
235 | break; | ||
236 | } | ||
237 | |||
238 | while (++cpu <= last_cpu) { | ||
239 | if (!cpu_online(cpu)) | ||
240 | continue; | ||
241 | |||
242 | for (level = CPUINFO_LVL_PROC; level >= CPUINFO_LVL_ROOT; | ||
243 | level--) { | ||
244 | id = cpuinfo_id(cpu, level); | ||
245 | if (unlikely(id < 0)) { | ||
246 | kfree(new_tree); | ||
247 | return NULL; | ||
248 | } | ||
249 | |||
250 | if ((id != prev_id[level]) || (cpu == last_cpu)) { | ||
251 | prev_id[level] = id; | ||
252 | node = &new_tree->nodes[level_rover[level]]; | ||
253 | node->num_cpus = num_cpus[level]; | ||
254 | num_cpus[level] = 1; | ||
255 | |||
256 | if (cpu == last_cpu) | ||
257 | node->num_cpus++; | ||
258 | |||
259 | /* Connect tree node to parent */ | ||
260 | if (level == CPUINFO_LVL_ROOT) | ||
261 | node->parent_index = -1; | ||
262 | else | ||
263 | node->parent_index = | ||
264 | level_rover[level - 1]; | ||
265 | |||
266 | if (level == CPUINFO_LVL_PROC) { | ||
267 | node->child_end = | ||
268 | (cpu == last_cpu) ? cpu : prev_cpu; | ||
269 | } else { | ||
270 | node->child_end = | ||
271 | level_rover[level + 1] - 1; | ||
272 | } | ||
273 | |||
274 | /* Initialize the next node in the same level */ | ||
275 | n = ++level_rover[level]; | ||
276 | if (n <= new_tree->level[level].end_index) { | ||
277 | node = &new_tree->nodes[n]; | ||
278 | node->id = id; | ||
279 | node->level = level; | ||
280 | |||
281 | /* Connect node to child */ | ||
282 | node->child_start = node->child_end = | ||
283 | node->rover = | ||
284 | (level == CPUINFO_LVL_PROC) | ||
285 | ? cpu : level_rover[level + 1]; | ||
286 | } | ||
287 | } else | ||
288 | num_cpus[level]++; | ||
289 | } | ||
290 | prev_cpu = cpu; | ||
291 | } | ||
292 | |||
293 | return new_tree; | ||
294 | } | ||
295 | |||
296 | static void increment_rover(struct cpuinfo_tree *t, int node_index, | ||
297 | int root_index, const int *rover_inc_table) | ||
298 | { | ||
299 | struct cpuinfo_node *node = &t->nodes[node_index]; | ||
300 | int top_level, level; | ||
301 | |||
302 | top_level = t->nodes[root_index].level; | ||
303 | for (level = node->level; level >= top_level; level--) { | ||
304 | node->rover++; | ||
305 | if (node->rover <= node->child_end) | ||
306 | return; | ||
307 | |||
308 | node->rover = node->child_start; | ||
309 | /* If parent's rover does not need to be adjusted, stop here. */ | ||
310 | if ((level == top_level) || | ||
311 | !(rover_inc_table[level] & ROVER_INC_PARENT_ON_LOOP)) | ||
312 | return; | ||
313 | |||
314 | node = &t->nodes[node->parent_index]; | ||
315 | } | ||
316 | } | ||
317 | |||
318 | static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index) | ||
319 | { | ||
320 | const int *rover_inc_table; | ||
321 | int level, new_index, index = root_index; | ||
322 | |||
323 | switch (sun4v_chip_type) { | ||
324 | case SUN4V_CHIP_NIAGARA1: | ||
325 | case SUN4V_CHIP_NIAGARA2: | ||
326 | rover_inc_table = niagara_iterate_method; | ||
327 | break; | ||
328 | default: | ||
329 | rover_inc_table = generic_iterate_method; | ||
330 | } | ||
331 | |||
332 | for (level = t->nodes[root_index].level; level < CPUINFO_LVL_MAX; | ||
333 | level++) { | ||
334 | new_index = t->nodes[index].rover; | ||
335 | if (rover_inc_table[level] & ROVER_INC_ON_VISIT) | ||
336 | increment_rover(t, index, root_index, rover_inc_table); | ||
337 | |||
338 | index = new_index; | ||
339 | } | ||
340 | return index; | ||
341 | } | ||
342 | |||
343 | static void _cpu_map_rebuild(void) | ||
344 | { | ||
345 | int i; | ||
346 | |||
347 | if (cpuinfo_tree) { | ||
348 | kfree(cpuinfo_tree); | ||
349 | cpuinfo_tree = NULL; | ||
350 | } | ||
351 | |||
352 | cpuinfo_tree = build_cpuinfo_tree(); | ||
353 | if (!cpuinfo_tree) | ||
354 | return; | ||
355 | |||
356 | /* Build CPU distribution map that spans all online CPUs. No need | ||
357 | * to check if the CPU is online, as that is done when the cpuinfo | ||
358 | * tree is being built. | ||
359 | */ | ||
360 | for (i = 0; i < cpuinfo_tree->nodes[0].num_cpus; i++) | ||
361 | cpu_distribution_map[i] = iterate_cpu(cpuinfo_tree, 0); | ||
362 | } | ||
363 | |||
364 | /* Fallback if the cpuinfo tree could not be built. CPU mapping is linear | ||
365 | * round robin. | ||
366 | */ | ||
367 | static int simple_map_to_cpu(unsigned int index) | ||
368 | { | ||
369 | int i, end, cpu_rover; | ||
370 | |||
371 | cpu_rover = 0; | ||
372 | end = index % num_online_cpus(); | ||
373 | for (i = 0; i < num_possible_cpus(); i++) { | ||
374 | if (cpu_online(cpu_rover)) { | ||
375 | if (cpu_rover >= end) | ||
376 | return cpu_rover; | ||
377 | |||
378 | cpu_rover++; | ||
379 | } | ||
380 | } | ||
381 | |||
382 | /* Impossible, since num_online_cpus() <= num_possible_cpus() */ | ||
383 | return first_cpu(cpu_online_map); | ||
384 | } | ||
385 | |||
386 | static int _map_to_cpu(unsigned int index) | ||
387 | { | ||
388 | struct cpuinfo_node *root_node; | ||
389 | |||
390 | if (unlikely(!cpuinfo_tree)) { | ||
391 | _cpu_map_rebuild(); | ||
392 | if (!cpuinfo_tree) | ||
393 | return simple_map_to_cpu(index); | ||
394 | } | ||
395 | |||
396 | root_node = &cpuinfo_tree->nodes[0]; | ||
397 | #ifdef CONFIG_HOTPLUG_CPU | ||
398 | if (unlikely(root_node->num_cpus != num_online_cpus())) { | ||
399 | _cpu_map_rebuild(); | ||
400 | if (!cpuinfo_tree) | ||
401 | return simple_map_to_cpu(index); | ||
402 | } | ||
403 | #endif | ||
404 | return cpu_distribution_map[index % root_node->num_cpus]; | ||
405 | } | ||
406 | |||
407 | int map_to_cpu(unsigned int index) | ||
408 | { | ||
409 | int mapped_cpu; | ||
410 | unsigned long flag; | ||
411 | |||
412 | spin_lock_irqsave(&cpu_map_lock, flag); | ||
413 | mapped_cpu = _map_to_cpu(index); | ||
414 | |||
415 | #ifdef CONFIG_HOTPLUG_CPU | ||
416 | while (unlikely(!cpu_online(mapped_cpu))) | ||
417 | mapped_cpu = _map_to_cpu(index); | ||
418 | #endif | ||
419 | spin_unlock_irqrestore(&cpu_map_lock, flag); | ||
420 | return mapped_cpu; | ||
421 | } | ||
422 | EXPORT_SYMBOL(map_to_cpu); | ||
423 | |||
424 | void cpu_map_rebuild(void) | ||
425 | { | ||
426 | unsigned long flag; | ||
427 | |||
428 | spin_lock_irqsave(&cpu_map_lock, flag); | ||
429 | _cpu_map_rebuild(); | ||
430 | spin_unlock_irqrestore(&cpu_map_lock, flag); | ||
431 | } | ||
diff --git a/arch/sparc/kernel/cpumap.h b/arch/sparc/kernel/cpumap.h new file mode 100644 index 000000000000..e639880ab864 --- /dev/null +++ b/arch/sparc/kernel/cpumap.h | |||
@@ -0,0 +1,16 @@ | |||
1 | #ifndef _CPUMAP_H | ||
2 | #define _CPUMAP_H | ||
3 | |||
4 | #ifdef CONFIG_SMP | ||
5 | extern void cpu_map_rebuild(void); | ||
6 | extern int map_to_cpu(unsigned int index); | ||
7 | #define cpu_map_init() cpu_map_rebuild() | ||
8 | #else | ||
9 | #define cpu_map_init() do {} while (0) | ||
10 | static inline int map_to_cpu(unsigned int index) | ||
11 | { | ||
12 | return raw_smp_processor_id(); | ||
13 | } | ||
14 | #endif | ||
15 | |||
16 | #endif | ||
diff --git a/arch/sparc/kernel/dma.c b/arch/sparc/kernel/dma.c index ebc8403b035e..524c32f97c55 100644 --- a/arch/sparc/kernel/dma.c +++ b/arch/sparc/kernel/dma.c | |||
@@ -35,8 +35,8 @@ int dma_set_mask(struct device *dev, u64 dma_mask) | |||
35 | } | 35 | } |
36 | EXPORT_SYMBOL(dma_set_mask); | 36 | EXPORT_SYMBOL(dma_set_mask); |
37 | 37 | ||
38 | void *dma_alloc_coherent(struct device *dev, size_t size, | 38 | static void *dma32_alloc_coherent(struct device *dev, size_t size, |
39 | dma_addr_t *dma_handle, gfp_t flag) | 39 | dma_addr_t *dma_handle, gfp_t flag) |
40 | { | 40 | { |
41 | #ifdef CONFIG_PCI | 41 | #ifdef CONFIG_PCI |
42 | if (dev->bus == &pci_bus_type) | 42 | if (dev->bus == &pci_bus_type) |
@@ -44,10 +44,9 @@ void *dma_alloc_coherent(struct device *dev, size_t size, | |||
44 | #endif | 44 | #endif |
45 | return sbus_alloc_consistent(dev, size, dma_handle); | 45 | return sbus_alloc_consistent(dev, size, dma_handle); |
46 | } | 46 | } |
47 | EXPORT_SYMBOL(dma_alloc_coherent); | ||
48 | 47 | ||
49 | void dma_free_coherent(struct device *dev, size_t size, | 48 | static void dma32_free_coherent(struct device *dev, size_t size, |
50 | void *cpu_addr, dma_addr_t dma_handle) | 49 | void *cpu_addr, dma_addr_t dma_handle) |
51 | { | 50 | { |
52 | #ifdef CONFIG_PCI | 51 | #ifdef CONFIG_PCI |
53 | if (dev->bus == &pci_bus_type) { | 52 | if (dev->bus == &pci_bus_type) { |
@@ -58,38 +57,10 @@ void dma_free_coherent(struct device *dev, size_t size, | |||
58 | #endif | 57 | #endif |
59 | sbus_free_consistent(dev, size, cpu_addr, dma_handle); | 58 | sbus_free_consistent(dev, size, cpu_addr, dma_handle); |
60 | } | 59 | } |
61 | EXPORT_SYMBOL(dma_free_coherent); | ||
62 | 60 | ||
63 | dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, | 61 | static dma_addr_t dma32_map_page(struct device *dev, struct page *page, |
64 | size_t size, enum dma_data_direction direction) | 62 | unsigned long offset, size_t size, |
65 | { | 63 | enum dma_data_direction direction) |
66 | #ifdef CONFIG_PCI | ||
67 | if (dev->bus == &pci_bus_type) | ||
68 | return pci_map_single(to_pci_dev(dev), cpu_addr, | ||
69 | size, (int)direction); | ||
70 | #endif | ||
71 | return sbus_map_single(dev, cpu_addr, size, (int)direction); | ||
72 | } | ||
73 | EXPORT_SYMBOL(dma_map_single); | ||
74 | |||
75 | void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, | ||
76 | size_t size, | ||
77 | enum dma_data_direction direction) | ||
78 | { | ||
79 | #ifdef CONFIG_PCI | ||
80 | if (dev->bus == &pci_bus_type) { | ||
81 | pci_unmap_single(to_pci_dev(dev), dma_addr, | ||
82 | size, (int)direction); | ||
83 | return; | ||
84 | } | ||
85 | #endif | ||
86 | sbus_unmap_single(dev, dma_addr, size, (int)direction); | ||
87 | } | ||
88 | EXPORT_SYMBOL(dma_unmap_single); | ||
89 | |||
90 | dma_addr_t dma_map_page(struct device *dev, struct page *page, | ||
91 | unsigned long offset, size_t size, | ||
92 | enum dma_data_direction direction) | ||
93 | { | 64 | { |
94 | #ifdef CONFIG_PCI | 65 | #ifdef CONFIG_PCI |
95 | if (dev->bus == &pci_bus_type) | 66 | if (dev->bus == &pci_bus_type) |
@@ -99,10 +70,9 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page, | |||
99 | return sbus_map_single(dev, page_address(page) + offset, | 70 | return sbus_map_single(dev, page_address(page) + offset, |
100 | size, (int)direction); | 71 | size, (int)direction); |
101 | } | 72 | } |
102 | EXPORT_SYMBOL(dma_map_page); | ||
103 | 73 | ||
104 | void dma_unmap_page(struct device *dev, dma_addr_t dma_address, | 74 | static void dma32_unmap_page(struct device *dev, dma_addr_t dma_address, |
105 | size_t size, enum dma_data_direction direction) | 75 | size_t size, enum dma_data_direction direction) |
106 | { | 76 | { |
107 | #ifdef CONFIG_PCI | 77 | #ifdef CONFIG_PCI |
108 | if (dev->bus == &pci_bus_type) { | 78 | if (dev->bus == &pci_bus_type) { |
@@ -113,10 +83,9 @@ void dma_unmap_page(struct device *dev, dma_addr_t dma_address, | |||
113 | #endif | 83 | #endif |
114 | sbus_unmap_single(dev, dma_address, size, (int)direction); | 84 | sbus_unmap_single(dev, dma_address, size, (int)direction); |
115 | } | 85 | } |
116 | EXPORT_SYMBOL(dma_unmap_page); | ||
117 | 86 | ||
118 | int dma_map_sg(struct device *dev, struct scatterlist *sg, | 87 | static int dma32_map_sg(struct device *dev, struct scatterlist *sg, |
119 | int nents, enum dma_data_direction direction) | 88 | int nents, enum dma_data_direction direction) |
120 | { | 89 | { |
121 | #ifdef CONFIG_PCI | 90 | #ifdef CONFIG_PCI |
122 | if (dev->bus == &pci_bus_type) | 91 | if (dev->bus == &pci_bus_type) |
@@ -124,10 +93,9 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, | |||
124 | #endif | 93 | #endif |
125 | return sbus_map_sg(dev, sg, nents, direction); | 94 | return sbus_map_sg(dev, sg, nents, direction); |
126 | } | 95 | } |
127 | EXPORT_SYMBOL(dma_map_sg); | ||
128 | 96 | ||
129 | void dma_unmap_sg(struct device *dev, struct scatterlist *sg, | 97 | void dma32_unmap_sg(struct device *dev, struct scatterlist *sg, |
130 | int nents, enum dma_data_direction direction) | 98 | int nents, enum dma_data_direction direction) |
131 | { | 99 | { |
132 | #ifdef CONFIG_PCI | 100 | #ifdef CONFIG_PCI |
133 | if (dev->bus == &pci_bus_type) { | 101 | if (dev->bus == &pci_bus_type) { |
@@ -137,10 +105,10 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, | |||
137 | #endif | 105 | #endif |
138 | sbus_unmap_sg(dev, sg, nents, (int)direction); | 106 | sbus_unmap_sg(dev, sg, nents, (int)direction); |
139 | } | 107 | } |
140 | EXPORT_SYMBOL(dma_unmap_sg); | ||
141 | 108 | ||
142 | void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, | 109 | static void dma32_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, |
143 | size_t size, enum dma_data_direction direction) | 110 | size_t size, |
111 | enum dma_data_direction direction) | ||
144 | { | 112 | { |
145 | #ifdef CONFIG_PCI | 113 | #ifdef CONFIG_PCI |
146 | if (dev->bus == &pci_bus_type) { | 114 | if (dev->bus == &pci_bus_type) { |
@@ -151,10 +119,10 @@ void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, | |||
151 | #endif | 119 | #endif |
152 | sbus_dma_sync_single_for_cpu(dev, dma_handle, size, (int) direction); | 120 | sbus_dma_sync_single_for_cpu(dev, dma_handle, size, (int) direction); |
153 | } | 121 | } |
154 | EXPORT_SYMBOL(dma_sync_single_for_cpu); | ||
155 | 122 | ||
156 | void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, | 123 | static void dma32_sync_single_for_device(struct device *dev, |
157 | size_t size, enum dma_data_direction direction) | 124 | dma_addr_t dma_handle, size_t size, |
125 | enum dma_data_direction direction) | ||
158 | { | 126 | { |
159 | #ifdef CONFIG_PCI | 127 | #ifdef CONFIG_PCI |
160 | if (dev->bus == &pci_bus_type) { | 128 | if (dev->bus == &pci_bus_type) { |
@@ -165,28 +133,9 @@ void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, | |||
165 | #endif | 133 | #endif |
166 | sbus_dma_sync_single_for_device(dev, dma_handle, size, (int) direction); | 134 | sbus_dma_sync_single_for_device(dev, dma_handle, size, (int) direction); |
167 | } | 135 | } |
168 | EXPORT_SYMBOL(dma_sync_single_for_device); | ||
169 | |||
170 | void dma_sync_single_range_for_cpu(struct device *dev, | ||
171 | dma_addr_t dma_handle, | ||
172 | unsigned long offset, | ||
173 | size_t size, | ||
174 | enum dma_data_direction direction) | ||
175 | { | ||
176 | dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction); | ||
177 | } | ||
178 | EXPORT_SYMBOL(dma_sync_single_range_for_cpu); | ||
179 | |||
180 | void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, | ||
181 | unsigned long offset, size_t size, | ||
182 | enum dma_data_direction direction) | ||
183 | { | ||
184 | dma_sync_single_for_device(dev, dma_handle+offset, size, direction); | ||
185 | } | ||
186 | EXPORT_SYMBOL(dma_sync_single_range_for_device); | ||
187 | 136 | ||
188 | void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | 137 | static void dma32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, |
189 | int nelems, enum dma_data_direction direction) | 138 | int nelems, enum dma_data_direction direction) |
190 | { | 139 | { |
191 | #ifdef CONFIG_PCI | 140 | #ifdef CONFIG_PCI |
192 | if (dev->bus == &pci_bus_type) { | 141 | if (dev->bus == &pci_bus_type) { |
@@ -197,11 +146,10 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | |||
197 | #endif | 146 | #endif |
198 | BUG(); | 147 | BUG(); |
199 | } | 148 | } |
200 | EXPORT_SYMBOL(dma_sync_sg_for_cpu); | ||
201 | 149 | ||
202 | void dma_sync_sg_for_device(struct device *dev, | 150 | static void dma32_sync_sg_for_device(struct device *dev, |
203 | struct scatterlist *sg, int nelems, | 151 | struct scatterlist *sg, int nelems, |
204 | enum dma_data_direction direction) | 152 | enum dma_data_direction direction) |
205 | { | 153 | { |
206 | #ifdef CONFIG_PCI | 154 | #ifdef CONFIG_PCI |
207 | if (dev->bus == &pci_bus_type) { | 155 | if (dev->bus == &pci_bus_type) { |
@@ -212,16 +160,19 @@ void dma_sync_sg_for_device(struct device *dev, | |||
212 | #endif | 160 | #endif |
213 | BUG(); | 161 | BUG(); |
214 | } | 162 | } |
215 | EXPORT_SYMBOL(dma_sync_sg_for_device); | ||
216 | 163 | ||
217 | int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | 164 | static const struct dma_ops dma32_dma_ops = { |
218 | { | 165 | .alloc_coherent = dma32_alloc_coherent, |
219 | return (dma_addr == DMA_ERROR_CODE); | 166 | .free_coherent = dma32_free_coherent, |
220 | } | 167 | .map_page = dma32_map_page, |
221 | EXPORT_SYMBOL(dma_mapping_error); | 168 | .unmap_page = dma32_unmap_page, |
222 | 169 | .map_sg = dma32_map_sg, | |
223 | int dma_get_cache_alignment(void) | 170 | .unmap_sg = dma32_unmap_sg, |
224 | { | 171 | .sync_single_for_cpu = dma32_sync_single_for_cpu, |
225 | return 32; | 172 | .sync_single_for_device = dma32_sync_single_for_device, |
226 | } | 173 | .sync_sg_for_cpu = dma32_sync_sg_for_cpu, |
227 | EXPORT_SYMBOL(dma_get_cache_alignment); | 174 | .sync_sg_for_device = dma32_sync_sg_for_device, |
175 | }; | ||
176 | |||
177 | const struct dma_ops *dma_ops = &dma32_dma_ops; | ||
178 | EXPORT_SYMBOL(dma_ops); | ||
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c index 90350f838f05..4a700f4b79ce 100644 --- a/arch/sparc/kernel/ds.c +++ b/arch/sparc/kernel/ds.c | |||
@@ -544,7 +544,8 @@ static int __cpuinit dr_cpu_configure(struct ds_info *dp, | |||
544 | resp_len, ncpus, mask, | 544 | resp_len, ncpus, mask, |
545 | DR_CPU_STAT_CONFIGURED); | 545 | DR_CPU_STAT_CONFIGURED); |
546 | 546 | ||
547 | mdesc_fill_in_cpu_data(*mask); | 547 | mdesc_populate_present_mask(mask); |
548 | mdesc_fill_in_cpu_data(mask); | ||
548 | 549 | ||
549 | for_each_cpu_mask(cpu, *mask) { | 550 | for_each_cpu_mask(cpu, *mask) { |
550 | int err; | 551 | int err; |
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c index d0218e73f982..d3b1a3076569 100644 --- a/arch/sparc/kernel/ftrace.c +++ b/arch/sparc/kernel/ftrace.c | |||
@@ -7,14 +7,10 @@ | |||
7 | 7 | ||
8 | #include <asm/ftrace.h> | 8 | #include <asm/ftrace.h> |
9 | 9 | ||
10 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
10 | static const u32 ftrace_nop = 0x01000000; | 11 | static const u32 ftrace_nop = 0x01000000; |
11 | 12 | ||
12 | unsigned char *ftrace_nop_replace(void) | 13 | static u32 ftrace_call_replace(unsigned long ip, unsigned long addr) |
13 | { | ||
14 | return (char *)&ftrace_nop; | ||
15 | } | ||
16 | |||
17 | unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) | ||
18 | { | 14 | { |
19 | static u32 call; | 15 | static u32 call; |
20 | s32 off; | 16 | s32 off; |
@@ -22,15 +18,11 @@ unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) | |||
22 | off = ((s32)addr - (s32)ip); | 18 | off = ((s32)addr - (s32)ip); |
23 | call = 0x40000000 | ((u32)off >> 2); | 19 | call = 0x40000000 | ((u32)off >> 2); |
24 | 20 | ||
25 | return (unsigned char *) &call; | 21 | return call; |
26 | } | 22 | } |
27 | 23 | ||
28 | int | 24 | static int ftrace_modify_code(unsigned long ip, u32 old, u32 new) |
29 | ftrace_modify_code(unsigned long ip, unsigned char *old_code, | ||
30 | unsigned char *new_code) | ||
31 | { | 25 | { |
32 | u32 old = *(u32 *)old_code; | ||
33 | u32 new = *(u32 *)new_code; | ||
34 | u32 replaced; | 26 | u32 replaced; |
35 | int faulted; | 27 | int faulted; |
36 | 28 | ||
@@ -59,18 +51,43 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code, | |||
59 | return faulted; | 51 | return faulted; |
60 | } | 52 | } |
61 | 53 | ||
54 | int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) | ||
55 | { | ||
56 | unsigned long ip = rec->ip; | ||
57 | u32 old, new; | ||
58 | |||
59 | old = ftrace_call_replace(ip, addr); | ||
60 | new = ftrace_nop; | ||
61 | return ftrace_modify_code(ip, old, new); | ||
62 | } | ||
63 | |||
64 | int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | ||
65 | { | ||
66 | unsigned long ip = rec->ip; | ||
67 | u32 old, new; | ||
68 | |||
69 | old = ftrace_nop; | ||
70 | new = ftrace_call_replace(ip, addr); | ||
71 | return ftrace_modify_code(ip, old, new); | ||
72 | } | ||
73 | |||
62 | int ftrace_update_ftrace_func(ftrace_func_t func) | 74 | int ftrace_update_ftrace_func(ftrace_func_t func) |
63 | { | 75 | { |
64 | unsigned long ip = (unsigned long)(&ftrace_call); | 76 | unsigned long ip = (unsigned long)(&ftrace_call); |
65 | unsigned char old[MCOUNT_INSN_SIZE], *new; | 77 | u32 old, new; |
66 | 78 | ||
67 | memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE); | 79 | old = *(u32 *) &ftrace_call; |
68 | new = ftrace_call_replace(ip, (unsigned long)func); | 80 | new = ftrace_call_replace(ip, (unsigned long)func); |
69 | return ftrace_modify_code(ip, old, new); | 81 | return ftrace_modify_code(ip, old, new); |
70 | } | 82 | } |
71 | 83 | ||
72 | int __init ftrace_dyn_arch_init(void *data) | 84 | int __init ftrace_dyn_arch_init(void *data) |
73 | { | 85 | { |
74 | ftrace_mcount_set(data); | 86 | unsigned long *p = data; |
87 | |||
88 | *p = 0; | ||
89 | |||
75 | return 0; | 90 | return 0; |
76 | } | 91 | } |
92 | #endif | ||
93 | |||
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S index 91bf4c7f79b9..f8f21050448b 100644 --- a/arch/sparc/kernel/head_64.S +++ b/arch/sparc/kernel/head_64.S | |||
@@ -641,28 +641,6 @@ tlb_fixup_done: | |||
641 | /* Not reached... */ | 641 | /* Not reached... */ |
642 | 642 | ||
643 | 1: | 643 | 1: |
644 | /* If we boot on a non-zero cpu, all of the per-cpu | ||
645 | * variable references we make before setting up the | ||
646 | * per-cpu areas will use a bogus offset. Put a | ||
647 | * compensating factor into __per_cpu_base to handle | ||
648 | * this cleanly. | ||
649 | * | ||
650 | * What the per-cpu code calculates is: | ||
651 | * | ||
652 | * __per_cpu_base + (cpu << __per_cpu_shift) | ||
653 | * | ||
654 | * These two variables are zero initially, so to | ||
655 | * make it all cancel out to zero we need to put | ||
656 | * "0 - (cpu << 0)" into __per_cpu_base so that the | ||
657 | * above formula evaluates to zero. | ||
658 | * | ||
659 | * We cannot even perform a printk() until this stuff | ||
660 | * is setup as that calls cpu_clock() which uses | ||
661 | * per-cpu variables. | ||
662 | */ | ||
663 | sub %g0, %o0, %o1 | ||
664 | sethi %hi(__per_cpu_base), %o2 | ||
665 | stx %o1, [%o2 + %lo(__per_cpu_base)] | ||
666 | #else | 644 | #else |
667 | mov 0, %o0 | 645 | mov 0, %o0 |
668 | #endif | 646 | #endif |
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index d8900e1d5aad..0aeaefe696b9 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c | |||
@@ -351,8 +351,9 @@ static void dma_4u_free_coherent(struct device *dev, size_t size, | |||
351 | free_pages((unsigned long)cpu, order); | 351 | free_pages((unsigned long)cpu, order); |
352 | } | 352 | } |
353 | 353 | ||
354 | static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz, | 354 | static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page, |
355 | enum dma_data_direction direction) | 355 | unsigned long offset, size_t sz, |
356 | enum dma_data_direction direction) | ||
356 | { | 357 | { |
357 | struct iommu *iommu; | 358 | struct iommu *iommu; |
358 | struct strbuf *strbuf; | 359 | struct strbuf *strbuf; |
@@ -368,7 +369,7 @@ static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz, | |||
368 | if (unlikely(direction == DMA_NONE)) | 369 | if (unlikely(direction == DMA_NONE)) |
369 | goto bad_no_ctx; | 370 | goto bad_no_ctx; |
370 | 371 | ||
371 | oaddr = (unsigned long)ptr; | 372 | oaddr = (unsigned long)(page_address(page) + offset); |
372 | npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); | 373 | npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); |
373 | npages >>= IO_PAGE_SHIFT; | 374 | npages >>= IO_PAGE_SHIFT; |
374 | 375 | ||
@@ -472,8 +473,8 @@ do_flush_sync: | |||
472 | vaddr, ctx, npages); | 473 | vaddr, ctx, npages); |
473 | } | 474 | } |
474 | 475 | ||
475 | static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr, | 476 | static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, |
476 | size_t sz, enum dma_data_direction direction) | 477 | size_t sz, enum dma_data_direction direction) |
477 | { | 478 | { |
478 | struct iommu *iommu; | 479 | struct iommu *iommu; |
479 | struct strbuf *strbuf; | 480 | struct strbuf *strbuf; |
@@ -824,8 +825,8 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev, | |||
824 | static const struct dma_ops sun4u_dma_ops = { | 825 | static const struct dma_ops sun4u_dma_ops = { |
825 | .alloc_coherent = dma_4u_alloc_coherent, | 826 | .alloc_coherent = dma_4u_alloc_coherent, |
826 | .free_coherent = dma_4u_free_coherent, | 827 | .free_coherent = dma_4u_free_coherent, |
827 | .map_single = dma_4u_map_single, | 828 | .map_page = dma_4u_map_page, |
828 | .unmap_single = dma_4u_unmap_single, | 829 | .unmap_page = dma_4u_unmap_page, |
829 | .map_sg = dma_4u_map_sg, | 830 | .map_sg = dma_4u_map_sg, |
830 | .unmap_sg = dma_4u_unmap_sg, | 831 | .unmap_sg = dma_4u_unmap_sg, |
831 | .sync_single_for_cpu = dma_4u_sync_single_for_cpu, | 832 | .sync_single_for_cpu = dma_4u_sync_single_for_cpu, |
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index e5e78f9cfc95..bd075054942b 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c | |||
@@ -45,6 +45,7 @@ | |||
45 | #include <asm/cacheflush.h> | 45 | #include <asm/cacheflush.h> |
46 | 46 | ||
47 | #include "entry.h" | 47 | #include "entry.h" |
48 | #include "cpumap.h" | ||
48 | 49 | ||
49 | #define NUM_IVECS (IMAP_INR + 1) | 50 | #define NUM_IVECS (IMAP_INR + 1) |
50 | 51 | ||
@@ -256,35 +257,13 @@ static int irq_choose_cpu(unsigned int virt_irq) | |||
256 | int cpuid; | 257 | int cpuid; |
257 | 258 | ||
258 | cpumask_copy(&mask, irq_desc[virt_irq].affinity); | 259 | cpumask_copy(&mask, irq_desc[virt_irq].affinity); |
259 | if (cpus_equal(mask, CPU_MASK_ALL)) { | 260 | if (cpus_equal(mask, cpu_online_map)) { |
260 | static int irq_rover; | 261 | cpuid = map_to_cpu(virt_irq); |
261 | static DEFINE_SPINLOCK(irq_rover_lock); | ||
262 | unsigned long flags; | ||
263 | |||
264 | /* Round-robin distribution... */ | ||
265 | do_round_robin: | ||
266 | spin_lock_irqsave(&irq_rover_lock, flags); | ||
267 | |||
268 | while (!cpu_online(irq_rover)) { | ||
269 | if (++irq_rover >= nr_cpu_ids) | ||
270 | irq_rover = 0; | ||
271 | } | ||
272 | cpuid = irq_rover; | ||
273 | do { | ||
274 | if (++irq_rover >= nr_cpu_ids) | ||
275 | irq_rover = 0; | ||
276 | } while (!cpu_online(irq_rover)); | ||
277 | |||
278 | spin_unlock_irqrestore(&irq_rover_lock, flags); | ||
279 | } else { | 262 | } else { |
280 | cpumask_t tmp; | 263 | cpumask_t tmp; |
281 | 264 | ||
282 | cpus_and(tmp, cpu_online_map, mask); | 265 | cpus_and(tmp, cpu_online_map, mask); |
283 | 266 | cpuid = cpus_empty(tmp) ? map_to_cpu(virt_irq) : first_cpu(tmp); | |
284 | if (cpus_empty(tmp)) | ||
285 | goto do_round_robin; | ||
286 | |||
287 | cpuid = first_cpu(tmp); | ||
288 | } | 267 | } |
289 | 268 | ||
290 | return cpuid; | 269 | return cpuid; |
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c index f0e6ed23a468..938da19dc065 100644 --- a/arch/sparc/kernel/mdesc.c +++ b/arch/sparc/kernel/mdesc.c | |||
@@ -574,7 +574,7 @@ static void __init report_platform_properties(void) | |||
574 | mdesc_release(hp); | 574 | mdesc_release(hp); |
575 | } | 575 | } |
576 | 576 | ||
577 | static void __devinit fill_in_one_cache(cpuinfo_sparc *c, | 577 | static void __cpuinit fill_in_one_cache(cpuinfo_sparc *c, |
578 | struct mdesc_handle *hp, | 578 | struct mdesc_handle *hp, |
579 | u64 mp) | 579 | u64 mp) |
580 | { | 580 | { |
@@ -619,8 +619,7 @@ static void __devinit fill_in_one_cache(cpuinfo_sparc *c, | |||
619 | } | 619 | } |
620 | } | 620 | } |
621 | 621 | ||
622 | static void __devinit mark_core_ids(struct mdesc_handle *hp, u64 mp, | 622 | static void __cpuinit mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id) |
623 | int core_id) | ||
624 | { | 623 | { |
625 | u64 a; | 624 | u64 a; |
626 | 625 | ||
@@ -653,7 +652,7 @@ static void __devinit mark_core_ids(struct mdesc_handle *hp, u64 mp, | |||
653 | } | 652 | } |
654 | } | 653 | } |
655 | 654 | ||
656 | static void __devinit set_core_ids(struct mdesc_handle *hp) | 655 | static void __cpuinit set_core_ids(struct mdesc_handle *hp) |
657 | { | 656 | { |
658 | int idx; | 657 | int idx; |
659 | u64 mp; | 658 | u64 mp; |
@@ -678,8 +677,7 @@ static void __devinit set_core_ids(struct mdesc_handle *hp) | |||
678 | } | 677 | } |
679 | } | 678 | } |
680 | 679 | ||
681 | static void __devinit mark_proc_ids(struct mdesc_handle *hp, u64 mp, | 680 | static void __cpuinit mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id) |
682 | int proc_id) | ||
683 | { | 681 | { |
684 | u64 a; | 682 | u64 a; |
685 | 683 | ||
@@ -698,8 +696,7 @@ static void __devinit mark_proc_ids(struct mdesc_handle *hp, u64 mp, | |||
698 | } | 696 | } |
699 | } | 697 | } |
700 | 698 | ||
701 | static void __devinit __set_proc_ids(struct mdesc_handle *hp, | 699 | static void __cpuinit __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name) |
702 | const char *exec_unit_name) | ||
703 | { | 700 | { |
704 | int idx; | 701 | int idx; |
705 | u64 mp; | 702 | u64 mp; |
@@ -720,13 +717,13 @@ static void __devinit __set_proc_ids(struct mdesc_handle *hp, | |||
720 | } | 717 | } |
721 | } | 718 | } |
722 | 719 | ||
723 | static void __devinit set_proc_ids(struct mdesc_handle *hp) | 720 | static void __cpuinit set_proc_ids(struct mdesc_handle *hp) |
724 | { | 721 | { |
725 | __set_proc_ids(hp, "exec_unit"); | 722 | __set_proc_ids(hp, "exec_unit"); |
726 | __set_proc_ids(hp, "exec-unit"); | 723 | __set_proc_ids(hp, "exec-unit"); |
727 | } | 724 | } |
728 | 725 | ||
729 | static void __devinit get_one_mondo_bits(const u64 *p, unsigned int *mask, | 726 | static void __cpuinit get_one_mondo_bits(const u64 *p, unsigned int *mask, |
730 | unsigned char def) | 727 | unsigned char def) |
731 | { | 728 | { |
732 | u64 val; | 729 | u64 val; |
@@ -745,7 +742,7 @@ use_default: | |||
745 | *mask = ((1U << def) * 64U) - 1U; | 742 | *mask = ((1U << def) * 64U) - 1U; |
746 | } | 743 | } |
747 | 744 | ||
748 | static void __devinit get_mondo_data(struct mdesc_handle *hp, u64 mp, | 745 | static void __cpuinit get_mondo_data(struct mdesc_handle *hp, u64 mp, |
749 | struct trap_per_cpu *tb) | 746 | struct trap_per_cpu *tb) |
750 | { | 747 | { |
751 | const u64 *val; | 748 | const u64 *val; |
@@ -763,23 +760,15 @@ static void __devinit get_mondo_data(struct mdesc_handle *hp, u64 mp, | |||
763 | get_one_mondo_bits(val, &tb->nonresum_qmask, 2); | 760 | get_one_mondo_bits(val, &tb->nonresum_qmask, 2); |
764 | } | 761 | } |
765 | 762 | ||
766 | void __cpuinit mdesc_fill_in_cpu_data(cpumask_t mask) | 763 | static void * __cpuinit mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handle *, u64, int, void *), void *arg, cpumask_t *mask) |
767 | { | 764 | { |
768 | struct mdesc_handle *hp = mdesc_grab(); | 765 | struct mdesc_handle *hp = mdesc_grab(); |
766 | void *ret = NULL; | ||
769 | u64 mp; | 767 | u64 mp; |
770 | 768 | ||
771 | ncpus_probed = 0; | ||
772 | mdesc_for_each_node_by_name(hp, mp, "cpu") { | 769 | mdesc_for_each_node_by_name(hp, mp, "cpu") { |
773 | const u64 *id = mdesc_get_property(hp, mp, "id", NULL); | 770 | const u64 *id = mdesc_get_property(hp, mp, "id", NULL); |
774 | const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL); | 771 | int cpuid = *id; |
775 | struct trap_per_cpu *tb; | ||
776 | cpuinfo_sparc *c; | ||
777 | int cpuid; | ||
778 | u64 a; | ||
779 | |||
780 | ncpus_probed++; | ||
781 | |||
782 | cpuid = *id; | ||
783 | 772 | ||
784 | #ifdef CONFIG_SMP | 773 | #ifdef CONFIG_SMP |
785 | if (cpuid >= NR_CPUS) { | 774 | if (cpuid >= NR_CPUS) { |
@@ -788,62 +777,104 @@ void __cpuinit mdesc_fill_in_cpu_data(cpumask_t mask) | |||
788 | cpuid, NR_CPUS); | 777 | cpuid, NR_CPUS); |
789 | continue; | 778 | continue; |
790 | } | 779 | } |
791 | if (!cpu_isset(cpuid, mask)) | 780 | if (!cpu_isset(cpuid, *mask)) |
792 | continue; | 781 | continue; |
793 | #else | ||
794 | /* On uniprocessor we only want the values for the | ||
795 | * real physical cpu the kernel booted onto, however | ||
796 | * cpu_data() only has one entry at index 0. | ||
797 | */ | ||
798 | if (cpuid != real_hard_smp_processor_id()) | ||
799 | continue; | ||
800 | cpuid = 0; | ||
801 | #endif | 782 | #endif |
802 | 783 | ||
803 | c = &cpu_data(cpuid); | 784 | ret = func(hp, mp, cpuid, arg); |
804 | c->clock_tick = *cfreq; | 785 | if (ret) |
786 | goto out; | ||
787 | } | ||
788 | out: | ||
789 | mdesc_release(hp); | ||
790 | return ret; | ||
791 | } | ||
805 | 792 | ||
806 | tb = &trap_block[cpuid]; | 793 | static void * __cpuinit record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg) |
807 | get_mondo_data(hp, mp, tb); | 794 | { |
795 | ncpus_probed++; | ||
796 | #ifdef CONFIG_SMP | ||
797 | set_cpu_present(cpuid, true); | ||
798 | #endif | ||
799 | return NULL; | ||
800 | } | ||
808 | 801 | ||
809 | mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) { | 802 | void __cpuinit mdesc_populate_present_mask(cpumask_t *mask) |
810 | u64 j, t = mdesc_arc_target(hp, a); | 803 | { |
811 | const char *t_name; | 804 | if (tlb_type != hypervisor) |
805 | return; | ||
812 | 806 | ||
813 | t_name = mdesc_node_name(hp, t); | 807 | ncpus_probed = 0; |
814 | if (!strcmp(t_name, "cache")) { | 808 | mdesc_iterate_over_cpus(record_one_cpu, NULL, mask); |
815 | fill_in_one_cache(c, hp, t); | 809 | } |
816 | continue; | ||
817 | } | ||
818 | 810 | ||
819 | mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_FWD) { | 811 | static void * __cpuinit fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg) |
820 | u64 n = mdesc_arc_target(hp, j); | 812 | { |
821 | const char *n_name; | 813 | const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL); |
814 | struct trap_per_cpu *tb; | ||
815 | cpuinfo_sparc *c; | ||
816 | u64 a; | ||
822 | 817 | ||
823 | n_name = mdesc_node_name(hp, n); | 818 | #ifndef CONFIG_SMP |
824 | if (!strcmp(n_name, "cache")) | 819 | /* On uniprocessor we only want the values for the |
825 | fill_in_one_cache(c, hp, n); | 820 | * real physical cpu the kernel booted onto, however |
826 | } | 821 | * cpu_data() only has one entry at index 0. |
822 | */ | ||
823 | if (cpuid != real_hard_smp_processor_id()) | ||
824 | return NULL; | ||
825 | cpuid = 0; | ||
826 | #endif | ||
827 | |||
828 | c = &cpu_data(cpuid); | ||
829 | c->clock_tick = *cfreq; | ||
830 | |||
831 | tb = &trap_block[cpuid]; | ||
832 | get_mondo_data(hp, mp, tb); | ||
833 | |||
834 | mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) { | ||
835 | u64 j, t = mdesc_arc_target(hp, a); | ||
836 | const char *t_name; | ||
837 | |||
838 | t_name = mdesc_node_name(hp, t); | ||
839 | if (!strcmp(t_name, "cache")) { | ||
840 | fill_in_one_cache(c, hp, t); | ||
841 | continue; | ||
827 | } | 842 | } |
828 | 843 | ||
829 | #ifdef CONFIG_SMP | 844 | mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_FWD) { |
830 | cpu_set(cpuid, cpu_present_map); | 845 | u64 n = mdesc_arc_target(hp, j); |
831 | #endif | 846 | const char *n_name; |
832 | 847 | ||
833 | c->core_id = 0; | 848 | n_name = mdesc_node_name(hp, n); |
834 | c->proc_id = -1; | 849 | if (!strcmp(n_name, "cache")) |
850 | fill_in_one_cache(c, hp, n); | ||
851 | } | ||
835 | } | 852 | } |
836 | 853 | ||
854 | c->core_id = 0; | ||
855 | c->proc_id = -1; | ||
856 | |||
857 | return NULL; | ||
858 | } | ||
859 | |||
860 | void __cpuinit mdesc_fill_in_cpu_data(cpumask_t *mask) | ||
861 | { | ||
862 | struct mdesc_handle *hp; | ||
863 | |||
864 | mdesc_iterate_over_cpus(fill_in_one_cpu, NULL, mask); | ||
865 | |||
837 | #ifdef CONFIG_SMP | 866 | #ifdef CONFIG_SMP |
838 | sparc64_multi_core = 1; | 867 | sparc64_multi_core = 1; |
839 | #endif | 868 | #endif |
840 | 869 | ||
870 | hp = mdesc_grab(); | ||
871 | |||
841 | set_core_ids(hp); | 872 | set_core_ids(hp); |
842 | set_proc_ids(hp); | 873 | set_proc_ids(hp); |
843 | 874 | ||
844 | smp_fill_in_sib_core_maps(); | ||
845 | |||
846 | mdesc_release(hp); | 875 | mdesc_release(hp); |
876 | |||
877 | smp_fill_in_sib_core_maps(); | ||
847 | } | 878 | } |
848 | 879 | ||
849 | static ssize_t mdesc_read(struct file *file, char __user *buf, | 880 | static ssize_t mdesc_read(struct file *file, char __user *buf, |
@@ -887,7 +918,6 @@ void __init sun4v_mdesc_init(void) | |||
887 | { | 918 | { |
888 | struct mdesc_handle *hp; | 919 | struct mdesc_handle *hp; |
889 | unsigned long len, real_len, status; | 920 | unsigned long len, real_len, status; |
890 | cpumask_t mask; | ||
891 | 921 | ||
892 | (void) sun4v_mach_desc(0UL, 0UL, &len); | 922 | (void) sun4v_mach_desc(0UL, 0UL, &len); |
893 | 923 | ||
@@ -911,7 +941,4 @@ void __init sun4v_mdesc_init(void) | |||
911 | cur_mdesc = hp; | 941 | cur_mdesc = hp; |
912 | 942 | ||
913 | report_platform_properties(); | 943 | report_platform_properties(); |
914 | |||
915 | cpus_setall(mask); | ||
916 | mdesc_fill_in_cpu_data(mask); | ||
917 | } | 944 | } |
diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c index c8f14c1dc521..90396702ea2c 100644 --- a/arch/sparc/kernel/of_device_32.c +++ b/arch/sparc/kernel/of_device_32.c | |||
@@ -6,159 +6,11 @@ | |||
6 | #include <linux/mod_devicetable.h> | 6 | #include <linux/mod_devicetable.h> |
7 | #include <linux/slab.h> | 7 | #include <linux/slab.h> |
8 | #include <linux/errno.h> | 8 | #include <linux/errno.h> |
9 | #include <linux/irq.h> | ||
9 | #include <linux/of_device.h> | 10 | #include <linux/of_device.h> |
10 | #include <linux/of_platform.h> | 11 | #include <linux/of_platform.h> |
11 | 12 | ||
12 | static int node_match(struct device *dev, void *data) | 13 | #include "of_device_common.h" |
13 | { | ||
14 | struct of_device *op = to_of_device(dev); | ||
15 | struct device_node *dp = data; | ||
16 | |||
17 | return (op->node == dp); | ||
18 | } | ||
19 | |||
20 | struct of_device *of_find_device_by_node(struct device_node *dp) | ||
21 | { | ||
22 | struct device *dev = bus_find_device(&of_platform_bus_type, NULL, | ||
23 | dp, node_match); | ||
24 | |||
25 | if (dev) | ||
26 | return to_of_device(dev); | ||
27 | |||
28 | return NULL; | ||
29 | } | ||
30 | EXPORT_SYMBOL(of_find_device_by_node); | ||
31 | |||
32 | unsigned int irq_of_parse_and_map(struct device_node *node, int index) | ||
33 | { | ||
34 | struct of_device *op = of_find_device_by_node(node); | ||
35 | |||
36 | if (!op || index >= op->num_irqs) | ||
37 | return 0; | ||
38 | |||
39 | return op->irqs[index]; | ||
40 | } | ||
41 | EXPORT_SYMBOL(irq_of_parse_and_map); | ||
42 | |||
43 | /* Take the archdata values for IOMMU, STC, and HOSTDATA found in | ||
44 | * BUS and propagate to all child of_device objects. | ||
45 | */ | ||
46 | void of_propagate_archdata(struct of_device *bus) | ||
47 | { | ||
48 | struct dev_archdata *bus_sd = &bus->dev.archdata; | ||
49 | struct device_node *bus_dp = bus->node; | ||
50 | struct device_node *dp; | ||
51 | |||
52 | for (dp = bus_dp->child; dp; dp = dp->sibling) { | ||
53 | struct of_device *op = of_find_device_by_node(dp); | ||
54 | |||
55 | op->dev.archdata.iommu = bus_sd->iommu; | ||
56 | op->dev.archdata.stc = bus_sd->stc; | ||
57 | op->dev.archdata.host_controller = bus_sd->host_controller; | ||
58 | op->dev.archdata.numa_node = bus_sd->numa_node; | ||
59 | |||
60 | if (dp->child) | ||
61 | of_propagate_archdata(op); | ||
62 | } | ||
63 | } | ||
64 | |||
65 | struct bus_type of_platform_bus_type; | ||
66 | EXPORT_SYMBOL(of_platform_bus_type); | ||
67 | |||
68 | static inline u64 of_read_addr(const u32 *cell, int size) | ||
69 | { | ||
70 | u64 r = 0; | ||
71 | while (size--) | ||
72 | r = (r << 32) | *(cell++); | ||
73 | return r; | ||
74 | } | ||
75 | |||
76 | static void __init get_cells(struct device_node *dp, | ||
77 | int *addrc, int *sizec) | ||
78 | { | ||
79 | if (addrc) | ||
80 | *addrc = of_n_addr_cells(dp); | ||
81 | if (sizec) | ||
82 | *sizec = of_n_size_cells(dp); | ||
83 | } | ||
84 | |||
85 | /* Max address size we deal with */ | ||
86 | #define OF_MAX_ADDR_CELLS 4 | ||
87 | |||
88 | struct of_bus { | ||
89 | const char *name; | ||
90 | const char *addr_prop_name; | ||
91 | int (*match)(struct device_node *parent); | ||
92 | void (*count_cells)(struct device_node *child, | ||
93 | int *addrc, int *sizec); | ||
94 | int (*map)(u32 *addr, const u32 *range, | ||
95 | int na, int ns, int pna); | ||
96 | unsigned long (*get_flags)(const u32 *addr, unsigned long); | ||
97 | }; | ||
98 | |||
99 | /* | ||
100 | * Default translator (generic bus) | ||
101 | */ | ||
102 | |||
103 | static void of_bus_default_count_cells(struct device_node *dev, | ||
104 | int *addrc, int *sizec) | ||
105 | { | ||
106 | get_cells(dev, addrc, sizec); | ||
107 | } | ||
108 | |||
109 | /* Make sure the least significant 64-bits are in-range. Even | ||
110 | * for 3 or 4 cell values it is a good enough approximation. | ||
111 | */ | ||
112 | static int of_out_of_range(const u32 *addr, const u32 *base, | ||
113 | const u32 *size, int na, int ns) | ||
114 | { | ||
115 | u64 a = of_read_addr(addr, na); | ||
116 | u64 b = of_read_addr(base, na); | ||
117 | |||
118 | if (a < b) | ||
119 | return 1; | ||
120 | |||
121 | b += of_read_addr(size, ns); | ||
122 | if (a >= b) | ||
123 | return 1; | ||
124 | |||
125 | return 0; | ||
126 | } | ||
127 | |||
128 | static int of_bus_default_map(u32 *addr, const u32 *range, | ||
129 | int na, int ns, int pna) | ||
130 | { | ||
131 | u32 result[OF_MAX_ADDR_CELLS]; | ||
132 | int i; | ||
133 | |||
134 | if (ns > 2) { | ||
135 | printk("of_device: Cannot handle size cells (%d) > 2.", ns); | ||
136 | return -EINVAL; | ||
137 | } | ||
138 | |||
139 | if (of_out_of_range(addr, range, range + na + pna, na, ns)) | ||
140 | return -EINVAL; | ||
141 | |||
142 | /* Start with the parent range base. */ | ||
143 | memcpy(result, range + na, pna * 4); | ||
144 | |||
145 | /* Add in the child address offset. */ | ||
146 | for (i = 0; i < na; i++) | ||
147 | result[pna - 1 - i] += | ||
148 | (addr[na - 1 - i] - | ||
149 | range[na - 1 - i]); | ||
150 | |||
151 | memcpy(addr, result, pna * 4); | ||
152 | |||
153 | return 0; | ||
154 | } | ||
155 | |||
156 | static unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags) | ||
157 | { | ||
158 | if (flags) | ||
159 | return flags; | ||
160 | return IORESOURCE_MEM; | ||
161 | } | ||
162 | 14 | ||
163 | /* | 15 | /* |
164 | * PCI bus specific translator | 16 | * PCI bus specific translator |
@@ -240,47 +92,6 @@ static unsigned long of_bus_pci_get_flags(const u32 *addr, unsigned long flags) | |||
240 | return flags; | 92 | return flags; |
241 | } | 93 | } |
242 | 94 | ||
243 | /* | ||
244 | * SBUS bus specific translator | ||
245 | */ | ||
246 | |||
247 | static int of_bus_sbus_match(struct device_node *np) | ||
248 | { | ||
249 | struct device_node *dp = np; | ||
250 | |||
251 | while (dp) { | ||
252 | if (!strcmp(dp->name, "sbus") || | ||
253 | !strcmp(dp->name, "sbi")) | ||
254 | return 1; | ||
255 | |||
256 | /* Have a look at use_1to1_mapping(). We're trying | ||
257 | * to match SBUS if that's the top-level bus and we | ||
258 | * don't have some intervening real bus that provides | ||
259 | * ranges based translations. | ||
260 | */ | ||
261 | if (of_find_property(dp, "ranges", NULL) != NULL) | ||
262 | break; | ||
263 | |||
264 | dp = dp->parent; | ||
265 | } | ||
266 | |||
267 | return 0; | ||
268 | } | ||
269 | |||
270 | static void of_bus_sbus_count_cells(struct device_node *child, | ||
271 | int *addrc, int *sizec) | ||
272 | { | ||
273 | if (addrc) | ||
274 | *addrc = 2; | ||
275 | if (sizec) | ||
276 | *sizec = 1; | ||
277 | } | ||
278 | |||
279 | static int of_bus_sbus_map(u32 *addr, const u32 *range, int na, int ns, int pna) | ||
280 | { | ||
281 | return of_bus_default_map(addr, range, na, ns, pna); | ||
282 | } | ||
283 | |||
284 | static unsigned long of_bus_sbus_get_flags(const u32 *addr, unsigned long flags) | 95 | static unsigned long of_bus_sbus_get_flags(const u32 *addr, unsigned long flags) |
285 | { | 96 | { |
286 | return IORESOURCE_MEM; | 97 | return IORESOURCE_MEM; |
@@ -307,7 +118,7 @@ static struct of_bus of_busses[] = { | |||
307 | .addr_prop_name = "reg", | 118 | .addr_prop_name = "reg", |
308 | .match = of_bus_sbus_match, | 119 | .match = of_bus_sbus_match, |
309 | .count_cells = of_bus_sbus_count_cells, | 120 | .count_cells = of_bus_sbus_count_cells, |
310 | .map = of_bus_sbus_map, | 121 | .map = of_bus_default_map, |
311 | .get_flags = of_bus_sbus_get_flags, | 122 | .get_flags = of_bus_sbus_get_flags, |
312 | }, | 123 | }, |
313 | /* Default */ | 124 | /* Default */ |
diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c index 5ac287ac03de..881947e59e95 100644 --- a/arch/sparc/kernel/of_device_64.c +++ b/arch/sparc/kernel/of_device_64.c | |||
@@ -10,6 +10,8 @@ | |||
10 | #include <linux/of_device.h> | 10 | #include <linux/of_device.h> |
11 | #include <linux/of_platform.h> | 11 | #include <linux/of_platform.h> |
12 | 12 | ||
13 | #include "of_device_common.h" | ||
14 | |||
13 | void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name) | 15 | void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name) |
14 | { | 16 | { |
15 | unsigned long ret = res->start + offset; | 17 | unsigned long ret = res->start + offset; |
@@ -35,156 +37,6 @@ void of_iounmap(struct resource *res, void __iomem *base, unsigned long size) | |||
35 | } | 37 | } |
36 | EXPORT_SYMBOL(of_iounmap); | 38 | EXPORT_SYMBOL(of_iounmap); |
37 | 39 | ||
38 | static int node_match(struct device *dev, void *data) | ||
39 | { | ||
40 | struct of_device *op = to_of_device(dev); | ||
41 | struct device_node *dp = data; | ||
42 | |||
43 | return (op->node == dp); | ||
44 | } | ||
45 | |||
46 | struct of_device *of_find_device_by_node(struct device_node *dp) | ||
47 | { | ||
48 | struct device *dev = bus_find_device(&of_platform_bus_type, NULL, | ||
49 | dp, node_match); | ||
50 | |||
51 | if (dev) | ||
52 | return to_of_device(dev); | ||
53 | |||
54 | return NULL; | ||
55 | } | ||
56 | EXPORT_SYMBOL(of_find_device_by_node); | ||
57 | |||
58 | unsigned int irq_of_parse_and_map(struct device_node *node, int index) | ||
59 | { | ||
60 | struct of_device *op = of_find_device_by_node(node); | ||
61 | |||
62 | if (!op || index >= op->num_irqs) | ||
63 | return 0; | ||
64 | |||
65 | return op->irqs[index]; | ||
66 | } | ||
67 | EXPORT_SYMBOL(irq_of_parse_and_map); | ||
68 | |||
69 | /* Take the archdata values for IOMMU, STC, and HOSTDATA found in | ||
70 | * BUS and propagate to all child of_device objects. | ||
71 | */ | ||
72 | void of_propagate_archdata(struct of_device *bus) | ||
73 | { | ||
74 | struct dev_archdata *bus_sd = &bus->dev.archdata; | ||
75 | struct device_node *bus_dp = bus->node; | ||
76 | struct device_node *dp; | ||
77 | |||
78 | for (dp = bus_dp->child; dp; dp = dp->sibling) { | ||
79 | struct of_device *op = of_find_device_by_node(dp); | ||
80 | |||
81 | op->dev.archdata.iommu = bus_sd->iommu; | ||
82 | op->dev.archdata.stc = bus_sd->stc; | ||
83 | op->dev.archdata.host_controller = bus_sd->host_controller; | ||
84 | op->dev.archdata.numa_node = bus_sd->numa_node; | ||
85 | |||
86 | if (dp->child) | ||
87 | of_propagate_archdata(op); | ||
88 | } | ||
89 | } | ||
90 | |||
91 | struct bus_type of_platform_bus_type; | ||
92 | EXPORT_SYMBOL(of_platform_bus_type); | ||
93 | |||
94 | static inline u64 of_read_addr(const u32 *cell, int size) | ||
95 | { | ||
96 | u64 r = 0; | ||
97 | while (size--) | ||
98 | r = (r << 32) | *(cell++); | ||
99 | return r; | ||
100 | } | ||
101 | |||
102 | static void get_cells(struct device_node *dp, int *addrc, int *sizec) | ||
103 | { | ||
104 | if (addrc) | ||
105 | *addrc = of_n_addr_cells(dp); | ||
106 | if (sizec) | ||
107 | *sizec = of_n_size_cells(dp); | ||
108 | } | ||
109 | |||
110 | /* Max address size we deal with */ | ||
111 | #define OF_MAX_ADDR_CELLS 4 | ||
112 | |||
113 | struct of_bus { | ||
114 | const char *name; | ||
115 | const char *addr_prop_name; | ||
116 | int (*match)(struct device_node *parent); | ||
117 | void (*count_cells)(struct device_node *child, | ||
118 | int *addrc, int *sizec); | ||
119 | int (*map)(u32 *addr, const u32 *range, | ||
120 | int na, int ns, int pna); | ||
121 | unsigned long (*get_flags)(const u32 *addr, unsigned long); | ||
122 | }; | ||
123 | |||
124 | /* | ||
125 | * Default translator (generic bus) | ||
126 | */ | ||
127 | |||
128 | static void of_bus_default_count_cells(struct device_node *dev, | ||
129 | int *addrc, int *sizec) | ||
130 | { | ||
131 | get_cells(dev, addrc, sizec); | ||
132 | } | ||
133 | |||
134 | /* Make sure the least significant 64-bits are in-range. Even | ||
135 | * for 3 or 4 cell values it is a good enough approximation. | ||
136 | */ | ||
137 | static int of_out_of_range(const u32 *addr, const u32 *base, | ||
138 | const u32 *size, int na, int ns) | ||
139 | { | ||
140 | u64 a = of_read_addr(addr, na); | ||
141 | u64 b = of_read_addr(base, na); | ||
142 | |||
143 | if (a < b) | ||
144 | return 1; | ||
145 | |||
146 | b += of_read_addr(size, ns); | ||
147 | if (a >= b) | ||
148 | return 1; | ||
149 | |||
150 | return 0; | ||
151 | } | ||
152 | |||
153 | static int of_bus_default_map(u32 *addr, const u32 *range, | ||
154 | int na, int ns, int pna) | ||
155 | { | ||
156 | u32 result[OF_MAX_ADDR_CELLS]; | ||
157 | int i; | ||
158 | |||
159 | if (ns > 2) { | ||
160 | printk("of_device: Cannot handle size cells (%d) > 2.", ns); | ||
161 | return -EINVAL; | ||
162 | } | ||
163 | |||
164 | if (of_out_of_range(addr, range, range + na + pna, na, ns)) | ||
165 | return -EINVAL; | ||
166 | |||
167 | /* Start with the parent range base. */ | ||
168 | memcpy(result, range + na, pna * 4); | ||
169 | |||
170 | /* Add in the child address offset. */ | ||
171 | for (i = 0; i < na; i++) | ||
172 | result[pna - 1 - i] += | ||
173 | (addr[na - 1 - i] - | ||
174 | range[na - 1 - i]); | ||
175 | |||
176 | memcpy(addr, result, pna * 4); | ||
177 | |||
178 | return 0; | ||
179 | } | ||
180 | |||
181 | static unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags) | ||
182 | { | ||
183 | if (flags) | ||
184 | return flags; | ||
185 | return IORESOURCE_MEM; | ||
186 | } | ||
187 | |||
188 | /* | 40 | /* |
189 | * PCI bus specific translator | 41 | * PCI bus specific translator |
190 | */ | 42 | */ |
@@ -295,42 +147,6 @@ static unsigned long of_bus_pci_get_flags(const u32 *addr, unsigned long flags) | |||
295 | } | 147 | } |
296 | 148 | ||
297 | /* | 149 | /* |
298 | * SBUS bus specific translator | ||
299 | */ | ||
300 | |||
301 | static int of_bus_sbus_match(struct device_node *np) | ||
302 | { | ||
303 | struct device_node *dp = np; | ||
304 | |||
305 | while (dp) { | ||
306 | if (!strcmp(dp->name, "sbus") || | ||
307 | !strcmp(dp->name, "sbi")) | ||
308 | return 1; | ||
309 | |||
310 | /* Have a look at use_1to1_mapping(). We're trying | ||
311 | * to match SBUS if that's the top-level bus and we | ||
312 | * don't have some intervening real bus that provides | ||
313 | * ranges based translations. | ||
314 | */ | ||
315 | if (of_find_property(dp, "ranges", NULL) != NULL) | ||
316 | break; | ||
317 | |||
318 | dp = dp->parent; | ||
319 | } | ||
320 | |||
321 | return 0; | ||
322 | } | ||
323 | |||
324 | static void of_bus_sbus_count_cells(struct device_node *child, | ||
325 | int *addrc, int *sizec) | ||
326 | { | ||
327 | if (addrc) | ||
328 | *addrc = 2; | ||
329 | if (sizec) | ||
330 | *sizec = 1; | ||
331 | } | ||
332 | |||
333 | /* | ||
334 | * FHC/Central bus specific translator. | 150 | * FHC/Central bus specific translator. |
335 | * | 151 | * |
336 | * This is just needed to hard-code the address and size cell | 152 | * This is just needed to hard-code the address and size cell |
diff --git a/arch/sparc/kernel/of_device_common.c b/arch/sparc/kernel/of_device_common.c new file mode 100644 index 000000000000..cb8eb799bb6c --- /dev/null +++ b/arch/sparc/kernel/of_device_common.c | |||
@@ -0,0 +1,174 @@ | |||
1 | #include <linux/string.h> | ||
2 | #include <linux/kernel.h> | ||
3 | #include <linux/of.h> | ||
4 | #include <linux/init.h> | ||
5 | #include <linux/module.h> | ||
6 | #include <linux/mod_devicetable.h> | ||
7 | #include <linux/slab.h> | ||
8 | #include <linux/errno.h> | ||
9 | #include <linux/irq.h> | ||
10 | #include <linux/of_device.h> | ||
11 | #include <linux/of_platform.h> | ||
12 | |||
13 | #include "of_device_common.h" | ||
14 | |||
15 | static int node_match(struct device *dev, void *data) | ||
16 | { | ||
17 | struct of_device *op = to_of_device(dev); | ||
18 | struct device_node *dp = data; | ||
19 | |||
20 | return (op->node == dp); | ||
21 | } | ||
22 | |||
23 | struct of_device *of_find_device_by_node(struct device_node *dp) | ||
24 | { | ||
25 | struct device *dev = bus_find_device(&of_platform_bus_type, NULL, | ||
26 | dp, node_match); | ||
27 | |||
28 | if (dev) | ||
29 | return to_of_device(dev); | ||
30 | |||
31 | return NULL; | ||
32 | } | ||
33 | EXPORT_SYMBOL(of_find_device_by_node); | ||
34 | |||
35 | unsigned int irq_of_parse_and_map(struct device_node *node, int index) | ||
36 | { | ||
37 | struct of_device *op = of_find_device_by_node(node); | ||
38 | |||
39 | if (!op || index >= op->num_irqs) | ||
40 | return 0; | ||
41 | |||
42 | return op->irqs[index]; | ||
43 | } | ||
44 | EXPORT_SYMBOL(irq_of_parse_and_map); | ||
45 | |||
46 | /* Take the archdata values for IOMMU, STC, and HOSTDATA found in | ||
47 | * BUS and propagate to all child of_device objects. | ||
48 | */ | ||
49 | void of_propagate_archdata(struct of_device *bus) | ||
50 | { | ||
51 | struct dev_archdata *bus_sd = &bus->dev.archdata; | ||
52 | struct device_node *bus_dp = bus->node; | ||
53 | struct device_node *dp; | ||
54 | |||
55 | for (dp = bus_dp->child; dp; dp = dp->sibling) { | ||
56 | struct of_device *op = of_find_device_by_node(dp); | ||
57 | |||
58 | op->dev.archdata.iommu = bus_sd->iommu; | ||
59 | op->dev.archdata.stc = bus_sd->stc; | ||
60 | op->dev.archdata.host_controller = bus_sd->host_controller; | ||
61 | op->dev.archdata.numa_node = bus_sd->numa_node; | ||
62 | |||
63 | if (dp->child) | ||
64 | of_propagate_archdata(op); | ||
65 | } | ||
66 | } | ||
67 | |||
68 | struct bus_type of_platform_bus_type; | ||
69 | EXPORT_SYMBOL(of_platform_bus_type); | ||
70 | |||
71 | static void get_cells(struct device_node *dp, int *addrc, int *sizec) | ||
72 | { | ||
73 | if (addrc) | ||
74 | *addrc = of_n_addr_cells(dp); | ||
75 | if (sizec) | ||
76 | *sizec = of_n_size_cells(dp); | ||
77 | } | ||
78 | |||
79 | /* | ||
80 | * Default translator (generic bus) | ||
81 | */ | ||
82 | |||
83 | void of_bus_default_count_cells(struct device_node *dev, int *addrc, int *sizec) | ||
84 | { | ||
85 | get_cells(dev, addrc, sizec); | ||
86 | } | ||
87 | |||
88 | /* Make sure the least significant 64-bits are in-range. Even | ||
89 | * for 3 or 4 cell values it is a good enough approximation. | ||
90 | */ | ||
91 | int of_out_of_range(const u32 *addr, const u32 *base, | ||
92 | const u32 *size, int na, int ns) | ||
93 | { | ||
94 | u64 a = of_read_addr(addr, na); | ||
95 | u64 b = of_read_addr(base, na); | ||
96 | |||
97 | if (a < b) | ||
98 | return 1; | ||
99 | |||
100 | b += of_read_addr(size, ns); | ||
101 | if (a >= b) | ||
102 | return 1; | ||
103 | |||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | int of_bus_default_map(u32 *addr, const u32 *range, int na, int ns, int pna) | ||
108 | { | ||
109 | u32 result[OF_MAX_ADDR_CELLS]; | ||
110 | int i; | ||
111 | |||
112 | if (ns > 2) { | ||
113 | printk("of_device: Cannot handle size cells (%d) > 2.", ns); | ||
114 | return -EINVAL; | ||
115 | } | ||
116 | |||
117 | if (of_out_of_range(addr, range, range + na + pna, na, ns)) | ||
118 | return -EINVAL; | ||
119 | |||
120 | /* Start with the parent range base. */ | ||
121 | memcpy(result, range + na, pna * 4); | ||
122 | |||
123 | /* Add in the child address offset. */ | ||
124 | for (i = 0; i < na; i++) | ||
125 | result[pna - 1 - i] += | ||
126 | (addr[na - 1 - i] - | ||
127 | range[na - 1 - i]); | ||
128 | |||
129 | memcpy(addr, result, pna * 4); | ||
130 | |||
131 | return 0; | ||
132 | } | ||
133 | |||
134 | unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags) | ||
135 | { | ||
136 | if (flags) | ||
137 | return flags; | ||
138 | return IORESOURCE_MEM; | ||
139 | } | ||
140 | |||
141 | /* | ||
142 | * SBUS bus specific translator | ||
143 | */ | ||
144 | |||
145 | int of_bus_sbus_match(struct device_node *np) | ||
146 | { | ||
147 | struct device_node *dp = np; | ||
148 | |||
149 | while (dp) { | ||
150 | if (!strcmp(dp->name, "sbus") || | ||
151 | !strcmp(dp->name, "sbi")) | ||
152 | return 1; | ||
153 | |||
154 | /* Have a look at use_1to1_mapping(). We're trying | ||
155 | * to match SBUS if that's the top-level bus and we | ||
156 | * don't have some intervening real bus that provides | ||
157 | * ranges based translations. | ||
158 | */ | ||
159 | if (of_find_property(dp, "ranges", NULL) != NULL) | ||
160 | break; | ||
161 | |||
162 | dp = dp->parent; | ||
163 | } | ||
164 | |||
165 | return 0; | ||
166 | } | ||
167 | |||
168 | void of_bus_sbus_count_cells(struct device_node *child, int *addrc, int *sizec) | ||
169 | { | ||
170 | if (addrc) | ||
171 | *addrc = 2; | ||
172 | if (sizec) | ||
173 | *sizec = 1; | ||
174 | } | ||
diff --git a/arch/sparc/kernel/of_device_common.h b/arch/sparc/kernel/of_device_common.h new file mode 100644 index 000000000000..cdfd23992841 --- /dev/null +++ b/arch/sparc/kernel/of_device_common.h | |||
@@ -0,0 +1,36 @@ | |||
1 | #ifndef _OF_DEVICE_COMMON_H | ||
2 | #define _OF_DEVICE_COMMON_H | ||
3 | |||
4 | static inline u64 of_read_addr(const u32 *cell, int size) | ||
5 | { | ||
6 | u64 r = 0; | ||
7 | while (size--) | ||
8 | r = (r << 32) | *(cell++); | ||
9 | return r; | ||
10 | } | ||
11 | |||
12 | void of_bus_default_count_cells(struct device_node *dev, int *addrc, | ||
13 | int *sizec); | ||
14 | int of_out_of_range(const u32 *addr, const u32 *base, | ||
15 | const u32 *size, int na, int ns); | ||
16 | int of_bus_default_map(u32 *addr, const u32 *range, int na, int ns, int pna); | ||
17 | unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags); | ||
18 | |||
19 | int of_bus_sbus_match(struct device_node *np); | ||
20 | void of_bus_sbus_count_cells(struct device_node *child, int *addrc, int *sizec); | ||
21 | |||
22 | /* Max address size we deal with */ | ||
23 | #define OF_MAX_ADDR_CELLS 4 | ||
24 | |||
25 | struct of_bus { | ||
26 | const char *name; | ||
27 | const char *addr_prop_name; | ||
28 | int (*match)(struct device_node *parent); | ||
29 | void (*count_cells)(struct device_node *child, | ||
30 | int *addrc, int *sizec); | ||
31 | int (*map)(u32 *addr, const u32 *range, | ||
32 | int na, int ns, int pna); | ||
33 | unsigned long (*get_flags)(const u32 *addr, unsigned long); | ||
34 | }; | ||
35 | |||
36 | #endif /* _OF_DEVICE_COMMON_H */ | ||
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index 5db5ebed35da..2485eaa23101 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c | |||
@@ -230,8 +230,9 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu, | |||
230 | free_pages((unsigned long)cpu, order); | 230 | free_pages((unsigned long)cpu, order); |
231 | } | 231 | } |
232 | 232 | ||
233 | static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz, | 233 | static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, |
234 | enum dma_data_direction direction) | 234 | unsigned long offset, size_t sz, |
235 | enum dma_data_direction direction) | ||
235 | { | 236 | { |
236 | struct iommu *iommu; | 237 | struct iommu *iommu; |
237 | unsigned long flags, npages, oaddr; | 238 | unsigned long flags, npages, oaddr; |
@@ -245,7 +246,7 @@ static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz, | |||
245 | if (unlikely(direction == DMA_NONE)) | 246 | if (unlikely(direction == DMA_NONE)) |
246 | goto bad; | 247 | goto bad; |
247 | 248 | ||
248 | oaddr = (unsigned long)ptr; | 249 | oaddr = (unsigned long)(page_address(page) + offset); |
249 | npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); | 250 | npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); |
250 | npages >>= IO_PAGE_SHIFT; | 251 | npages >>= IO_PAGE_SHIFT; |
251 | 252 | ||
@@ -294,8 +295,8 @@ iommu_map_fail: | |||
294 | return DMA_ERROR_CODE; | 295 | return DMA_ERROR_CODE; |
295 | } | 296 | } |
296 | 297 | ||
297 | static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr, | 298 | static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, |
298 | size_t sz, enum dma_data_direction direction) | 299 | size_t sz, enum dma_data_direction direction) |
299 | { | 300 | { |
300 | struct pci_pbm_info *pbm; | 301 | struct pci_pbm_info *pbm; |
301 | struct iommu *iommu; | 302 | struct iommu *iommu; |
@@ -537,8 +538,8 @@ static void dma_4v_sync_sg_for_cpu(struct device *dev, | |||
537 | static const struct dma_ops sun4v_dma_ops = { | 538 | static const struct dma_ops sun4v_dma_ops = { |
538 | .alloc_coherent = dma_4v_alloc_coherent, | 539 | .alloc_coherent = dma_4v_alloc_coherent, |
539 | .free_coherent = dma_4v_free_coherent, | 540 | .free_coherent = dma_4v_free_coherent, |
540 | .map_single = dma_4v_map_single, | 541 | .map_page = dma_4v_map_page, |
541 | .unmap_single = dma_4v_unmap_single, | 542 | .unmap_page = dma_4v_unmap_page, |
542 | .map_sg = dma_4v_map_sg, | 543 | .map_sg = dma_4v_map_sg, |
543 | .unmap_sg = dma_4v_unmap_sg, | 544 | .unmap_sg = dma_4v_unmap_sg, |
544 | .sync_single_for_cpu = dma_4v_sync_single_for_cpu, | 545 | .sync_single_for_cpu = dma_4v_sync_single_for_cpu, |
diff --git a/arch/sparc/kernel/prom.h b/arch/sparc/kernel/prom.h index bb0f0fda6cab..453397fe5e14 100644 --- a/arch/sparc/kernel/prom.h +++ b/arch/sparc/kernel/prom.h | |||
@@ -22,7 +22,6 @@ static inline int is_root_node(const struct device_node *dp) | |||
22 | 22 | ||
23 | extern char *build_path_component(struct device_node *dp); | 23 | extern char *build_path_component(struct device_node *dp); |
24 | extern void of_console_init(void); | 24 | extern void of_console_init(void); |
25 | extern void of_fill_in_cpu_data(void); | ||
26 | 25 | ||
27 | extern unsigned int prom_early_allocated; | 26 | extern unsigned int prom_early_allocated; |
28 | 27 | ||
diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c index ca55c7012f77..fb06ac2bd38f 100644 --- a/arch/sparc/kernel/prom_64.c +++ b/arch/sparc/kernel/prom_64.c | |||
@@ -374,75 +374,26 @@ static const char *get_mid_prop(void) | |||
374 | return (tlb_type == spitfire ? "upa-portid" : "portid"); | 374 | return (tlb_type == spitfire ? "upa-portid" : "portid"); |
375 | } | 375 | } |
376 | 376 | ||
377 | struct device_node *of_find_node_by_cpuid(int cpuid) | 377 | static void *of_iterate_over_cpus(void *(*func)(struct device_node *, int, int), int arg) |
378 | { | ||
379 | struct device_node *dp; | ||
380 | const char *mid_prop = get_mid_prop(); | ||
381 | |||
382 | for_each_node_by_type(dp, "cpu") { | ||
383 | int id = of_getintprop_default(dp, mid_prop, -1); | ||
384 | const char *this_mid_prop = mid_prop; | ||
385 | |||
386 | if (id < 0) { | ||
387 | this_mid_prop = "cpuid"; | ||
388 | id = of_getintprop_default(dp, this_mid_prop, -1); | ||
389 | } | ||
390 | |||
391 | if (id < 0) { | ||
392 | prom_printf("OF: Serious problem, cpu lacks " | ||
393 | "%s property", this_mid_prop); | ||
394 | prom_halt(); | ||
395 | } | ||
396 | if (cpuid == id) | ||
397 | return dp; | ||
398 | } | ||
399 | return NULL; | ||
400 | } | ||
401 | |||
402 | void __init of_fill_in_cpu_data(void) | ||
403 | { | 378 | { |
404 | struct device_node *dp; | 379 | struct device_node *dp; |
405 | const char *mid_prop; | 380 | const char *mid_prop; |
406 | 381 | ||
407 | if (tlb_type == hypervisor) | ||
408 | return; | ||
409 | |||
410 | mid_prop = get_mid_prop(); | 382 | mid_prop = get_mid_prop(); |
411 | ncpus_probed = 0; | ||
412 | for_each_node_by_type(dp, "cpu") { | 383 | for_each_node_by_type(dp, "cpu") { |
413 | int cpuid = of_getintprop_default(dp, mid_prop, -1); | 384 | int cpuid = of_getintprop_default(dp, mid_prop, -1); |
414 | const char *this_mid_prop = mid_prop; | 385 | const char *this_mid_prop = mid_prop; |
415 | struct device_node *portid_parent; | 386 | void *ret; |
416 | int portid = -1; | ||
417 | 387 | ||
418 | portid_parent = NULL; | ||
419 | if (cpuid < 0) { | 388 | if (cpuid < 0) { |
420 | this_mid_prop = "cpuid"; | 389 | this_mid_prop = "cpuid"; |
421 | cpuid = of_getintprop_default(dp, this_mid_prop, -1); | 390 | cpuid = of_getintprop_default(dp, this_mid_prop, -1); |
422 | if (cpuid >= 0) { | ||
423 | int limit = 2; | ||
424 | |||
425 | portid_parent = dp; | ||
426 | while (limit--) { | ||
427 | portid_parent = portid_parent->parent; | ||
428 | if (!portid_parent) | ||
429 | break; | ||
430 | portid = of_getintprop_default(portid_parent, | ||
431 | "portid", -1); | ||
432 | if (portid >= 0) | ||
433 | break; | ||
434 | } | ||
435 | } | ||
436 | } | 391 | } |
437 | |||
438 | if (cpuid < 0) { | 392 | if (cpuid < 0) { |
439 | prom_printf("OF: Serious problem, cpu lacks " | 393 | prom_printf("OF: Serious problem, cpu lacks " |
440 | "%s property", this_mid_prop); | 394 | "%s property", this_mid_prop); |
441 | prom_halt(); | 395 | prom_halt(); |
442 | } | 396 | } |
443 | |||
444 | ncpus_probed++; | ||
445 | |||
446 | #ifdef CONFIG_SMP | 397 | #ifdef CONFIG_SMP |
447 | if (cpuid >= NR_CPUS) { | 398 | if (cpuid >= NR_CPUS) { |
448 | printk(KERN_WARNING "Ignoring CPU %d which is " | 399 | printk(KERN_WARNING "Ignoring CPU %d which is " |
@@ -450,79 +401,142 @@ void __init of_fill_in_cpu_data(void) | |||
450 | cpuid, NR_CPUS); | 401 | cpuid, NR_CPUS); |
451 | continue; | 402 | continue; |
452 | } | 403 | } |
453 | #else | ||
454 | /* On uniprocessor we only want the values for the | ||
455 | * real physical cpu the kernel booted onto, however | ||
456 | * cpu_data() only has one entry at index 0. | ||
457 | */ | ||
458 | if (cpuid != real_hard_smp_processor_id()) | ||
459 | continue; | ||
460 | cpuid = 0; | ||
461 | #endif | 404 | #endif |
405 | ret = func(dp, cpuid, arg); | ||
406 | if (ret) | ||
407 | return ret; | ||
408 | } | ||
409 | return NULL; | ||
410 | } | ||
462 | 411 | ||
463 | cpu_data(cpuid).clock_tick = | 412 | static void *check_cpu_node(struct device_node *dp, int cpuid, int id) |
464 | of_getintprop_default(dp, "clock-frequency", 0); | 413 | { |
465 | 414 | if (id == cpuid) | |
466 | if (portid_parent) { | 415 | return dp; |
467 | cpu_data(cpuid).dcache_size = | 416 | return NULL; |
468 | of_getintprop_default(dp, "l1-dcache-size", | 417 | } |
469 | 16 * 1024); | 418 | |
470 | cpu_data(cpuid).dcache_line_size = | 419 | struct device_node *of_find_node_by_cpuid(int cpuid) |
471 | of_getintprop_default(dp, "l1-dcache-line-size", | 420 | { |
472 | 32); | 421 | return of_iterate_over_cpus(check_cpu_node, cpuid); |
473 | cpu_data(cpuid).icache_size = | 422 | } |
474 | of_getintprop_default(dp, "l1-icache-size", | 423 | |
475 | 8 * 1024); | 424 | static void *record_one_cpu(struct device_node *dp, int cpuid, int arg) |
476 | cpu_data(cpuid).icache_line_size = | 425 | { |
477 | of_getintprop_default(dp, "l1-icache-line-size", | 426 | ncpus_probed++; |
478 | 32); | ||
479 | cpu_data(cpuid).ecache_size = | ||
480 | of_getintprop_default(dp, "l2-cache-size", 0); | ||
481 | cpu_data(cpuid).ecache_line_size = | ||
482 | of_getintprop_default(dp, "l2-cache-line-size", 0); | ||
483 | if (!cpu_data(cpuid).ecache_size || | ||
484 | !cpu_data(cpuid).ecache_line_size) { | ||
485 | cpu_data(cpuid).ecache_size = | ||
486 | of_getintprop_default(portid_parent, | ||
487 | "l2-cache-size", | ||
488 | (4 * 1024 * 1024)); | ||
489 | cpu_data(cpuid).ecache_line_size = | ||
490 | of_getintprop_default(portid_parent, | ||
491 | "l2-cache-line-size", 64); | ||
492 | } | ||
493 | |||
494 | cpu_data(cpuid).core_id = portid + 1; | ||
495 | cpu_data(cpuid).proc_id = portid; | ||
496 | #ifdef CONFIG_SMP | 427 | #ifdef CONFIG_SMP |
497 | sparc64_multi_core = 1; | 428 | set_cpu_present(cpuid, true); |
429 | set_cpu_possible(cpuid, true); | ||
498 | #endif | 430 | #endif |
499 | } else { | 431 | return NULL; |
500 | cpu_data(cpuid).dcache_size = | 432 | } |
501 | of_getintprop_default(dp, "dcache-size", 16 * 1024); | ||
502 | cpu_data(cpuid).dcache_line_size = | ||
503 | of_getintprop_default(dp, "dcache-line-size", 32); | ||
504 | 433 | ||
505 | cpu_data(cpuid).icache_size = | 434 | void __init of_populate_present_mask(void) |
506 | of_getintprop_default(dp, "icache-size", 16 * 1024); | 435 | { |
507 | cpu_data(cpuid).icache_line_size = | 436 | if (tlb_type == hypervisor) |
508 | of_getintprop_default(dp, "icache-line-size", 32); | 437 | return; |
438 | |||
439 | ncpus_probed = 0; | ||
440 | of_iterate_over_cpus(record_one_cpu, 0); | ||
441 | } | ||
509 | 442 | ||
443 | static void *fill_in_one_cpu(struct device_node *dp, int cpuid, int arg) | ||
444 | { | ||
445 | struct device_node *portid_parent = NULL; | ||
446 | int portid = -1; | ||
447 | |||
448 | if (of_find_property(dp, "cpuid", NULL)) { | ||
449 | int limit = 2; | ||
450 | |||
451 | portid_parent = dp; | ||
452 | while (limit--) { | ||
453 | portid_parent = portid_parent->parent; | ||
454 | if (!portid_parent) | ||
455 | break; | ||
456 | portid = of_getintprop_default(portid_parent, | ||
457 | "portid", -1); | ||
458 | if (portid >= 0) | ||
459 | break; | ||
460 | } | ||
461 | } | ||
462 | |||
463 | #ifndef CONFIG_SMP | ||
464 | /* On uniprocessor we only want the values for the | ||
465 | * real physical cpu the kernel booted onto, however | ||
466 | * cpu_data() only has one entry at index 0. | ||
467 | */ | ||
468 | if (cpuid != real_hard_smp_processor_id()) | ||
469 | return NULL; | ||
470 | cpuid = 0; | ||
471 | #endif | ||
472 | |||
473 | cpu_data(cpuid).clock_tick = | ||
474 | of_getintprop_default(dp, "clock-frequency", 0); | ||
475 | |||
476 | if (portid_parent) { | ||
477 | cpu_data(cpuid).dcache_size = | ||
478 | of_getintprop_default(dp, "l1-dcache-size", | ||
479 | 16 * 1024); | ||
480 | cpu_data(cpuid).dcache_line_size = | ||
481 | of_getintprop_default(dp, "l1-dcache-line-size", | ||
482 | 32); | ||
483 | cpu_data(cpuid).icache_size = | ||
484 | of_getintprop_default(dp, "l1-icache-size", | ||
485 | 8 * 1024); | ||
486 | cpu_data(cpuid).icache_line_size = | ||
487 | of_getintprop_default(dp, "l1-icache-line-size", | ||
488 | 32); | ||
489 | cpu_data(cpuid).ecache_size = | ||
490 | of_getintprop_default(dp, "l2-cache-size", 0); | ||
491 | cpu_data(cpuid).ecache_line_size = | ||
492 | of_getintprop_default(dp, "l2-cache-line-size", 0); | ||
493 | if (!cpu_data(cpuid).ecache_size || | ||
494 | !cpu_data(cpuid).ecache_line_size) { | ||
510 | cpu_data(cpuid).ecache_size = | 495 | cpu_data(cpuid).ecache_size = |
511 | of_getintprop_default(dp, "ecache-size", | 496 | of_getintprop_default(portid_parent, |
497 | "l2-cache-size", | ||
512 | (4 * 1024 * 1024)); | 498 | (4 * 1024 * 1024)); |
513 | cpu_data(cpuid).ecache_line_size = | 499 | cpu_data(cpuid).ecache_line_size = |
514 | of_getintprop_default(dp, "ecache-line-size", 64); | 500 | of_getintprop_default(portid_parent, |
515 | 501 | "l2-cache-line-size", 64); | |
516 | cpu_data(cpuid).core_id = 0; | ||
517 | cpu_data(cpuid).proc_id = -1; | ||
518 | } | 502 | } |
519 | 503 | ||
504 | cpu_data(cpuid).core_id = portid + 1; | ||
505 | cpu_data(cpuid).proc_id = portid; | ||
520 | #ifdef CONFIG_SMP | 506 | #ifdef CONFIG_SMP |
521 | set_cpu_present(cpuid, true); | 507 | sparc64_multi_core = 1; |
522 | set_cpu_possible(cpuid, true); | ||
523 | #endif | 508 | #endif |
509 | } else { | ||
510 | cpu_data(cpuid).dcache_size = | ||
511 | of_getintprop_default(dp, "dcache-size", 16 * 1024); | ||
512 | cpu_data(cpuid).dcache_line_size = | ||
513 | of_getintprop_default(dp, "dcache-line-size", 32); | ||
514 | |||
515 | cpu_data(cpuid).icache_size = | ||
516 | of_getintprop_default(dp, "icache-size", 16 * 1024); | ||
517 | cpu_data(cpuid).icache_line_size = | ||
518 | of_getintprop_default(dp, "icache-line-size", 32); | ||
519 | |||
520 | cpu_data(cpuid).ecache_size = | ||
521 | of_getintprop_default(dp, "ecache-size", | ||
522 | (4 * 1024 * 1024)); | ||
523 | cpu_data(cpuid).ecache_line_size = | ||
524 | of_getintprop_default(dp, "ecache-line-size", 64); | ||
525 | |||
526 | cpu_data(cpuid).core_id = 0; | ||
527 | cpu_data(cpuid).proc_id = -1; | ||
524 | } | 528 | } |
525 | 529 | ||
530 | return NULL; | ||
531 | } | ||
532 | |||
533 | void __init of_fill_in_cpu_data(void) | ||
534 | { | ||
535 | if (tlb_type == hypervisor) | ||
536 | return; | ||
537 | |||
538 | of_iterate_over_cpus(fill_in_one_cpu, 0); | ||
539 | |||
526 | smp_fill_in_sib_core_maps(); | 540 | smp_fill_in_sib_core_maps(); |
527 | } | 541 | } |
528 | 542 | ||
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c index ff7b591c8946..0fb5789d43c8 100644 --- a/arch/sparc/kernel/prom_common.c +++ b/arch/sparc/kernel/prom_common.c | |||
@@ -313,6 +313,4 @@ void __init prom_build_devicetree(void) | |||
313 | 313 | ||
314 | printk("PROM: Built device tree with %u bytes of memory.\n", | 314 | printk("PROM: Built device tree with %u bytes of memory.\n", |
315 | prom_early_allocated); | 315 | prom_early_allocated); |
316 | |||
317 | of_fill_in_cpu_data(); | ||
318 | } | 316 | } |
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index f7642e5a94db..fa44eaf8d897 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c | |||
@@ -20,7 +20,8 @@ | |||
20 | #include <linux/cache.h> | 20 | #include <linux/cache.h> |
21 | #include <linux/jiffies.h> | 21 | #include <linux/jiffies.h> |
22 | #include <linux/profile.h> | 22 | #include <linux/profile.h> |
23 | #include <linux/lmb.h> | 23 | #include <linux/bootmem.h> |
24 | #include <linux/vmalloc.h> | ||
24 | #include <linux/cpu.h> | 25 | #include <linux/cpu.h> |
25 | 26 | ||
26 | #include <asm/head.h> | 27 | #include <asm/head.h> |
@@ -47,6 +48,8 @@ | |||
47 | #include <asm/ldc.h> | 48 | #include <asm/ldc.h> |
48 | #include <asm/hypervisor.h> | 49 | #include <asm/hypervisor.h> |
49 | 50 | ||
51 | #include "cpumap.h" | ||
52 | |||
50 | int sparc64_multi_core __read_mostly; | 53 | int sparc64_multi_core __read_mostly; |
51 | 54 | ||
52 | DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; | 55 | DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; |
@@ -278,7 +281,7 @@ static unsigned long kimage_addr_to_ra(void *p) | |||
278 | return kern_base + (val - KERNBASE); | 281 | return kern_base + (val - KERNBASE); |
279 | } | 282 | } |
280 | 283 | ||
281 | static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg) | 284 | static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg, void **descrp) |
282 | { | 285 | { |
283 | extern unsigned long sparc64_ttable_tl0; | 286 | extern unsigned long sparc64_ttable_tl0; |
284 | extern unsigned long kern_locked_tte_data; | 287 | extern unsigned long kern_locked_tte_data; |
@@ -298,12 +301,12 @@ static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread | |||
298 | "hvtramp_descr.\n"); | 301 | "hvtramp_descr.\n"); |
299 | return; | 302 | return; |
300 | } | 303 | } |
304 | *descrp = hdesc; | ||
301 | 305 | ||
302 | hdesc->cpu = cpu; | 306 | hdesc->cpu = cpu; |
303 | hdesc->num_mappings = num_kernel_image_mappings; | 307 | hdesc->num_mappings = num_kernel_image_mappings; |
304 | 308 | ||
305 | tb = &trap_block[cpu]; | 309 | tb = &trap_block[cpu]; |
306 | tb->hdesc = hdesc; | ||
307 | 310 | ||
308 | hdesc->fault_info_va = (unsigned long) &tb->fault_info; | 311 | hdesc->fault_info_va = (unsigned long) &tb->fault_info; |
309 | hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info); | 312 | hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info); |
@@ -341,12 +344,12 @@ static struct thread_info *cpu_new_thread = NULL; | |||
341 | 344 | ||
342 | static int __cpuinit smp_boot_one_cpu(unsigned int cpu) | 345 | static int __cpuinit smp_boot_one_cpu(unsigned int cpu) |
343 | { | 346 | { |
344 | struct trap_per_cpu *tb = &trap_block[cpu]; | ||
345 | unsigned long entry = | 347 | unsigned long entry = |
346 | (unsigned long)(&sparc64_cpu_startup); | 348 | (unsigned long)(&sparc64_cpu_startup); |
347 | unsigned long cookie = | 349 | unsigned long cookie = |
348 | (unsigned long)(&cpu_new_thread); | 350 | (unsigned long)(&cpu_new_thread); |
349 | struct task_struct *p; | 351 | struct task_struct *p; |
352 | void *descr = NULL; | ||
350 | int timeout, ret; | 353 | int timeout, ret; |
351 | 354 | ||
352 | p = fork_idle(cpu); | 355 | p = fork_idle(cpu); |
@@ -359,7 +362,8 @@ static int __cpuinit smp_boot_one_cpu(unsigned int cpu) | |||
359 | #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU) | 362 | #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU) |
360 | if (ldom_domaining_enabled) | 363 | if (ldom_domaining_enabled) |
361 | ldom_startcpu_cpuid(cpu, | 364 | ldom_startcpu_cpuid(cpu, |
362 | (unsigned long) cpu_new_thread); | 365 | (unsigned long) cpu_new_thread, |
366 | &descr); | ||
363 | else | 367 | else |
364 | #endif | 368 | #endif |
365 | prom_startcpu_cpuid(cpu, entry, cookie); | 369 | prom_startcpu_cpuid(cpu, entry, cookie); |
@@ -383,10 +387,7 @@ static int __cpuinit smp_boot_one_cpu(unsigned int cpu) | |||
383 | } | 387 | } |
384 | cpu_new_thread = NULL; | 388 | cpu_new_thread = NULL; |
385 | 389 | ||
386 | if (tb->hdesc) { | 390 | kfree(descr); |
387 | kfree(tb->hdesc); | ||
388 | tb->hdesc = NULL; | ||
389 | } | ||
390 | 391 | ||
391 | return ret; | 392 | return ret; |
392 | } | 393 | } |
@@ -1315,6 +1316,8 @@ int __cpu_disable(void) | |||
1315 | cpu_clear(cpu, cpu_online_map); | 1316 | cpu_clear(cpu, cpu_online_map); |
1316 | ipi_call_unlock(); | 1317 | ipi_call_unlock(); |
1317 | 1318 | ||
1319 | cpu_map_rebuild(); | ||
1320 | |||
1318 | return 0; | 1321 | return 0; |
1319 | } | 1322 | } |
1320 | 1323 | ||
@@ -1373,36 +1376,171 @@ void smp_send_stop(void) | |||
1373 | { | 1376 | { |
1374 | } | 1377 | } |
1375 | 1378 | ||
1376 | unsigned long __per_cpu_base __read_mostly; | 1379 | /** |
1377 | unsigned long __per_cpu_shift __read_mostly; | 1380 | * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu |
1381 | * @cpu: cpu to allocate for | ||
1382 | * @size: size allocation in bytes | ||
1383 | * @align: alignment | ||
1384 | * | ||
1385 | * Allocate @size bytes aligned at @align for cpu @cpu. This wrapper | ||
1386 | * does the right thing for NUMA regardless of the current | ||
1387 | * configuration. | ||
1388 | * | ||
1389 | * RETURNS: | ||
1390 | * Pointer to the allocated area on success, NULL on failure. | ||
1391 | */ | ||
1392 | static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size, | ||
1393 | unsigned long align) | ||
1394 | { | ||
1395 | const unsigned long goal = __pa(MAX_DMA_ADDRESS); | ||
1396 | #ifdef CONFIG_NEED_MULTIPLE_NODES | ||
1397 | int node = cpu_to_node(cpu); | ||
1398 | void *ptr; | ||
1399 | |||
1400 | if (!node_online(node) || !NODE_DATA(node)) { | ||
1401 | ptr = __alloc_bootmem(size, align, goal); | ||
1402 | pr_info("cpu %d has no node %d or node-local memory\n", | ||
1403 | cpu, node); | ||
1404 | pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n", | ||
1405 | cpu, size, __pa(ptr)); | ||
1406 | } else { | ||
1407 | ptr = __alloc_bootmem_node(NODE_DATA(node), | ||
1408 | size, align, goal); | ||
1409 | pr_debug("per cpu data for cpu%d %lu bytes on node%d at " | ||
1410 | "%016lx\n", cpu, size, node, __pa(ptr)); | ||
1411 | } | ||
1412 | return ptr; | ||
1413 | #else | ||
1414 | return __alloc_bootmem(size, align, goal); | ||
1415 | #endif | ||
1416 | } | ||
1378 | 1417 | ||
1379 | EXPORT_SYMBOL(__per_cpu_base); | 1418 | static size_t pcpur_size __initdata; |
1380 | EXPORT_SYMBOL(__per_cpu_shift); | 1419 | static void **pcpur_ptrs __initdata; |
1381 | 1420 | ||
1382 | void __init real_setup_per_cpu_areas(void) | 1421 | static struct page * __init pcpur_get_page(unsigned int cpu, int pageno) |
1383 | { | 1422 | { |
1384 | unsigned long paddr, goal, size, i; | 1423 | size_t off = (size_t)pageno << PAGE_SHIFT; |
1385 | char *ptr; | ||
1386 | 1424 | ||
1387 | /* Copy section for each CPU (we discard the original) */ | 1425 | if (off >= pcpur_size) |
1388 | goal = PERCPU_ENOUGH_ROOM; | 1426 | return NULL; |
1389 | 1427 | ||
1390 | __per_cpu_shift = PAGE_SHIFT; | 1428 | return virt_to_page(pcpur_ptrs[cpu] + off); |
1391 | for (size = PAGE_SIZE; size < goal; size <<= 1UL) | 1429 | } |
1392 | __per_cpu_shift++; | 1430 | |
1431 | #define PCPU_CHUNK_SIZE (4UL * 1024UL * 1024UL) | ||
1432 | |||
1433 | static void __init pcpu_map_range(unsigned long start, unsigned long end, | ||
1434 | struct page *page) | ||
1435 | { | ||
1436 | unsigned long pfn = page_to_pfn(page); | ||
1437 | unsigned long pte_base; | ||
1438 | |||
1439 | BUG_ON((pfn<<PAGE_SHIFT)&(PCPU_CHUNK_SIZE - 1UL)); | ||
1440 | |||
1441 | pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U | | ||
1442 | _PAGE_CP_4U | _PAGE_CV_4U | | ||
1443 | _PAGE_P_4U | _PAGE_W_4U); | ||
1444 | if (tlb_type == hypervisor) | ||
1445 | pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V | | ||
1446 | _PAGE_CP_4V | _PAGE_CV_4V | | ||
1447 | _PAGE_P_4V | _PAGE_W_4V); | ||
1448 | |||
1449 | while (start < end) { | ||
1450 | pgd_t *pgd = pgd_offset_k(start); | ||
1451 | unsigned long this_end; | ||
1452 | pud_t *pud; | ||
1453 | pmd_t *pmd; | ||
1454 | pte_t *pte; | ||
1455 | |||
1456 | pud = pud_offset(pgd, start); | ||
1457 | if (pud_none(*pud)) { | ||
1458 | pmd_t *new; | ||
1459 | |||
1460 | new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); | ||
1461 | pud_populate(&init_mm, pud, new); | ||
1462 | } | ||
1463 | |||
1464 | pmd = pmd_offset(pud, start); | ||
1465 | if (!pmd_present(*pmd)) { | ||
1466 | pte_t *new; | ||
1467 | |||
1468 | new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); | ||
1469 | pmd_populate_kernel(&init_mm, pmd, new); | ||
1470 | } | ||
1393 | 1471 | ||
1394 | paddr = lmb_alloc(size * NR_CPUS, PAGE_SIZE); | 1472 | pte = pte_offset_kernel(pmd, start); |
1395 | if (!paddr) { | 1473 | this_end = (start + PMD_SIZE) & PMD_MASK; |
1396 | prom_printf("Cannot allocate per-cpu memory.\n"); | 1474 | if (this_end > end) |
1397 | prom_halt(); | 1475 | this_end = end; |
1476 | |||
1477 | while (start < this_end) { | ||
1478 | unsigned long paddr = pfn << PAGE_SHIFT; | ||
1479 | |||
1480 | pte_val(*pte) = (paddr | pte_base); | ||
1481 | |||
1482 | start += PAGE_SIZE; | ||
1483 | pte++; | ||
1484 | pfn++; | ||
1485 | } | ||
1486 | } | ||
1487 | } | ||
1488 | |||
1489 | void __init setup_per_cpu_areas(void) | ||
1490 | { | ||
1491 | size_t dyn_size, static_size = __per_cpu_end - __per_cpu_start; | ||
1492 | static struct vm_struct vm; | ||
1493 | unsigned long delta, cpu; | ||
1494 | size_t pcpu_unit_size; | ||
1495 | size_t ptrs_size; | ||
1496 | |||
1497 | pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE + | ||
1498 | PERCPU_DYNAMIC_RESERVE); | ||
1499 | dyn_size = pcpur_size - static_size - PERCPU_MODULE_RESERVE; | ||
1500 | |||
1501 | |||
1502 | ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0])); | ||
1503 | pcpur_ptrs = alloc_bootmem(ptrs_size); | ||
1504 | |||
1505 | for_each_possible_cpu(cpu) { | ||
1506 | pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PCPU_CHUNK_SIZE, | ||
1507 | PCPU_CHUNK_SIZE); | ||
1508 | |||
1509 | free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size), | ||
1510 | PCPU_CHUNK_SIZE - pcpur_size); | ||
1511 | |||
1512 | memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size); | ||
1398 | } | 1513 | } |
1399 | 1514 | ||
1400 | ptr = __va(paddr); | 1515 | /* allocate address and map */ |
1401 | __per_cpu_base = ptr - __per_cpu_start; | 1516 | vm.flags = VM_ALLOC; |
1517 | vm.size = num_possible_cpus() * PCPU_CHUNK_SIZE; | ||
1518 | vm_area_register_early(&vm, PCPU_CHUNK_SIZE); | ||
1519 | |||
1520 | for_each_possible_cpu(cpu) { | ||
1521 | unsigned long start = (unsigned long) vm.addr; | ||
1522 | unsigned long end; | ||
1523 | |||
1524 | start += cpu * PCPU_CHUNK_SIZE; | ||
1525 | end = start + PCPU_CHUNK_SIZE; | ||
1526 | pcpu_map_range(start, end, virt_to_page(pcpur_ptrs[cpu])); | ||
1527 | } | ||
1402 | 1528 | ||
1403 | for (i = 0; i < NR_CPUS; i++, ptr += size) | 1529 | pcpu_unit_size = pcpu_setup_first_chunk(pcpur_get_page, static_size, |
1404 | memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); | 1530 | PERCPU_MODULE_RESERVE, dyn_size, |
1531 | PCPU_CHUNK_SIZE, vm.addr, NULL); | ||
1532 | |||
1533 | free_bootmem(__pa(pcpur_ptrs), ptrs_size); | ||
1534 | |||
1535 | delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; | ||
1536 | for_each_possible_cpu(cpu) { | ||
1537 | __per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size; | ||
1538 | } | ||
1405 | 1539 | ||
1406 | /* Setup %g5 for the boot cpu. */ | 1540 | /* Setup %g5 for the boot cpu. */ |
1407 | __local_per_cpu_offset = __per_cpu_offset(smp_processor_id()); | 1541 | __local_per_cpu_offset = __per_cpu_offset(smp_processor_id()); |
1542 | |||
1543 | of_fill_in_cpu_data(); | ||
1544 | if (tlb_type == hypervisor) | ||
1545 | mdesc_fill_in_cpu_data(cpu_all_mask); | ||
1408 | } | 1546 | } |
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S index 00ec3b15f38c..690901657291 100644 --- a/arch/sparc/kernel/systbls_32.S +++ b/arch/sparc/kernel/systbls_32.S | |||
@@ -81,4 +81,6 @@ sys_call_table: | |||
81 | /*305*/ .long sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait | 81 | /*305*/ .long sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait |
82 | /*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate | 82 | /*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate |
83 | /*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 | 83 | /*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 |
84 | /*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv, sys_pwritev | 84 | /*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv |
85 | /*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo | ||
86 | |||
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S index 82b5bf85b9d2..6b3ee88e253c 100644 --- a/arch/sparc/kernel/systbls_64.S +++ b/arch/sparc/kernel/systbls_64.S | |||
@@ -82,7 +82,8 @@ sys_call_table32: | |||
82 | .word compat_sys_set_mempolicy, compat_sys_kexec_load, compat_sys_move_pages, sys_getcpu, compat_sys_epoll_pwait | 82 | .word compat_sys_set_mempolicy, compat_sys_kexec_load, compat_sys_move_pages, sys_getcpu, compat_sys_epoll_pwait |
83 | /*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate | 83 | /*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate |
84 | .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1 | 84 | .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1 |
85 | /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv, compat_sys_pwritev | 85 | /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv |
86 | .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo | ||
86 | 87 | ||
87 | #endif /* CONFIG_COMPAT */ | 88 | #endif /* CONFIG_COMPAT */ |
88 | 89 | ||
@@ -156,4 +157,5 @@ sys_call_table: | |||
156 | .word sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait | 157 | .word sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait |
157 | /*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate | 158 | /*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate |
158 | .word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 | 159 | .word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 |
159 | /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv, sys_pwritev | 160 | /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv |
161 | .word sys_pwritev, sys_rt_tgsigqueueinfo | ||
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index d809c4ebb48f..10f7bb9fc140 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c | |||
@@ -2509,6 +2509,7 @@ void do_getpsr(struct pt_regs *regs) | |||
2509 | } | 2509 | } |
2510 | 2510 | ||
2511 | struct trap_per_cpu trap_block[NR_CPUS]; | 2511 | struct trap_per_cpu trap_block[NR_CPUS]; |
2512 | EXPORT_SYMBOL(trap_block); | ||
2512 | 2513 | ||
2513 | /* This can get invoked before sched_init() so play it super safe | 2514 | /* This can get invoked before sched_init() so play it super safe |
2514 | * and use hard_smp_processor_id(). | 2515 | * and use hard_smp_processor_id(). |
@@ -2530,84 +2531,97 @@ extern void tsb_config_offsets_are_bolixed_dave(void); | |||
2530 | void __init trap_init(void) | 2531 | void __init trap_init(void) |
2531 | { | 2532 | { |
2532 | /* Compile time sanity check. */ | 2533 | /* Compile time sanity check. */ |
2533 | if (TI_TASK != offsetof(struct thread_info, task) || | 2534 | BUILD_BUG_ON(TI_TASK != offsetof(struct thread_info, task) || |
2534 | TI_FLAGS != offsetof(struct thread_info, flags) || | 2535 | TI_FLAGS != offsetof(struct thread_info, flags) || |
2535 | TI_CPU != offsetof(struct thread_info, cpu) || | 2536 | TI_CPU != offsetof(struct thread_info, cpu) || |
2536 | TI_FPSAVED != offsetof(struct thread_info, fpsaved) || | 2537 | TI_FPSAVED != offsetof(struct thread_info, fpsaved) || |
2537 | TI_KSP != offsetof(struct thread_info, ksp) || | 2538 | TI_KSP != offsetof(struct thread_info, ksp) || |
2538 | TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) || | 2539 | TI_FAULT_ADDR != offsetof(struct thread_info, |
2539 | TI_KREGS != offsetof(struct thread_info, kregs) || | 2540 | fault_address) || |
2540 | TI_UTRAPS != offsetof(struct thread_info, utraps) || | 2541 | TI_KREGS != offsetof(struct thread_info, kregs) || |
2541 | TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) || | 2542 | TI_UTRAPS != offsetof(struct thread_info, utraps) || |
2542 | TI_REG_WINDOW != offsetof(struct thread_info, reg_window) || | 2543 | TI_EXEC_DOMAIN != offsetof(struct thread_info, |
2543 | TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) || | 2544 | exec_domain) || |
2544 | TI_GSR != offsetof(struct thread_info, gsr) || | 2545 | TI_REG_WINDOW != offsetof(struct thread_info, |
2545 | TI_XFSR != offsetof(struct thread_info, xfsr) || | 2546 | reg_window) || |
2546 | TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) || | 2547 | TI_RWIN_SPTRS != offsetof(struct thread_info, |
2547 | TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) || | 2548 | rwbuf_stkptrs) || |
2548 | TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) || | 2549 | TI_GSR != offsetof(struct thread_info, gsr) || |
2549 | TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) || | 2550 | TI_XFSR != offsetof(struct thread_info, xfsr) || |
2550 | TI_PCR != offsetof(struct thread_info, pcr_reg) || | 2551 | TI_USER_CNTD0 != offsetof(struct thread_info, |
2551 | TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) || | 2552 | user_cntd0) || |
2552 | TI_NEW_CHILD != offsetof(struct thread_info, new_child) || | 2553 | TI_USER_CNTD1 != offsetof(struct thread_info, |
2553 | TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) || | 2554 | user_cntd1) || |
2554 | TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) || | 2555 | TI_KERN_CNTD0 != offsetof(struct thread_info, |
2555 | TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) || | 2556 | kernel_cntd0) || |
2556 | TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) || | 2557 | TI_KERN_CNTD1 != offsetof(struct thread_info, |
2557 | TI_FPREGS != offsetof(struct thread_info, fpregs) || | 2558 | kernel_cntd1) || |
2558 | (TI_FPREGS & (64 - 1))) | 2559 | TI_PCR != offsetof(struct thread_info, pcr_reg) || |
2559 | thread_info_offsets_are_bolixed_dave(); | 2560 | TI_PRE_COUNT != offsetof(struct thread_info, |
2560 | 2561 | preempt_count) || | |
2561 | if (TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) || | 2562 | TI_NEW_CHILD != offsetof(struct thread_info, new_child) || |
2562 | (TRAP_PER_CPU_PGD_PADDR != | 2563 | TI_SYS_NOERROR != offsetof(struct thread_info, |
2563 | offsetof(struct trap_per_cpu, pgd_paddr)) || | 2564 | syscall_noerror) || |
2564 | (TRAP_PER_CPU_CPU_MONDO_PA != | 2565 | TI_RESTART_BLOCK != offsetof(struct thread_info, |
2565 | offsetof(struct trap_per_cpu, cpu_mondo_pa)) || | 2566 | restart_block) || |
2566 | (TRAP_PER_CPU_DEV_MONDO_PA != | 2567 | TI_KUNA_REGS != offsetof(struct thread_info, |
2567 | offsetof(struct trap_per_cpu, dev_mondo_pa)) || | 2568 | kern_una_regs) || |
2568 | (TRAP_PER_CPU_RESUM_MONDO_PA != | 2569 | TI_KUNA_INSN != offsetof(struct thread_info, |
2569 | offsetof(struct trap_per_cpu, resum_mondo_pa)) || | 2570 | kern_una_insn) || |
2570 | (TRAP_PER_CPU_RESUM_KBUF_PA != | 2571 | TI_FPREGS != offsetof(struct thread_info, fpregs) || |
2571 | offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) || | 2572 | (TI_FPREGS & (64 - 1))); |
2572 | (TRAP_PER_CPU_NONRESUM_MONDO_PA != | 2573 | |
2573 | offsetof(struct trap_per_cpu, nonresum_mondo_pa)) || | 2574 | BUILD_BUG_ON(TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, |
2574 | (TRAP_PER_CPU_NONRESUM_KBUF_PA != | 2575 | thread) || |
2575 | offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) || | 2576 | (TRAP_PER_CPU_PGD_PADDR != |
2576 | (TRAP_PER_CPU_FAULT_INFO != | 2577 | offsetof(struct trap_per_cpu, pgd_paddr)) || |
2577 | offsetof(struct trap_per_cpu, fault_info)) || | 2578 | (TRAP_PER_CPU_CPU_MONDO_PA != |
2578 | (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA != | 2579 | offsetof(struct trap_per_cpu, cpu_mondo_pa)) || |
2579 | offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) || | 2580 | (TRAP_PER_CPU_DEV_MONDO_PA != |
2580 | (TRAP_PER_CPU_CPU_LIST_PA != | 2581 | offsetof(struct trap_per_cpu, dev_mondo_pa)) || |
2581 | offsetof(struct trap_per_cpu, cpu_list_pa)) || | 2582 | (TRAP_PER_CPU_RESUM_MONDO_PA != |
2582 | (TRAP_PER_CPU_TSB_HUGE != | 2583 | offsetof(struct trap_per_cpu, resum_mondo_pa)) || |
2583 | offsetof(struct trap_per_cpu, tsb_huge)) || | 2584 | (TRAP_PER_CPU_RESUM_KBUF_PA != |
2584 | (TRAP_PER_CPU_TSB_HUGE_TEMP != | 2585 | offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) || |
2585 | offsetof(struct trap_per_cpu, tsb_huge_temp)) || | 2586 | (TRAP_PER_CPU_NONRESUM_MONDO_PA != |
2586 | (TRAP_PER_CPU_IRQ_WORKLIST_PA != | 2587 | offsetof(struct trap_per_cpu, nonresum_mondo_pa)) || |
2587 | offsetof(struct trap_per_cpu, irq_worklist_pa)) || | 2588 | (TRAP_PER_CPU_NONRESUM_KBUF_PA != |
2588 | (TRAP_PER_CPU_CPU_MONDO_QMASK != | 2589 | offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) || |
2589 | offsetof(struct trap_per_cpu, cpu_mondo_qmask)) || | 2590 | (TRAP_PER_CPU_FAULT_INFO != |
2590 | (TRAP_PER_CPU_DEV_MONDO_QMASK != | 2591 | offsetof(struct trap_per_cpu, fault_info)) || |
2591 | offsetof(struct trap_per_cpu, dev_mondo_qmask)) || | 2592 | (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA != |
2592 | (TRAP_PER_CPU_RESUM_QMASK != | 2593 | offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) || |
2593 | offsetof(struct trap_per_cpu, resum_qmask)) || | 2594 | (TRAP_PER_CPU_CPU_LIST_PA != |
2594 | (TRAP_PER_CPU_NONRESUM_QMASK != | 2595 | offsetof(struct trap_per_cpu, cpu_list_pa)) || |
2595 | offsetof(struct trap_per_cpu, nonresum_qmask))) | 2596 | (TRAP_PER_CPU_TSB_HUGE != |
2596 | trap_per_cpu_offsets_are_bolixed_dave(); | 2597 | offsetof(struct trap_per_cpu, tsb_huge)) || |
2597 | 2598 | (TRAP_PER_CPU_TSB_HUGE_TEMP != | |
2598 | if ((TSB_CONFIG_TSB != | 2599 | offsetof(struct trap_per_cpu, tsb_huge_temp)) || |
2599 | offsetof(struct tsb_config, tsb)) || | 2600 | (TRAP_PER_CPU_IRQ_WORKLIST_PA != |
2600 | (TSB_CONFIG_RSS_LIMIT != | 2601 | offsetof(struct trap_per_cpu, irq_worklist_pa)) || |
2601 | offsetof(struct tsb_config, tsb_rss_limit)) || | 2602 | (TRAP_PER_CPU_CPU_MONDO_QMASK != |
2602 | (TSB_CONFIG_NENTRIES != | 2603 | offsetof(struct trap_per_cpu, cpu_mondo_qmask)) || |
2603 | offsetof(struct tsb_config, tsb_nentries)) || | 2604 | (TRAP_PER_CPU_DEV_MONDO_QMASK != |
2604 | (TSB_CONFIG_REG_VAL != | 2605 | offsetof(struct trap_per_cpu, dev_mondo_qmask)) || |
2605 | offsetof(struct tsb_config, tsb_reg_val)) || | 2606 | (TRAP_PER_CPU_RESUM_QMASK != |
2606 | (TSB_CONFIG_MAP_VADDR != | 2607 | offsetof(struct trap_per_cpu, resum_qmask)) || |
2607 | offsetof(struct tsb_config, tsb_map_vaddr)) || | 2608 | (TRAP_PER_CPU_NONRESUM_QMASK != |
2608 | (TSB_CONFIG_MAP_PTE != | 2609 | offsetof(struct trap_per_cpu, nonresum_qmask)) || |
2609 | offsetof(struct tsb_config, tsb_map_pte))) | 2610 | (TRAP_PER_CPU_PER_CPU_BASE != |
2610 | tsb_config_offsets_are_bolixed_dave(); | 2611 | offsetof(struct trap_per_cpu, __per_cpu_base))); |
2612 | |||
2613 | BUILD_BUG_ON((TSB_CONFIG_TSB != | ||
2614 | offsetof(struct tsb_config, tsb)) || | ||
2615 | (TSB_CONFIG_RSS_LIMIT != | ||
2616 | offsetof(struct tsb_config, tsb_rss_limit)) || | ||
2617 | (TSB_CONFIG_NENTRIES != | ||
2618 | offsetof(struct tsb_config, tsb_nentries)) || | ||
2619 | (TSB_CONFIG_REG_VAL != | ||
2620 | offsetof(struct tsb_config, tsb_reg_val)) || | ||
2621 | (TSB_CONFIG_MAP_VADDR != | ||
2622 | offsetof(struct tsb_config, tsb_map_vaddr)) || | ||
2623 | (TSB_CONFIG_MAP_PTE != | ||
2624 | offsetof(struct tsb_config, tsb_map_pte))); | ||
2611 | 2625 | ||
2612 | /* Attach to the address space of init_task. On SMP we | 2626 | /* Attach to the address space of init_task. On SMP we |
2613 | * do this in smp.c:smp_callin for other cpus. | 2627 | * do this in smp.c:smp_callin for other cpus. |
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c index cbb282dab5a7..26bb3919ff1f 100644 --- a/arch/sparc/mm/init_32.c +++ b/arch/sparc/mm/init_32.c | |||
@@ -358,6 +358,7 @@ void __init paging_init(void) | |||
358 | protection_map[15] = PAGE_SHARED; | 358 | protection_map[15] = PAGE_SHARED; |
359 | btfixup(); | 359 | btfixup(); |
360 | prom_build_devicetree(); | 360 | prom_build_devicetree(); |
361 | of_fill_in_cpu_data(); | ||
361 | device_scan(); | 362 | device_scan(); |
362 | } | 363 | } |
363 | 364 | ||
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index f26a352c08a0..ca92e2f54e4d 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c | |||
@@ -1679,11 +1679,6 @@ pgd_t swapper_pg_dir[2048]; | |||
1679 | static void sun4u_pgprot_init(void); | 1679 | static void sun4u_pgprot_init(void); |
1680 | static void sun4v_pgprot_init(void); | 1680 | static void sun4v_pgprot_init(void); |
1681 | 1681 | ||
1682 | /* Dummy function */ | ||
1683 | void __init setup_per_cpu_areas(void) | ||
1684 | { | ||
1685 | } | ||
1686 | |||
1687 | void __init paging_init(void) | 1682 | void __init paging_init(void) |
1688 | { | 1683 | { |
1689 | unsigned long end_pfn, shift, phys_base; | 1684 | unsigned long end_pfn, shift, phys_base; |
@@ -1799,16 +1794,13 @@ void __init paging_init(void) | |||
1799 | if (tlb_type == hypervisor) | 1794 | if (tlb_type == hypervisor) |
1800 | sun4v_ktsb_register(); | 1795 | sun4v_ktsb_register(); |
1801 | 1796 | ||
1802 | /* We must setup the per-cpu areas before we pull in the | ||
1803 | * PROM and the MDESC. The code there fills in cpu and | ||
1804 | * other information into per-cpu data structures. | ||
1805 | */ | ||
1806 | real_setup_per_cpu_areas(); | ||
1807 | |||
1808 | prom_build_devicetree(); | 1797 | prom_build_devicetree(); |
1798 | of_populate_present_mask(); | ||
1809 | 1799 | ||
1810 | if (tlb_type == hypervisor) | 1800 | if (tlb_type == hypervisor) { |
1811 | sun4v_mdesc_init(); | 1801 | sun4v_mdesc_init(); |
1802 | mdesc_populate_present_mask(cpu_all_mask); | ||
1803 | } | ||
1812 | 1804 | ||
1813 | /* Once the OF device tree and MDESC have been setup, we know | 1805 | /* Once the OF device tree and MDESC have been setup, we know |
1814 | * the list of possible cpus. Therefore we can allocate the | 1806 | * the list of possible cpus. Therefore we can allocate the |
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index 06c9a7d98206..ade4eb373bdd 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/fs.h> | 19 | #include <linux/fs.h> |
20 | #include <linux/seq_file.h> | 20 | #include <linux/seq_file.h> |
21 | #include <linux/kdebug.h> | 21 | #include <linux/kdebug.h> |
22 | #include <linux/log2.h> | ||
22 | 23 | ||
23 | #include <asm/bitext.h> | 24 | #include <asm/bitext.h> |
24 | #include <asm/page.h> | 25 | #include <asm/page.h> |
@@ -349,7 +350,7 @@ static void srmmu_free_nocache(unsigned long vaddr, int size) | |||
349 | vaddr, srmmu_nocache_end); | 350 | vaddr, srmmu_nocache_end); |
350 | BUG(); | 351 | BUG(); |
351 | } | 352 | } |
352 | if (size & (size-1)) { | 353 | if (!is_power_of_2(size)) { |
353 | printk("Size 0x%x is not a power of 2\n", size); | 354 | printk("Size 0x%x is not a power of 2\n", size); |
354 | BUG(); | 355 | BUG(); |
355 | } | 356 | } |
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index ef0ae207a7c8..096d19aea2f7 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c | |||
@@ -463,7 +463,7 @@ static void uv_heartbeat(unsigned long ignored) | |||
463 | uv_set_scir_bits(bits); | 463 | uv_set_scir_bits(bits); |
464 | 464 | ||
465 | /* enable next timer period */ | 465 | /* enable next timer period */ |
466 | mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL); | 466 | mod_timer_pinned(timer, jiffies + SCIR_CPU_HB_INTERVAL); |
467 | } | 467 | } |
468 | 468 | ||
469 | static void __cpuinit uv_heartbeat_enable(int cpu) | 469 | static void __cpuinit uv_heartbeat_enable(int cpu) |