Diffstat (limited to 'arch')
163 files changed, 3040 insertions, 1894 deletions
diff --git a/arch/Kconfig b/arch/Kconfig index 4f55c736be11..5b448a74d0f7 100644 --- a/arch/Kconfig +++ b/arch/Kconfig | |||
@@ -47,18 +47,29 @@ config KPROBES | |||
47 | If in doubt, say "N". | 47 | If in doubt, say "N". |
48 | 48 | ||
49 | config JUMP_LABEL | 49 | config JUMP_LABEL |
50 | bool "Optimize trace point call sites" | 50 | bool "Optimize very unlikely/likely branches" |
51 | depends on HAVE_ARCH_JUMP_LABEL | 51 | depends on HAVE_ARCH_JUMP_LABEL |
52 | help | 52 | help |
53 | This option enables a transparent branch optimization that | ||
54 | makes certain almost-always-true or almost-always-false branch | ||
55 | conditions even cheaper to execute within the kernel. | ||
56 | |||
57 | Certain performance-sensitive kernel code, such as trace points, | ||
58 | scheduler functionality, networking code and KVM have such | ||
59 | branches and include support for this optimization technique. | ||
60 | |||
53 | If it is detected that the compiler has support for "asm goto", | 61 | If it is detected that the compiler has support for "asm goto", |
54 | the kernel will compile trace point locations with just a | 62 | the kernel will compile such branches with just a nop |
55 | nop instruction. When trace points are enabled, the nop will | 63 | instruction. When the condition flag is toggled to true, the |
56 | be converted to a jump to the trace function. This technique | 64 | nop will be converted to a jump instruction to execute the |
57 | lowers overhead and stress on the branch prediction of the | 65 | conditional block of instructions. |
58 | processor. | 66 | |
59 | 67 | This technique lowers overhead and stress on the branch prediction | |
60 | On i386, options added to the compiler flags may increase | 68 | of the processor and generally makes the kernel faster. The update |
61 | the size of the kernel slightly. | 69 | of the condition is slower, but those are always very rare. |
70 | |||
71 | ( On 32-bit x86, the necessary options added to the compiler | ||
72 | flags may increase the size of the kernel slightly. ) | ||
62 | 73 | ||
63 | config OPTPROBES | 74 | config OPTPROBES |
64 | def_bool y | 75 | def_bool y |
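The reworded JUMP_LABEL help text above describes branch sites compiled down to a single nop that is later patched into a jump when the condition flips. As a minimal, hedged illustration of the underlying "asm goto" pattern (simplified x86-style sketch only; the kernel's real implementation lives in the per-arch jump_label code and differs in detail, and the section name here is made up):

static inline bool sketch_static_branch(void)
{
	/*
	 * The compiler emits a single nop at the branch site and records
	 * the site/target pair in a side table, so run-time code can later
	 * patch the nop into a jump to l_yes and flip the branch.
	 */
	asm goto("1: nop\n\t"
		 ".pushsection .sketch_jump_table, \"aw\"\n\t"
		 ".quad 1b, %l[l_yes]\n\t"
		 ".popsection\n\t"
		 : : : : l_yes);
	return false;	/* common case: branch not taken, essentially free */
l_yes:
	return true;	/* reached only after the nop has been patched */
}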
diff --git a/arch/alpha/include/asm/machvec.h b/arch/alpha/include/asm/machvec.h index 13cd42743810..72dbf2359270 100644 --- a/arch/alpha/include/asm/machvec.h +++ b/arch/alpha/include/asm/machvec.h | |||
@@ -90,7 +90,7 @@ struct alpha_machine_vector | |||
90 | void (*kill_arch)(int); | 90 | void (*kill_arch)(int); |
91 | 91 | ||
92 | u8 (*pci_swizzle)(struct pci_dev *, u8 *); | 92 | u8 (*pci_swizzle)(struct pci_dev *, u8 *); |
93 | int (*pci_map_irq)(struct pci_dev *, u8, u8); | 93 | int (*pci_map_irq)(const struct pci_dev *, u8, u8); |
94 | struct pci_ops *pci_ops; | 94 | struct pci_ops *pci_ops; |
95 | 95 | ||
96 | struct _alpha_agp_info *(*agp_info)(void); | 96 | struct _alpha_agp_info *(*agp_info)(void); |
diff --git a/arch/alpha/include/asm/socket.h b/arch/alpha/include/asm/socket.h index 082355f159e6..dcb221a4b5be 100644 --- a/arch/alpha/include/asm/socket.h +++ b/arch/alpha/include/asm/socket.h | |||
@@ -71,6 +71,10 @@ | |||
71 | 71 | ||
72 | #define SO_WIFI_STATUS 41 | 72 | #define SO_WIFI_STATUS 41 |
73 | #define SCM_WIFI_STATUS SO_WIFI_STATUS | 73 | #define SCM_WIFI_STATUS SO_WIFI_STATUS |
74 | #define SO_PEEK_OFF 42 | ||
75 | |||
76 | /* Instruct lower device to use last 4-bytes of skb data as FCS */ | ||
77 | #define SO_NOFCS 43 | ||
74 | 78 | ||
75 | /* O_NONBLOCK clashes with the bits used for socket types. Therefore we | 79 | /* O_NONBLOCK clashes with the bits used for socket types. Therefore we |
76 | * have to define SOCK_NONBLOCK to a different value here. | 80 | * have to define SOCK_NONBLOCK to a different value here. |
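SO_PEEK_OFF and SO_NOFCS are new option numbers wired up across all the per-arch socket.h headers in this merge. A hedged userspace sketch of how they would be set (the constants may need defining by hand if the libc headers predate this change, and which socket families honour them is decided elsewhere in this series):

#include <sys/socket.h>

static int sketch_enable_peek_off(int fd)
{
	int off = 0;	/* peek from offset 0; the offset advances with each MSG_PEEK */

	return setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
}

static int sketch_enable_nofcs(int fd)
{
	int one = 1;	/* treat the last 4 bytes of the frame as the FCS */

	return setsockopt(fd, SOL_SOCKET, SO_NOFCS, &one, sizeof(one));
}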
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c index 8143cd7cdbfb..0dae252f7a33 100644 --- a/arch/alpha/kernel/perf_event.c +++ b/arch/alpha/kernel/perf_event.c | |||
@@ -685,6 +685,10 @@ static int alpha_pmu_event_init(struct perf_event *event) | |||
685 | { | 685 | { |
686 | int err; | 686 | int err; |
687 | 687 | ||
688 | /* does not support taken branch sampling */ | ||
689 | if (has_branch_stack(event)) | ||
690 | return -EOPNOTSUPP; | ||
691 | |||
688 | switch (event->attr.type) { | 692 | switch (event->attr.type) { |
689 | case PERF_TYPE_RAW: | 693 | case PERF_TYPE_RAW: |
690 | case PERF_TYPE_HARDWARE: | 694 | case PERF_TYPE_HARDWARE: |
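The new early return rejects events that request taken-branch sampling, which this PMU cannot provide. has_branch_stack() is assumed to be the generic helper that simply inspects the requested sample flags, roughly:

/* Sketch of the generic helper (see include/linux/perf_event.h). */
static inline bool has_branch_stack(struct perf_event *event)
{
	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}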
diff --git a/arch/alpha/kernel/srmcons.c b/arch/alpha/kernel/srmcons.c index 783f4e50c111..3ea809430eda 100644 --- a/arch/alpha/kernel/srmcons.c +++ b/arch/alpha/kernel/srmcons.c | |||
@@ -30,10 +30,9 @@ static int srm_is_registered_console = 0; | |||
30 | #define MAX_SRM_CONSOLE_DEVICES 1 /* only support 1 console device */ | 30 | #define MAX_SRM_CONSOLE_DEVICES 1 /* only support 1 console device */ |
31 | 31 | ||
32 | struct srmcons_private { | 32 | struct srmcons_private { |
33 | struct tty_struct *tty; | 33 | struct tty_port port; |
34 | struct timer_list timer; | 34 | struct timer_list timer; |
35 | spinlock_t lock; | 35 | } srmcons_singleton; |
36 | }; | ||
37 | 36 | ||
38 | typedef union _srmcons_result { | 37 | typedef union _srmcons_result { |
39 | struct { | 38 | struct { |
@@ -68,22 +67,21 @@ static void | |||
68 | srmcons_receive_chars(unsigned long data) | 67 | srmcons_receive_chars(unsigned long data) |
69 | { | 68 | { |
70 | struct srmcons_private *srmconsp = (struct srmcons_private *)data; | 69 | struct srmcons_private *srmconsp = (struct srmcons_private *)data; |
70 | struct tty_port *port = &srmconsp->port; | ||
71 | unsigned long flags; | 71 | unsigned long flags; |
72 | int incr = 10; | 72 | int incr = 10; |
73 | 73 | ||
74 | local_irq_save(flags); | 74 | local_irq_save(flags); |
75 | if (spin_trylock(&srmcons_callback_lock)) { | 75 | if (spin_trylock(&srmcons_callback_lock)) { |
76 | if (!srmcons_do_receive_chars(srmconsp->tty)) | 76 | if (!srmcons_do_receive_chars(port->tty)) |
77 | incr = 100; | 77 | incr = 100; |
78 | spin_unlock(&srmcons_callback_lock); | 78 | spin_unlock(&srmcons_callback_lock); |
79 | } | 79 | } |
80 | 80 | ||
81 | spin_lock(&srmconsp->lock); | 81 | spin_lock(&port->lock); |
82 | if (srmconsp->tty) { | 82 | if (port->tty) |
83 | srmconsp->timer.expires = jiffies + incr; | 83 | mod_timer(&srmconsp->timer, jiffies + incr); |
84 | add_timer(&srmconsp->timer); | 84 | spin_unlock(&port->lock); |
85 | } | ||
86 | spin_unlock(&srmconsp->lock); | ||
87 | 85 | ||
88 | local_irq_restore(flags); | 86 | local_irq_restore(flags); |
89 | } | 87 | } |
@@ -156,56 +154,22 @@ srmcons_chars_in_buffer(struct tty_struct *tty) | |||
156 | } | 154 | } |
157 | 155 | ||
158 | static int | 156 | static int |
159 | srmcons_get_private_struct(struct srmcons_private **ps) | ||
160 | { | ||
161 | static struct srmcons_private *srmconsp = NULL; | ||
162 | static DEFINE_SPINLOCK(srmconsp_lock); | ||
163 | unsigned long flags; | ||
164 | int retval = 0; | ||
165 | |||
166 | if (srmconsp == NULL) { | ||
167 | srmconsp = kmalloc(sizeof(*srmconsp), GFP_KERNEL); | ||
168 | spin_lock_irqsave(&srmconsp_lock, flags); | ||
169 | |||
170 | if (srmconsp == NULL) | ||
171 | retval = -ENOMEM; | ||
172 | else { | ||
173 | srmconsp->tty = NULL; | ||
174 | spin_lock_init(&srmconsp->lock); | ||
175 | init_timer(&srmconsp->timer); | ||
176 | } | ||
177 | |||
178 | spin_unlock_irqrestore(&srmconsp_lock, flags); | ||
179 | } | ||
180 | |||
181 | *ps = srmconsp; | ||
182 | return retval; | ||
183 | } | ||
184 | |||
185 | static int | ||
186 | srmcons_open(struct tty_struct *tty, struct file *filp) | 157 | srmcons_open(struct tty_struct *tty, struct file *filp) |
187 | { | 158 | { |
188 | struct srmcons_private *srmconsp; | 159 | struct srmcons_private *srmconsp = &srmcons_singleton; |
160 | struct tty_port *port = &srmconsp->port; | ||
189 | unsigned long flags; | 161 | unsigned long flags; |
190 | int retval; | ||
191 | |||
192 | retval = srmcons_get_private_struct(&srmconsp); | ||
193 | if (retval) | ||
194 | return retval; | ||
195 | 162 | ||
196 | spin_lock_irqsave(&srmconsp->lock, flags); | 163 | spin_lock_irqsave(&port->lock, flags); |
197 | 164 | ||
198 | if (!srmconsp->tty) { | 165 | if (!port->tty) { |
199 | tty->driver_data = srmconsp; | 166 | tty->driver_data = srmconsp; |
200 | 167 | tty->port = port; | |
201 | srmconsp->tty = tty; | 168 | port->tty = tty; /* XXX proper refcounting */ |
202 | srmconsp->timer.function = srmcons_receive_chars; | 169 | mod_timer(&srmconsp->timer, jiffies + 10); |
203 | srmconsp->timer.data = (unsigned long)srmconsp; | ||
204 | srmconsp->timer.expires = jiffies + 10; | ||
205 | add_timer(&srmconsp->timer); | ||
206 | } | 170 | } |
207 | 171 | ||
208 | spin_unlock_irqrestore(&srmconsp->lock, flags); | 172 | spin_unlock_irqrestore(&port->lock, flags); |
209 | 173 | ||
210 | return 0; | 174 | return 0; |
211 | } | 175 | } |
@@ -214,16 +178,17 @@ static void | |||
214 | srmcons_close(struct tty_struct *tty, struct file *filp) | 178 | srmcons_close(struct tty_struct *tty, struct file *filp) |
215 | { | 179 | { |
216 | struct srmcons_private *srmconsp = tty->driver_data; | 180 | struct srmcons_private *srmconsp = tty->driver_data; |
181 | struct tty_port *port = &srmconsp->port; | ||
217 | unsigned long flags; | 182 | unsigned long flags; |
218 | 183 | ||
219 | spin_lock_irqsave(&srmconsp->lock, flags); | 184 | spin_lock_irqsave(&port->lock, flags); |
220 | 185 | ||
221 | if (tty->count == 1) { | 186 | if (tty->count == 1) { |
222 | srmconsp->tty = NULL; | 187 | port->tty = NULL; |
223 | del_timer(&srmconsp->timer); | 188 | del_timer(&srmconsp->timer); |
224 | } | 189 | } |
225 | 190 | ||
226 | spin_unlock_irqrestore(&srmconsp->lock, flags); | 191 | spin_unlock_irqrestore(&port->lock, flags); |
227 | } | 192 | } |
228 | 193 | ||
229 | 194 | ||
@@ -240,6 +205,9 @@ static const struct tty_operations srmcons_ops = { | |||
240 | static int __init | 205 | static int __init |
241 | srmcons_init(void) | 206 | srmcons_init(void) |
242 | { | 207 | { |
208 | tty_port_init(&srmcons_singleton.port); | ||
209 | setup_timer(&srmcons_singleton.timer, srmcons_receive_chars, | ||
210 | (unsigned long)&srmcons_singleton); | ||
243 | if (srm_is_registered_console) { | 211 | if (srm_is_registered_console) { |
244 | struct tty_driver *driver; | 212 | struct tty_driver *driver; |
245 | int err; | 213 | int err; |
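Besides folding the driver state into a tty_port-based singleton, the conversion replaces the open-coded timer initialisation with setup_timer(). A sketch of the equivalence (pre-timer_setup() API, as used throughout this tree):

static void sketch_timer_init(struct srmcons_private *p)
{
	/* what srmcons_open() used to do by hand */
	init_timer(&p->timer);
	p->timer.function = srmcons_receive_chars;
	p->timer.data = (unsigned long)p;

	/* the equivalent one-liner now done once in srmcons_init() */
	setup_timer(&p->timer, srmcons_receive_chars, (unsigned long)p);
}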
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c index bb7f0c7cb17a..13f0717fc7fe 100644 --- a/arch/alpha/kernel/sys_dp264.c +++ b/arch/alpha/kernel/sys_dp264.c | |||
@@ -366,7 +366,7 @@ clipper_init_irq(void) | |||
366 | */ | 366 | */ |
367 | 367 | ||
368 | static int __init | 368 | static int __init |
369 | isa_irq_fixup(struct pci_dev *dev, int irq) | 369 | isa_irq_fixup(const struct pci_dev *dev, int irq) |
370 | { | 370 | { |
371 | u8 irq8; | 371 | u8 irq8; |
372 | 372 | ||
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h index 99cfe3607989..7523340afb8a 100644 --- a/arch/arm/include/asm/perf_event.h +++ b/arch/arm/include/asm/perf_event.h | |||
@@ -12,10 +12,6 @@ | |||
12 | #ifndef __ARM_PERF_EVENT_H__ | 12 | #ifndef __ARM_PERF_EVENT_H__ |
13 | #define __ARM_PERF_EVENT_H__ | 13 | #define __ARM_PERF_EVENT_H__ |
14 | 14 | ||
15 | /* ARM performance counters start from 1 (in the cp15 accesses) so use the | ||
16 | * same indexes here for consistency. */ | ||
17 | #define PERF_EVENT_INDEX_OFFSET 1 | ||
18 | |||
19 | /* ARM perf PMU IDs for use by internal perf clients. */ | 15 | /* ARM perf PMU IDs for use by internal perf clients. */ |
20 | enum arm_perf_pmu_ids { | 16 | enum arm_perf_pmu_ids { |
21 | ARM_PERF_PMU_ID_XSCALE1 = 0, | 17 | ARM_PERF_PMU_ID_XSCALE1 = 0, |
diff --git a/arch/arm/include/asm/socket.h b/arch/arm/include/asm/socket.h index dec6f9afb3cf..6433cadb6ed4 100644 --- a/arch/arm/include/asm/socket.h +++ b/arch/arm/include/asm/socket.h | |||
@@ -64,5 +64,9 @@ | |||
64 | 64 | ||
65 | #define SO_WIFI_STATUS 41 | 65 | #define SO_WIFI_STATUS 41 |
66 | #define SCM_WIFI_STATUS SO_WIFI_STATUS | 66 | #define SCM_WIFI_STATUS SO_WIFI_STATUS |
67 | #define SO_PEEK_OFF 42 | ||
68 | |||
69 | /* Instruct lower device to use last 4-bytes of skb data as FCS */ | ||
70 | #define SO_NOFCS 43 | ||
67 | 71 | ||
68 | #endif /* _ASM_SOCKET_H */ | 72 | #endif /* _ASM_SOCKET_H */ |
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index b2abfa18f137..8a89d3b7626b 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c | |||
@@ -539,6 +539,10 @@ static int armpmu_event_init(struct perf_event *event) | |||
539 | int err = 0; | 539 | int err = 0; |
540 | atomic_t *active_events = &armpmu->active_events; | 540 | atomic_t *active_events = &armpmu->active_events; |
541 | 541 | ||
542 | /* does not support taken branch sampling */ | ||
543 | if (has_branch_stack(event)) | ||
544 | return -EOPNOTSUPP; | ||
545 | |||
542 | if (armpmu->map_event(event) == -ENOENT) | 546 | if (armpmu->map_event(event) == -ENOENT) |
543 | return -ENOENT; | 547 | return -ENOENT; |
544 | 548 | ||
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 971d65c253a9..c2ae3cd331fe 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c | |||
@@ -239,9 +239,7 @@ void cpu_idle(void) | |||
239 | leds_event(led_idle_end); | 239 | leds_event(led_idle_end); |
240 | rcu_idle_exit(); | 240 | rcu_idle_exit(); |
241 | tick_nohz_idle_exit(); | 241 | tick_nohz_idle_exit(); |
242 | preempt_enable_no_resched(); | 242 | schedule_preempt_disabled(); |
243 | schedule(); | ||
244 | preempt_disable(); | ||
245 | } | 243 | } |
246 | } | 244 | } |
247 | 245 | ||
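This hunk, and the matching ones in the other arch idle loops further down, replace the preempt_enable_no_resched()/schedule()/preempt_disable() triple with a single helper. A sketch of what that helper is assumed to wrap (the in-tree version lives in the scheduler core and may use a scheduler-internal variant of the enable call):

void __sched schedule_preempt_disabled(void)
{
	preempt_enable_no_resched();
	schedule();
	preempt_disable();
}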
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index cdeb727527d3..d616ed51e7a7 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c | |||
@@ -295,13 +295,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void) | |||
295 | */ | 295 | */ |
296 | percpu_timer_setup(); | 296 | percpu_timer_setup(); |
297 | 297 | ||
298 | while (!cpu_active(cpu)) | ||
299 | cpu_relax(); | ||
300 | |||
301 | /* | ||
302 | * cpu_active bit is set, so it's safe to enalbe interrupts | ||
303 | * now. | ||
304 | */ | ||
305 | local_irq_enable(); | 298 | local_irq_enable(); |
306 | local_fiq_enable(); | 299 | local_fiq_enable(); |
307 | 300 | ||
diff --git a/arch/arm/mach-bcmring/dma.c b/arch/arm/mach-bcmring/dma.c index 1024396797e1..e5fd241fccdc 100644 --- a/arch/arm/mach-bcmring/dma.c +++ b/arch/arm/mach-bcmring/dma.c | |||
@@ -35,7 +35,6 @@ | |||
35 | 35 | ||
36 | #include <linux/pfn.h> | 36 | #include <linux/pfn.h> |
37 | #include <linux/atomic.h> | 37 | #include <linux/atomic.h> |
38 | #include <linux/sched.h> | ||
39 | #include <mach/dma.h> | 38 | #include <mach/dma.h> |
40 | 39 | ||
41 | /* ---- Public Variables ------------------------------------------------- */ | 40 | /* ---- Public Variables ------------------------------------------------- */ |
diff --git a/arch/arm/mach-imx/mx31moboard-devboard.c b/arch/arm/mach-imx/mx31moboard-devboard.c index 0aa25364360d..cc285e507286 100644 --- a/arch/arm/mach-imx/mx31moboard-devboard.c +++ b/arch/arm/mach-imx/mx31moboard-devboard.c | |||
@@ -158,7 +158,7 @@ static int devboard_usbh1_hw_init(struct platform_device *pdev) | |||
158 | #define USBH1_VBUSEN_B IOMUX_TO_GPIO(MX31_PIN_NFRE_B) | 158 | #define USBH1_VBUSEN_B IOMUX_TO_GPIO(MX31_PIN_NFRE_B) |
159 | #define USBH1_MODE IOMUX_TO_GPIO(MX31_PIN_NFALE) | 159 | #define USBH1_MODE IOMUX_TO_GPIO(MX31_PIN_NFALE) |
160 | 160 | ||
161 | static int devboard_isp1105_init(struct otg_transceiver *otg) | 161 | static int devboard_isp1105_init(struct usb_phy *otg) |
162 | { | 162 | { |
163 | int ret = gpio_request(USBH1_MODE, "usbh1-mode"); | 163 | int ret = gpio_request(USBH1_MODE, "usbh1-mode"); |
164 | if (ret) | 164 | if (ret) |
@@ -177,7 +177,7 @@ static int devboard_isp1105_init(struct otg_transceiver *otg) | |||
177 | } | 177 | } |
178 | 178 | ||
179 | 179 | ||
180 | static int devboard_isp1105_set_vbus(struct otg_transceiver *otg, bool on) | 180 | static int devboard_isp1105_set_vbus(struct usb_otg *otg, bool on) |
181 | { | 181 | { |
182 | if (on) | 182 | if (on) |
183 | gpio_set_value(USBH1_VBUSEN_B, 0); | 183 | gpio_set_value(USBH1_VBUSEN_B, 0); |
@@ -194,18 +194,24 @@ static struct mxc_usbh_platform_data usbh1_pdata __initdata = { | |||
194 | 194 | ||
195 | static int __init devboard_usbh1_init(void) | 195 | static int __init devboard_usbh1_init(void) |
196 | { | 196 | { |
197 | struct otg_transceiver *otg; | 197 | struct usb_phy *phy; |
198 | struct platform_device *pdev; | 198 | struct platform_device *pdev; |
199 | 199 | ||
200 | otg = kzalloc(sizeof(*otg), GFP_KERNEL); | 200 | phy = kzalloc(sizeof(*phy), GFP_KERNEL); |
201 | if (!otg) | 201 | if (!phy) |
202 | return -ENOMEM; | 202 | return -ENOMEM; |
203 | 203 | ||
204 | otg->label = "ISP1105"; | 204 | phy->otg = kzalloc(sizeof(struct usb_otg), GFP_KERNEL); |
205 | otg->init = devboard_isp1105_init; | 205 | if (!phy->otg) { |
206 | otg->set_vbus = devboard_isp1105_set_vbus; | 206 | kfree(phy); |
207 | return -ENOMEM; | ||
208 | } | ||
209 | |||
210 | phy->label = "ISP1105"; | ||
211 | phy->init = devboard_isp1105_init; | ||
212 | phy->otg->set_vbus = devboard_isp1105_set_vbus; | ||
207 | 213 | ||
208 | usbh1_pdata.otg = otg; | 214 | usbh1_pdata.otg = phy; |
209 | 215 | ||
210 | pdev = imx31_add_mxc_ehci_hs(1, &usbh1_pdata); | 216 | pdev = imx31_add_mxc_ehci_hs(1, &usbh1_pdata); |
211 | if (IS_ERR(pdev)) | 217 | if (IS_ERR(pdev)) |
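This and the following board files are converted from otg_transceiver to the split usb_phy/usb_otg model: generic PHY state stays on the phy object, while OTG-specific callbacks such as set_vbus move to a usb_otg hung off it. A rough sketch of that relationship, with field and type names inferred from the conversions in this diff rather than copied from include/linux/usb/otg.h:

#include <linux/types.h>

struct usb_bus;
struct usb_phy_io_ops;
struct sketch_usb_phy;

struct sketch_usb_otg {
	struct sketch_usb_phy *phy;
	int (*set_vbus)(struct sketch_usb_otg *otg, bool enabled);
	int (*set_host)(struct sketch_usb_otg *otg, struct usb_bus *host);
};

struct sketch_usb_phy {
	const char *label;
	struct sketch_usb_otg *otg;	/* OTG-only operations, may be NULL */
	struct usb_phy_io_ops *io_ops;	/* ULPI register access */
	void *io_priv;
	int (*init)(struct sketch_usb_phy *phy);
	void (*shutdown)(struct sketch_usb_phy *phy);
};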
diff --git a/arch/arm/mach-imx/mx31moboard-marxbot.c b/arch/arm/mach-imx/mx31moboard-marxbot.c index bb639cbda4e5..135c90e3a45f 100644 --- a/arch/arm/mach-imx/mx31moboard-marxbot.c +++ b/arch/arm/mach-imx/mx31moboard-marxbot.c | |||
@@ -272,7 +272,7 @@ static int marxbot_usbh1_hw_init(struct platform_device *pdev) | |||
272 | #define USBH1_VBUSEN_B IOMUX_TO_GPIO(MX31_PIN_NFRE_B) | 272 | #define USBH1_VBUSEN_B IOMUX_TO_GPIO(MX31_PIN_NFRE_B) |
273 | #define USBH1_MODE IOMUX_TO_GPIO(MX31_PIN_NFALE) | 273 | #define USBH1_MODE IOMUX_TO_GPIO(MX31_PIN_NFALE) |
274 | 274 | ||
275 | static int marxbot_isp1105_init(struct otg_transceiver *otg) | 275 | static int marxbot_isp1105_init(struct usb_phy *otg) |
276 | { | 276 | { |
277 | int ret = gpio_request(USBH1_MODE, "usbh1-mode"); | 277 | int ret = gpio_request(USBH1_MODE, "usbh1-mode"); |
278 | if (ret) | 278 | if (ret) |
@@ -291,7 +291,7 @@ static int marxbot_isp1105_init(struct otg_transceiver *otg) | |||
291 | } | 291 | } |
292 | 292 | ||
293 | 293 | ||
294 | static int marxbot_isp1105_set_vbus(struct otg_transceiver *otg, bool on) | 294 | static int marxbot_isp1105_set_vbus(struct usb_otg *otg, bool on) |
295 | { | 295 | { |
296 | if (on) | 296 | if (on) |
297 | gpio_set_value(USBH1_VBUSEN_B, 0); | 297 | gpio_set_value(USBH1_VBUSEN_B, 0); |
@@ -308,18 +308,24 @@ static struct mxc_usbh_platform_data usbh1_pdata __initdata = { | |||
308 | 308 | ||
309 | static int __init marxbot_usbh1_init(void) | 309 | static int __init marxbot_usbh1_init(void) |
310 | { | 310 | { |
311 | struct otg_transceiver *otg; | 311 | struct usb_phy *phy; |
312 | struct platform_device *pdev; | 312 | struct platform_device *pdev; |
313 | 313 | ||
314 | otg = kzalloc(sizeof(*otg), GFP_KERNEL); | 314 | phy = kzalloc(sizeof(*phy), GFP_KERNEL); |
315 | if (!otg) | 315 | if (!phy) |
316 | return -ENOMEM; | 316 | return -ENOMEM; |
317 | 317 | ||
318 | otg->label = "ISP1105"; | 318 | phy->otg = kzalloc(sizeof(struct usb_otg), GFP_KERNEL); |
319 | otg->init = marxbot_isp1105_init; | 319 | if (!phy->otg) { |
320 | otg->set_vbus = marxbot_isp1105_set_vbus; | 320 | kfree(phy); |
321 | return -ENOMEM; | ||
322 | } | ||
323 | |||
324 | phy->label = "ISP1105"; | ||
325 | phy->init = marxbot_isp1105_init; | ||
326 | phy->otg->set_vbus = marxbot_isp1105_set_vbus; | ||
321 | 327 | ||
322 | usbh1_pdata.otg = otg; | 328 | usbh1_pdata.otg = phy; |
323 | 329 | ||
324 | pdev = imx31_add_mxc_ehci_hs(1, &usbh1_pdata); | 330 | pdev = imx31_add_mxc_ehci_hs(1, &usbh1_pdata); |
325 | if (IS_ERR(pdev)) | 331 | if (IS_ERR(pdev)) |
diff --git a/arch/arm/mach-ks8695/leds.c b/arch/arm/mach-ks8695/leds.c index d6f6502ac9b5..4bd707547293 100644 --- a/arch/arm/mach-ks8695/leds.c +++ b/arch/arm/mach-ks8695/leds.c | |||
@@ -11,7 +11,6 @@ | |||
11 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/gpio.h> | ||
15 | 14 | ||
16 | #include <asm/leds.h> | 15 | #include <asm/leds.h> |
17 | #include <mach/devices.h> | 16 | #include <mach/devices.h> |
diff --git a/arch/arm/mach-omap1/lcd_dma.c b/arch/arm/mach-omap1/lcd_dma.c index 453809359ba6..4c5ce7d829c2 100644 --- a/arch/arm/mach-omap1/lcd_dma.c +++ b/arch/arm/mach-omap1/lcd_dma.c | |||
@@ -117,7 +117,7 @@ EXPORT_SYMBOL(omap_set_lcd_dma_b1_mirror); | |||
117 | void omap_set_lcd_dma_b1_vxres(unsigned long vxres) | 117 | void omap_set_lcd_dma_b1_vxres(unsigned long vxres) |
118 | { | 118 | { |
119 | if (cpu_is_omap15xx()) { | 119 | if (cpu_is_omap15xx()) { |
120 | printk(KERN_ERR "DMA virtual resulotion is not supported " | 120 | printk(KERN_ERR "DMA virtual resolution is not supported " |
121 | "in 1510 mode\n"); | 121 | "in 1510 mode\n"); |
122 | BUG(); | 122 | BUG(); |
123 | } | 123 | } |
diff --git a/arch/arm/mach-orion5x/db88f5281-setup.c b/arch/arm/mach-orion5x/db88f5281-setup.c index a104d5a80e11..e52108c9aaea 100644 --- a/arch/arm/mach-orion5x/db88f5281-setup.c +++ b/arch/arm/mach-orion5x/db88f5281-setup.c | |||
@@ -214,7 +214,7 @@ void __init db88f5281_pci_preinit(void) | |||
214 | if (gpio_direction_input(pin) == 0) { | 214 | if (gpio_direction_input(pin) == 0) { |
215 | irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW); | 215 | irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW); |
216 | } else { | 216 | } else { |
217 | printk(KERN_ERR "db88f5281_pci_preinit faield to " | 217 | printk(KERN_ERR "db88f5281_pci_preinit failed to " |
218 | "set_irq_type pin %d\n", pin); | 218 | "set_irq_type pin %d\n", pin); |
219 | gpio_free(pin); | 219 | gpio_free(pin); |
220 | } | 220 | } |
@@ -227,7 +227,7 @@ void __init db88f5281_pci_preinit(void) | |||
227 | if (gpio_direction_input(pin) == 0) { | 227 | if (gpio_direction_input(pin) == 0) { |
228 | irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW); | 228 | irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW); |
229 | } else { | 229 | } else { |
230 | printk(KERN_ERR "db88f5281_pci_preinit faield " | 230 | printk(KERN_ERR "db88f5281_pci_preinit failed " |
231 | "to set_irq_type pin %d\n", pin); | 231 | "to set_irq_type pin %d\n", pin); |
232 | gpio_free(pin); | 232 | gpio_free(pin); |
233 | } | 233 | } |
diff --git a/arch/arm/mach-orion5x/rd88f5182-setup.c b/arch/arm/mach-orion5x/rd88f5182-setup.c index 96438b6b2022..e3ce61711478 100644 --- a/arch/arm/mach-orion5x/rd88f5182-setup.c +++ b/arch/arm/mach-orion5x/rd88f5182-setup.c | |||
@@ -149,7 +149,7 @@ void __init rd88f5182_pci_preinit(void) | |||
149 | if (gpio_direction_input(pin) == 0) { | 149 | if (gpio_direction_input(pin) == 0) { |
150 | irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW); | 150 | irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW); |
151 | } else { | 151 | } else { |
152 | printk(KERN_ERR "rd88f5182_pci_preinit faield to " | 152 | printk(KERN_ERR "rd88f5182_pci_preinit failed to " |
153 | "set_irq_type pin %d\n", pin); | 153 | "set_irq_type pin %d\n", pin); |
154 | gpio_free(pin); | 154 | gpio_free(pin); |
155 | } | 155 | } |
@@ -162,7 +162,7 @@ void __init rd88f5182_pci_preinit(void) | |||
162 | if (gpio_direction_input(pin) == 0) { | 162 | if (gpio_direction_input(pin) == 0) { |
163 | irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW); | 163 | irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW); |
164 | } else { | 164 | } else { |
165 | printk(KERN_ERR "rd88f5182_pci_preinit faield to " | 165 | printk(KERN_ERR "rd88f5182_pci_preinit failed to " |
166 | "set_irq_type pin %d\n", pin); | 166 | "set_irq_type pin %d\n", pin); |
167 | gpio_free(pin); | 167 | gpio_free(pin); |
168 | } | 168 | } |
diff --git a/arch/arm/mach-pxa/pxa3xx-ulpi.c b/arch/arm/mach-pxa/pxa3xx-ulpi.c index e28dfb88827f..5ead6d480c6d 100644 --- a/arch/arm/mach-pxa/pxa3xx-ulpi.c +++ b/arch/arm/mach-pxa/pxa3xx-ulpi.c | |||
@@ -33,7 +33,7 @@ struct pxa3xx_u2d_ulpi { | |||
33 | struct clk *clk; | 33 | struct clk *clk; |
34 | void __iomem *mmio_base; | 34 | void __iomem *mmio_base; |
35 | 35 | ||
36 | struct otg_transceiver *otg; | 36 | struct usb_phy *otg; |
37 | unsigned int ulpi_mode; | 37 | unsigned int ulpi_mode; |
38 | }; | 38 | }; |
39 | 39 | ||
@@ -79,7 +79,7 @@ static int pxa310_ulpi_poll(void) | |||
79 | return -ETIMEDOUT; | 79 | return -ETIMEDOUT; |
80 | } | 80 | } |
81 | 81 | ||
82 | static int pxa310_ulpi_read(struct otg_transceiver *otg, u32 reg) | 82 | static int pxa310_ulpi_read(struct usb_phy *otg, u32 reg) |
83 | { | 83 | { |
84 | int err; | 84 | int err; |
85 | 85 | ||
@@ -98,7 +98,7 @@ static int pxa310_ulpi_read(struct otg_transceiver *otg, u32 reg) | |||
98 | return u2d_readl(U2DOTGUCR) & U2DOTGUCR_RDATA; | 98 | return u2d_readl(U2DOTGUCR) & U2DOTGUCR_RDATA; |
99 | } | 99 | } |
100 | 100 | ||
101 | static int pxa310_ulpi_write(struct otg_transceiver *otg, u32 val, u32 reg) | 101 | static int pxa310_ulpi_write(struct usb_phy *otg, u32 val, u32 reg) |
102 | { | 102 | { |
103 | if (pxa310_ulpi_get_phymode() != SYNCH) { | 103 | if (pxa310_ulpi_get_phymode() != SYNCH) { |
104 | pr_warning("%s: PHY is not in SYNCH mode!\n", __func__); | 104 | pr_warning("%s: PHY is not in SYNCH mode!\n", __func__); |
@@ -111,7 +111,7 @@ static int pxa310_ulpi_write(struct otg_transceiver *otg, u32 val, u32 reg) | |||
111 | return pxa310_ulpi_poll(); | 111 | return pxa310_ulpi_poll(); |
112 | } | 112 | } |
113 | 113 | ||
114 | struct otg_io_access_ops pxa310_ulpi_access_ops = { | 114 | struct usb_phy_io_ops pxa310_ulpi_access_ops = { |
115 | .read = pxa310_ulpi_read, | 115 | .read = pxa310_ulpi_read, |
116 | .write = pxa310_ulpi_write, | 116 | .write = pxa310_ulpi_write, |
117 | }; | 117 | }; |
@@ -139,19 +139,19 @@ static int pxa310_start_otg_host_transcvr(struct usb_bus *host) | |||
139 | 139 | ||
140 | pxa310_otg_transceiver_rtsm(); | 140 | pxa310_otg_transceiver_rtsm(); |
141 | 141 | ||
142 | err = otg_init(u2d->otg); | 142 | err = usb_phy_init(u2d->otg); |
143 | if (err) { | 143 | if (err) { |
144 | pr_err("OTG transceiver init failed"); | 144 | pr_err("OTG transceiver init failed"); |
145 | return err; | 145 | return err; |
146 | } | 146 | } |
147 | 147 | ||
148 | err = otg_set_vbus(u2d->otg, 1); | 148 | err = otg_set_vbus(u2d->otg->otg, 1); |
149 | if (err) { | 149 | if (err) { |
150 | pr_err("OTG transceiver VBUS set failed"); | 150 | pr_err("OTG transceiver VBUS set failed"); |
151 | return err; | 151 | return err; |
152 | } | 152 | } |
153 | 153 | ||
154 | err = otg_set_host(u2d->otg, host); | 154 | err = otg_set_host(u2d->otg->otg, host); |
155 | if (err) | 155 | if (err) |
156 | pr_err("OTG transceiver Host mode set failed"); | 156 | pr_err("OTG transceiver Host mode set failed"); |
157 | 157 | ||
@@ -189,9 +189,9 @@ static void pxa310_stop_otg_hc(void) | |||
189 | { | 189 | { |
190 | pxa310_otg_transceiver_rtsm(); | 190 | pxa310_otg_transceiver_rtsm(); |
191 | 191 | ||
192 | otg_set_host(u2d->otg, NULL); | 192 | otg_set_host(u2d->otg->otg, NULL); |
193 | otg_set_vbus(u2d->otg, 0); | 193 | otg_set_vbus(u2d->otg->otg, 0); |
194 | otg_shutdown(u2d->otg); | 194 | usb_phy_shutdown(u2d->otg); |
195 | } | 195 | } |
196 | 196 | ||
197 | static void pxa310_u2d_setup_otg_hc(void) | 197 | static void pxa310_u2d_setup_otg_hc(void) |
diff --git a/arch/arm/mach-spear3xx/spear300.c b/arch/arm/mach-spear3xx/spear300.c index a5e46b4ade20..4f7f5182dd4d 100644 --- a/arch/arm/mach-spear3xx/spear300.c +++ b/arch/arm/mach-spear3xx/spear300.c | |||
@@ -469,7 +469,7 @@ void __init spear300_init(struct pmx_mode *pmx_mode, struct pmx_dev **pmx_devs, | |||
469 | if (pmx_driver.base) { | 469 | if (pmx_driver.base) { |
470 | ret = pmx_register(&pmx_driver); | 470 | ret = pmx_register(&pmx_driver); |
471 | if (ret) | 471 | if (ret) |
472 | printk(KERN_ERR "padmux: registeration failed. err no" | 472 | printk(KERN_ERR "padmux: registration failed. err no" |
473 | ": %d\n", ret); | 473 | ": %d\n", ret); |
474 | /* Free Mapping, device selection already done */ | 474 | /* Free Mapping, device selection already done */ |
475 | iounmap(pmx_driver.base); | 475 | iounmap(pmx_driver.base); |
diff --git a/arch/arm/mach-spear3xx/spear310.c b/arch/arm/mach-spear3xx/spear310.c index 9004cf9f01bf..febaa6fcfb6a 100644 --- a/arch/arm/mach-spear3xx/spear310.c +++ b/arch/arm/mach-spear3xx/spear310.c | |||
@@ -303,6 +303,6 @@ void __init spear310_init(struct pmx_mode *pmx_mode, struct pmx_dev **pmx_devs, | |||
303 | 303 | ||
304 | ret = pmx_register(&pmx_driver); | 304 | ret = pmx_register(&pmx_driver); |
305 | if (ret) | 305 | if (ret) |
306 | printk(KERN_ERR "padmux: registeration failed. err no: %d\n", | 306 | printk(KERN_ERR "padmux: registration failed. err no: %d\n", |
307 | ret); | 307 | ret); |
308 | } | 308 | } |
diff --git a/arch/arm/mach-spear3xx/spear320.c b/arch/arm/mach-spear3xx/spear320.c index ee29bef43074..deaaf199612c 100644 --- a/arch/arm/mach-spear3xx/spear320.c +++ b/arch/arm/mach-spear3xx/spear320.c | |||
@@ -550,6 +550,6 @@ void __init spear320_init(struct pmx_mode *pmx_mode, struct pmx_dev **pmx_devs, | |||
550 | 550 | ||
551 | ret = pmx_register(&pmx_driver); | 551 | ret = pmx_register(&pmx_driver); |
552 | if (ret) | 552 | if (ret) |
553 | printk(KERN_ERR "padmux: registeration failed. err no: %d\n", | 553 | printk(KERN_ERR "padmux: registration failed. err no: %d\n", |
554 | ret); | 554 | ret); |
555 | } | 555 | } |
diff --git a/arch/arm/mach-tegra/include/mach/usb_phy.h b/arch/arm/mach-tegra/include/mach/usb_phy.h index d4b8f9e298a8..de1a0f602b28 100644 --- a/arch/arm/mach-tegra/include/mach/usb_phy.h +++ b/arch/arm/mach-tegra/include/mach/usb_phy.h | |||
@@ -58,7 +58,7 @@ struct tegra_usb_phy { | |||
58 | struct clk *pad_clk; | 58 | struct clk *pad_clk; |
59 | enum tegra_usb_phy_mode mode; | 59 | enum tegra_usb_phy_mode mode; |
60 | void *config; | 60 | void *config; |
61 | struct otg_transceiver *ulpi; | 61 | struct usb_phy *ulpi; |
62 | }; | 62 | }; |
63 | 63 | ||
64 | struct tegra_usb_phy *tegra_usb_phy_open(int instance, void __iomem *regs, | 64 | struct tegra_usb_phy *tegra_usb_phy_open(int instance, void __iomem *regs, |
diff --git a/arch/arm/mach-tegra/usb_phy.c b/arch/arm/mach-tegra/usb_phy.c index 37576a721aeb..ad321f9e2bb8 100644 --- a/arch/arm/mach-tegra/usb_phy.c +++ b/arch/arm/mach-tegra/usb_phy.c | |||
@@ -608,13 +608,13 @@ static int ulpi_phy_power_on(struct tegra_usb_phy *phy) | |||
608 | writel(val, base + ULPI_TIMING_CTRL_1); | 608 | writel(val, base + ULPI_TIMING_CTRL_1); |
609 | 609 | ||
610 | /* Fix VbusInvalid due to floating VBUS */ | 610 | /* Fix VbusInvalid due to floating VBUS */ |
611 | ret = otg_io_write(phy->ulpi, 0x40, 0x08); | 611 | ret = usb_phy_io_write(phy->ulpi, 0x40, 0x08); |
612 | if (ret) { | 612 | if (ret) { |
613 | pr_err("%s: ulpi write failed\n", __func__); | 613 | pr_err("%s: ulpi write failed\n", __func__); |
614 | return ret; | 614 | return ret; |
615 | } | 615 | } |
616 | 616 | ||
617 | ret = otg_io_write(phy->ulpi, 0x80, 0x0B); | 617 | ret = usb_phy_io_write(phy->ulpi, 0x80, 0x0B); |
618 | if (ret) { | 618 | if (ret) { |
619 | pr_err("%s: ulpi write failed\n", __func__); | 619 | pr_err("%s: ulpi write failed\n", __func__); |
620 | return ret; | 620 | return ret; |
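The otg_io_write() calls above become usb_phy_io_write(), dispatching through the renamed usb_phy_io_ops table registered by the ULPI glue. Assumed shape of the wrappers (thin dispatchers; the in-tree helpers presumably also handle a missing ops table):

static inline int sketch_usb_phy_io_read(struct usb_phy *phy, u32 reg)
{
	return phy->io_ops->read(phy, reg);
}

static inline int sketch_usb_phy_io_write(struct usb_phy *phy, u32 val, u32 reg)
{
	return phy->io_ops->write(phy, val, reg);
}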
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 5dc7d127a40f..245a55a0a5bb 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c | |||
@@ -32,7 +32,6 @@ | |||
32 | 32 | ||
33 | #include <asm/mach/arch.h> | 33 | #include <asm/mach/arch.h> |
34 | #include <asm/mach/map.h> | 34 | #include <asm/mach/map.h> |
35 | #include <asm/memblock.h> | ||
36 | 35 | ||
37 | #include "mm.h" | 36 | #include "mm.h" |
38 | 37 | ||
diff --git a/arch/arm/plat-mxc/include/mach/mxc_ehci.h b/arch/arm/plat-mxc/include/mach/mxc_ehci.h index 2c159dc2398b..9ffd1bbe615f 100644 --- a/arch/arm/plat-mxc/include/mach/mxc_ehci.h +++ b/arch/arm/plat-mxc/include/mach/mxc_ehci.h | |||
@@ -44,7 +44,7 @@ struct mxc_usbh_platform_data { | |||
44 | int (*exit)(struct platform_device *pdev); | 44 | int (*exit)(struct platform_device *pdev); |
45 | 45 | ||
46 | unsigned int portsc; | 46 | unsigned int portsc; |
47 | struct otg_transceiver *otg; | 47 | struct usb_phy *otg; |
48 | }; | 48 | }; |
49 | 49 | ||
50 | int mx51_initialize_usb_hw(int port, unsigned int flags); | 50 | int mx51_initialize_usb_hw(int port, unsigned int flags); |
diff --git a/arch/arm/plat-mxc/include/mach/ulpi.h b/arch/arm/plat-mxc/include/mach/ulpi.h index f9161c96d7bd..42bdaca6d7d9 100644 --- a/arch/arm/plat-mxc/include/mach/ulpi.h +++ b/arch/arm/plat-mxc/include/mach/ulpi.h | |||
@@ -2,15 +2,15 @@ | |||
2 | #define __MACH_ULPI_H | 2 | #define __MACH_ULPI_H |
3 | 3 | ||
4 | #ifdef CONFIG_USB_ULPI | 4 | #ifdef CONFIG_USB_ULPI |
5 | struct otg_transceiver *imx_otg_ulpi_create(unsigned int flags); | 5 | struct usb_phy *imx_otg_ulpi_create(unsigned int flags); |
6 | #else | 6 | #else |
7 | static inline struct otg_transceiver *imx_otg_ulpi_create(unsigned int flags) | 7 | static inline struct usb_phy *imx_otg_ulpi_create(unsigned int flags) |
8 | { | 8 | { |
9 | return NULL; | 9 | return NULL; |
10 | } | 10 | } |
11 | #endif | 11 | #endif |
12 | 12 | ||
13 | extern struct otg_io_access_ops mxc_ulpi_access_ops; | 13 | extern struct usb_phy_io_ops mxc_ulpi_access_ops; |
14 | 14 | ||
15 | #endif /* __MACH_ULPI_H */ | 15 | #endif /* __MACH_ULPI_H */ |
16 | 16 | ||
diff --git a/arch/arm/plat-mxc/ulpi.c b/arch/arm/plat-mxc/ulpi.c index 477e45bea1be..d2963427184f 100644 --- a/arch/arm/plat-mxc/ulpi.c +++ b/arch/arm/plat-mxc/ulpi.c | |||
@@ -58,7 +58,7 @@ static int ulpi_poll(void __iomem *view, u32 bit) | |||
58 | return -ETIMEDOUT; | 58 | return -ETIMEDOUT; |
59 | } | 59 | } |
60 | 60 | ||
61 | static int ulpi_read(struct otg_transceiver *otg, u32 reg) | 61 | static int ulpi_read(struct usb_phy *otg, u32 reg) |
62 | { | 62 | { |
63 | int ret; | 63 | int ret; |
64 | void __iomem *view = otg->io_priv; | 64 | void __iomem *view = otg->io_priv; |
@@ -84,7 +84,7 @@ static int ulpi_read(struct otg_transceiver *otg, u32 reg) | |||
84 | return (__raw_readl(view) >> ULPIVW_RDATA_SHIFT) & ULPIVW_RDATA_MASK; | 84 | return (__raw_readl(view) >> ULPIVW_RDATA_SHIFT) & ULPIVW_RDATA_MASK; |
85 | } | 85 | } |
86 | 86 | ||
87 | static int ulpi_write(struct otg_transceiver *otg, u32 val, u32 reg) | 87 | static int ulpi_write(struct usb_phy *otg, u32 val, u32 reg) |
88 | { | 88 | { |
89 | int ret; | 89 | int ret; |
90 | void __iomem *view = otg->io_priv; | 90 | void __iomem *view = otg->io_priv; |
@@ -106,13 +106,13 @@ static int ulpi_write(struct otg_transceiver *otg, u32 val, u32 reg) | |||
106 | return ulpi_poll(view, ULPIVW_RUN); | 106 | return ulpi_poll(view, ULPIVW_RUN); |
107 | } | 107 | } |
108 | 108 | ||
109 | struct otg_io_access_ops mxc_ulpi_access_ops = { | 109 | struct usb_phy_io_ops mxc_ulpi_access_ops = { |
110 | .read = ulpi_read, | 110 | .read = ulpi_read, |
111 | .write = ulpi_write, | 111 | .write = ulpi_write, |
112 | }; | 112 | }; |
113 | EXPORT_SYMBOL_GPL(mxc_ulpi_access_ops); | 113 | EXPORT_SYMBOL_GPL(mxc_ulpi_access_ops); |
114 | 114 | ||
115 | struct otg_transceiver *imx_otg_ulpi_create(unsigned int flags) | 115 | struct usb_phy *imx_otg_ulpi_create(unsigned int flags) |
116 | { | 116 | { |
117 | return otg_ulpi_create(&mxc_ulpi_access_ops, flags); | 117 | return otg_ulpi_create(&mxc_ulpi_access_ops, flags); |
118 | } | 118 | } |
diff --git a/arch/arm/plat-s3c24xx/pm-simtec.c b/arch/arm/plat-s3c24xx/pm-simtec.c index 68296b1fe7e5..699f93171297 100644 --- a/arch/arm/plat-s3c24xx/pm-simtec.c +++ b/arch/arm/plat-s3c24xx/pm-simtec.c | |||
@@ -52,7 +52,7 @@ static __init int pm_simtec_init(void) | |||
52 | !machine_is_aml_m5900()) | 52 | !machine_is_aml_m5900()) |
53 | return 0; | 53 | return 0; |
54 | 54 | ||
55 | printk(KERN_INFO "Simtec Board Power Manangement" COPYRIGHT "\n"); | 55 | printk(KERN_INFO "Simtec Board Power Management" COPYRIGHT "\n"); |
56 | 56 | ||
57 | gstatus4 = (__raw_readl(S3C2410_BANKCON7) & 0x3) << 30; | 57 | gstatus4 = (__raw_readl(S3C2410_BANKCON7) & 0x3) << 30; |
58 | gstatus4 |= (__raw_readl(S3C2410_BANKCON6) & 0x3) << 28; | 58 | gstatus4 |= (__raw_readl(S3C2410_BANKCON6) & 0x3) << 28; |
diff --git a/arch/avr32/include/asm/socket.h b/arch/avr32/include/asm/socket.h index 247b88c760be..a473f8c6a9aa 100644 --- a/arch/avr32/include/asm/socket.h +++ b/arch/avr32/include/asm/socket.h | |||
@@ -64,5 +64,9 @@ | |||
64 | 64 | ||
65 | #define SO_WIFI_STATUS 41 | 65 | #define SO_WIFI_STATUS 41 |
66 | #define SCM_WIFI_STATUS SO_WIFI_STATUS | 66 | #define SCM_WIFI_STATUS SO_WIFI_STATUS |
67 | #define SO_PEEK_OFF 42 | ||
68 | |||
69 | /* Instruct lower device to use last 4-bytes of skb data as FCS */ | ||
70 | #define SO_NOFCS 43 | ||
67 | 71 | ||
68 | #endif /* __ASM_AVR32_SOCKET_H */ | 72 | #endif /* __ASM_AVR32_SOCKET_H */ |
diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c index ea3395750324..92c5af98a6f7 100644 --- a/arch/avr32/kernel/process.c +++ b/arch/avr32/kernel/process.c | |||
@@ -40,9 +40,7 @@ void cpu_idle(void) | |||
40 | cpu_idle_sleep(); | 40 | cpu_idle_sleep(); |
41 | rcu_idle_exit(); | 41 | rcu_idle_exit(); |
42 | tick_nohz_idle_exit(); | 42 | tick_nohz_idle_exit(); |
43 | preempt_enable_no_resched(); | 43 | schedule_preempt_disabled(); |
44 | schedule(); | ||
45 | preempt_disable(); | ||
46 | } | 44 | } |
47 | } | 45 | } |
48 | 46 | ||
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c index 8dd0416673cb..a80a643f3691 100644 --- a/arch/blackfin/kernel/process.c +++ b/arch/blackfin/kernel/process.c | |||
@@ -94,9 +94,7 @@ void cpu_idle(void) | |||
94 | idle(); | 94 | idle(); |
95 | rcu_idle_exit(); | 95 | rcu_idle_exit(); |
96 | tick_nohz_idle_exit(); | 96 | tick_nohz_idle_exit(); |
97 | preempt_enable_no_resched(); | 97 | schedule_preempt_disabled(); |
98 | schedule(); | ||
99 | preempt_disable(); | ||
100 | } | 98 | } |
101 | } | 99 | } |
102 | 100 | ||
diff --git a/arch/blackfin/mach-bf518/boards/ezbrd.c b/arch/blackfin/mach-bf518/boards/ezbrd.c index a2d96d31bbf1..a17395727efa 100644 --- a/arch/blackfin/mach-bf518/boards/ezbrd.c +++ b/arch/blackfin/mach-bf518/boards/ezbrd.c | |||
@@ -821,7 +821,7 @@ void native_machine_restart(char *cmd) | |||
821 | bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS); | 821 | bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS); |
822 | } | 822 | } |
823 | 823 | ||
824 | void bfin_get_ether_addr(char *addr) | 824 | int bfin_get_ether_addr(char *addr) |
825 | { | 825 | { |
826 | /* the MAC is stored in OTP memory page 0xDF */ | 826 | /* the MAC is stored in OTP memory page 0xDF */ |
827 | u32 ret; | 827 | u32 ret; |
@@ -834,5 +834,6 @@ void bfin_get_ether_addr(char *addr) | |||
834 | for (ret = 0; ret < 6; ++ret) | 834 | for (ret = 0; ret < 6; ++ret) |
835 | addr[ret] = otp_mac_p[5 - ret]; | 835 | addr[ret] = otp_mac_p[5 - ret]; |
836 | } | 836 | } |
837 | return 0; | ||
837 | } | 838 | } |
838 | EXPORT_SYMBOL(bfin_get_ether_addr); | 839 | EXPORT_SYMBOL(bfin_get_ether_addr); |
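bfin_get_ether_addr() now returns a status instead of randomising the MAC itself: boards with no stored address simply return nonzero, and the caller is expected to fall back to a random address. A hedged sketch of the assumed caller-side pattern (the function below is illustrative, not a quote of the Blackfin Ethernet driver):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static void sketch_set_mac(struct net_device *ndev)
{
	if (bfin_get_ether_addr(ndev->dev_addr) ||
	    !is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&ndev->dev, "no valid board MAC, using a random one\n");
		random_ether_addr(ndev->dev_addr);
	}
}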
diff --git a/arch/blackfin/mach-bf518/boards/tcm-bf518.c b/arch/blackfin/mach-bf518/boards/tcm-bf518.c index f271310f739d..6eebee4e4217 100644 --- a/arch/blackfin/mach-bf518/boards/tcm-bf518.c +++ b/arch/blackfin/mach-bf518/boards/tcm-bf518.c | |||
@@ -730,9 +730,8 @@ void native_machine_restart(char *cmd) | |||
730 | bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS); | 730 | bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS); |
731 | } | 731 | } |
732 | 732 | ||
733 | void bfin_get_ether_addr(char *addr) | 733 | int bfin_get_ether_addr(char *addr) |
734 | { | 734 | { |
735 | random_ether_addr(addr); | 735 | return 1; |
736 | printk(KERN_WARNING "%s:%s: Setting Ethernet MAC to a random one\n", __FILE__, __func__); | ||
737 | } | 736 | } |
738 | EXPORT_SYMBOL(bfin_get_ether_addr); | 737 | EXPORT_SYMBOL(bfin_get_ether_addr); |
diff --git a/arch/blackfin/mach-bf527/boards/ad7160eval.c b/arch/blackfin/mach-bf527/boards/ad7160eval.c index c8d5d2b7c732..fad7fea1b0bf 100644 --- a/arch/blackfin/mach-bf527/boards/ad7160eval.c +++ b/arch/blackfin/mach-bf527/boards/ad7160eval.c | |||
@@ -846,7 +846,7 @@ void native_machine_restart(char *cmd) | |||
846 | bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS); | 846 | bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS); |
847 | } | 847 | } |
848 | 848 | ||
849 | void bfin_get_ether_addr(char *addr) | 849 | int bfin_get_ether_addr(char *addr) |
850 | { | 850 | { |
851 | /* the MAC is stored in OTP memory page 0xDF */ | 851 | /* the MAC is stored in OTP memory page 0xDF */ |
852 | u32 ret; | 852 | u32 ret; |
@@ -859,5 +859,6 @@ void bfin_get_ether_addr(char *addr) | |||
859 | for (ret = 0; ret < 6; ++ret) | 859 | for (ret = 0; ret < 6; ++ret) |
860 | addr[ret] = otp_mac_p[5 - ret]; | 860 | addr[ret] = otp_mac_p[5 - ret]; |
861 | } | 861 | } |
862 | return 0; | ||
862 | } | 863 | } |
863 | EXPORT_SYMBOL(bfin_get_ether_addr); | 864 | EXPORT_SYMBOL(bfin_get_ether_addr); |
diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c index 7330607856e9..65b7fbd30e16 100644 --- a/arch/blackfin/mach-bf527/boards/cm_bf527.c +++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c | |||
@@ -983,9 +983,8 @@ void native_machine_restart(char *cmd) | |||
983 | bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS); | 983 | bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS); |
984 | } | 984 | } |
985 | 985 | ||
986 | void bfin_get_ether_addr(char *addr) | 986 | int bfin_get_ether_addr(char *addr) |
987 | { | 987 | { |
988 | random_ether_addr(addr); | 988 | return 1; |
989 | printk(KERN_WARNING "%s:%s: Setting Ethernet MAC to a random one\n", __FILE__, __func__); | ||
990 | } | 989 | } |
991 | EXPORT_SYMBOL(bfin_get_ether_addr); | 990 | EXPORT_SYMBOL(bfin_get_ether_addr); |
diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c index db3ecfce8306..17c6a24cc076 100644 --- a/arch/blackfin/mach-bf527/boards/ezbrd.c +++ b/arch/blackfin/mach-bf527/boards/ezbrd.c | |||
@@ -870,7 +870,7 @@ void native_machine_restart(char *cmd) | |||
870 | bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS); | 870 | bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS); |
871 | } | 871 | } |
872 | 872 | ||
873 | void bfin_get_ether_addr(char *addr) | 873 | int bfin_get_ether_addr(char *addr) |
874 | { | 874 | { |
875 | /* the MAC is stored in OTP memory page 0xDF */ | 875 | /* the MAC is stored in OTP memory page 0xDF */ |
876 | u32 ret; | 876 | u32 ret; |
@@ -883,5 +883,6 @@ void bfin_get_ether_addr(char *addr) | |||
883 | for (ret = 0; ret < 6; ++ret) | 883 | for (ret = 0; ret < 6; ++ret) |
884 | addr[ret] = otp_mac_p[5 - ret]; | 884 | addr[ret] = otp_mac_p[5 - ret]; |
885 | } | 885 | } |
886 | return 0; | ||
886 | } | 887 | } |
887 | EXPORT_SYMBOL(bfin_get_ether_addr); | 888 | EXPORT_SYMBOL(bfin_get_ether_addr); |
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c index dfdd8e6bac72..2f9a2bd83ce4 100644 --- a/arch/blackfin/mach-bf527/boards/ezkit.c +++ b/arch/blackfin/mach-bf527/boards/ezkit.c | |||
@@ -1311,7 +1311,7 @@ void native_machine_restart(char *cmd) | |||
1311 | bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS); | 1311 | bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS); |
1312 | } | 1312 | } |
1313 | 1313 | ||
1314 | void bfin_get_ether_addr(char *addr) | 1314 | int bfin_get_ether_addr(char *addr) |
1315 | { | 1315 | { |
1316 | /* the MAC is stored in OTP memory page 0xDF */ | 1316 | /* the MAC is stored in OTP memory page 0xDF */ |
1317 | u32 ret; | 1317 | u32 ret; |
@@ -1324,5 +1324,6 @@ void bfin_get_ether_addr(char *addr) | |||
1324 | for (ret = 0; ret < 6; ++ret) | 1324 | for (ret = 0; ret < 6; ++ret) |
1325 | addr[ret] = otp_mac_p[5 - ret]; | 1325 | addr[ret] = otp_mac_p[5 - ret]; |
1326 | } | 1326 | } |
1327 | return 0; | ||
1327 | } | 1328 | } |
1328 | EXPORT_SYMBOL(bfin_get_ether_addr); | 1329 | EXPORT_SYMBOL(bfin_get_ether_addr); |
diff --git a/arch/blackfin/mach-bf527/boards/tll6527m.c b/arch/blackfin/mach-bf527/boards/tll6527m.c index 360e97fc5293..d192c0ac941c 100644 --- a/arch/blackfin/mach-bf527/boards/tll6527m.c +++ b/arch/blackfin/mach-bf527/boards/tll6527m.c | |||
@@ -931,7 +931,7 @@ void native_machine_restart(char *cmd) | |||
931 | bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS); | 931 | bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS); |
932 | } | 932 | } |
933 | 933 | ||
934 | void bfin_get_ether_addr(char *addr) | 934 | int bfin_get_ether_addr(char *addr) |
935 | { | 935 | { |
936 | /* the MAC is stored in OTP memory page 0xDF */ | 936 | /* the MAC is stored in OTP memory page 0xDF */ |
937 | u32 ret; | 937 | u32 ret; |
@@ -945,5 +945,6 @@ void bfin_get_ether_addr(char *addr) | |||
945 | for (ret = 0; ret < 6; ++ret) | 945 | for (ret = 0; ret < 6; ++ret) |
946 | addr[ret] = otp_mac_p[5 - ret]; | 946 | addr[ret] = otp_mac_p[5 - ret]; |
947 | } | 947 | } |
948 | return 0; | ||
948 | } | 949 | } |
949 | EXPORT_SYMBOL(bfin_get_ether_addr); | 950 | EXPORT_SYMBOL(bfin_get_ether_addr); |
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537e.c b/arch/blackfin/mach-bf537/boards/cm_bf537e.c index 0d4a2f61a973..27fd2c32ae9a 100644 --- a/arch/blackfin/mach-bf537/boards/cm_bf537e.c +++ b/arch/blackfin/mach-bf537/boards/cm_bf537e.c | |||
@@ -813,9 +813,8 @@ void __init native_machine_early_platform_add_devices(void) | |||
813 | ARRAY_SIZE(cm_bf537e_early_devices)); | 813 | ARRAY_SIZE(cm_bf537e_early_devices)); |
814 | } | 814 | } |
815 | 815 | ||
816 | void bfin_get_ether_addr(char *addr) | 816 | int bfin_get_ether_addr(char *addr) |
817 | { | 817 | { |
818 | random_ether_addr(addr); | 818 | return 1; |
819 | printk(KERN_WARNING "%s:%s: Setting Ethernet MAC to a random one\n", __FILE__, __func__); | ||
820 | } | 819 | } |
821 | EXPORT_SYMBOL(bfin_get_ether_addr); | 820 | EXPORT_SYMBOL(bfin_get_ether_addr); |
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537u.c b/arch/blackfin/mach-bf537/boards/cm_bf537u.c index f5536982706c..3f3abad86ec3 100644 --- a/arch/blackfin/mach-bf537/boards/cm_bf537u.c +++ b/arch/blackfin/mach-bf537/boards/cm_bf537u.c | |||
@@ -790,9 +790,8 @@ void __init native_machine_early_platform_add_devices(void) | |||
790 | ARRAY_SIZE(cm_bf537u_early_devices)); | 790 | ARRAY_SIZE(cm_bf537u_early_devices)); |
791 | } | 791 | } |
792 | 792 | ||
793 | void bfin_get_ether_addr(char *addr) | 793 | int bfin_get_ether_addr(char *addr) |
794 | { | 794 | { |
795 | random_ether_addr(addr); | 795 | return 1; |
796 | printk(KERN_WARNING "%s:%s: Setting Ethernet MAC to a random one\n", __FILE__, __func__); | ||
797 | } | 796 | } |
798 | EXPORT_SYMBOL(bfin_get_ether_addr); | 797 | EXPORT_SYMBOL(bfin_get_ether_addr); |
diff --git a/arch/blackfin/mach-bf537/boards/dnp5370.c b/arch/blackfin/mach-bf537/boards/dnp5370.c index 11dadeb33d79..6f77bf708ec0 100644 --- a/arch/blackfin/mach-bf537/boards/dnp5370.c +++ b/arch/blackfin/mach-bf537/boards/dnp5370.c | |||
@@ -399,9 +399,10 @@ arch_initcall(dnp5370_init); | |||
399 | /* | 399 | /* |
400 | * Currently the MAC address is saved in Flash by U-Boot | 400 | * Currently the MAC address is saved in Flash by U-Boot |
401 | */ | 401 | */ |
402 | void bfin_get_ether_addr(char *addr) | 402 | int bfin_get_ether_addr(char *addr) |
403 | { | 403 | { |
404 | *(u32 *)(&(addr[0])) = bfin_read32(FLASH_MAC); | 404 | *(u32 *)(&(addr[0])) = bfin_read32(FLASH_MAC); |
405 | *(u16 *)(&(addr[4])) = bfin_read16(FLASH_MAC + 4); | 405 | *(u16 *)(&(addr[4])) = bfin_read16(FLASH_MAC + 4); |
406 | return 0; | ||
406 | } | 407 | } |
407 | EXPORT_SYMBOL(bfin_get_ether_addr); | 408 | EXPORT_SYMBOL(bfin_get_ether_addr); |
diff --git a/arch/blackfin/mach-bf537/boards/pnav10.c b/arch/blackfin/mach-bf537/boards/pnav10.c index 6fd84709fc68..e9507feea319 100644 --- a/arch/blackfin/mach-bf537/boards/pnav10.c +++ b/arch/blackfin/mach-bf537/boards/pnav10.c | |||
@@ -535,9 +535,8 @@ void __init native_machine_early_platform_add_devices(void) | |||
535 | ARRAY_SIZE(stamp_early_devices)); | 535 | ARRAY_SIZE(stamp_early_devices)); |
536 | } | 536 | } |
537 | 537 | ||
538 | void bfin_get_ether_addr(char *addr) | 538 | int bfin_get_ether_addr(char *addr) |
539 | { | 539 | { |
540 | random_ether_addr(addr); | 540 | return 1; |
541 | printk(KERN_WARNING "%s:%s: Setting Ethernet MAC to a random one\n", __FILE__, __func__); | ||
542 | } | 541 | } |
543 | EXPORT_SYMBOL(bfin_get_ether_addr); | 542 | EXPORT_SYMBOL(bfin_get_ether_addr); |
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c index 2221173e489e..0b807253f4d6 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ b/arch/blackfin/mach-bf537/boards/stamp.c | |||
@@ -2993,9 +2993,10 @@ void native_machine_restart(char *cmd) | |||
2993 | * Currently the MAC address is saved in Flash by U-Boot | 2993 | * Currently the MAC address is saved in Flash by U-Boot |
2994 | */ | 2994 | */ |
2995 | #define FLASH_MAC 0x203f0000 | 2995 | #define FLASH_MAC 0x203f0000 |
2996 | void bfin_get_ether_addr(char *addr) | 2996 | int bfin_get_ether_addr(char *addr) |
2997 | { | 2997 | { |
2998 | *(u32 *)(&(addr[0])) = bfin_read32(FLASH_MAC); | 2998 | *(u32 *)(&(addr[0])) = bfin_read32(FLASH_MAC); |
2999 | *(u16 *)(&(addr[4])) = bfin_read16(FLASH_MAC + 4); | 2999 | *(u16 *)(&(addr[4])) = bfin_read16(FLASH_MAC + 4); |
3000 | return 0; | ||
3000 | } | 3001 | } |
3001 | EXPORT_SYMBOL(bfin_get_ether_addr); | 3002 | EXPORT_SYMBOL(bfin_get_ether_addr); |
diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c index 988517671a5d..3fb421823857 100644 --- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c +++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c | |||
@@ -780,9 +780,8 @@ void __init native_machine_early_platform_add_devices(void) | |||
780 | ARRAY_SIZE(cm_bf537_early_devices)); | 780 | ARRAY_SIZE(cm_bf537_early_devices)); |
781 | } | 781 | } |
782 | 782 | ||
783 | void bfin_get_ether_addr(char *addr) | 783 | int bfin_get_ether_addr(char *addr) |
784 | { | 784 | { |
785 | random_ether_addr(addr); | 785 | return 1; |
786 | printk(KERN_WARNING "%s:%s: Setting Ethernet MAC to a random one\n", __FILE__, __func__); | ||
787 | } | 786 | } |
788 | EXPORT_SYMBOL(bfin_get_ether_addr); | 787 | EXPORT_SYMBOL(bfin_get_ether_addr); |
diff --git a/arch/cris/include/asm/socket.h b/arch/cris/include/asm/socket.h index e269264df7c4..ae52825021af 100644 --- a/arch/cris/include/asm/socket.h +++ b/arch/cris/include/asm/socket.h | |||
@@ -66,6 +66,10 @@ | |||
66 | 66 | ||
67 | #define SO_WIFI_STATUS 41 | 67 | #define SO_WIFI_STATUS 41 |
68 | #define SCM_WIFI_STATUS SO_WIFI_STATUS | 68 | #define SCM_WIFI_STATUS SO_WIFI_STATUS |
69 | #define SO_PEEK_OFF 42 | ||
70 | |||
71 | /* Instruct lower device to use last 4-bytes of skb data as FCS */ | ||
72 | #define SO_NOFCS 43 | ||
69 | 73 | ||
70 | #endif /* _ASM_SOCKET_H */ | 74 | #endif /* _ASM_SOCKET_H */ |
71 | 75 | ||
diff --git a/arch/cris/kernel/process.c b/arch/cris/kernel/process.c index aa585e4e979e..d8f50ff6fadd 100644 --- a/arch/cris/kernel/process.c +++ b/arch/cris/kernel/process.c | |||
@@ -115,9 +115,7 @@ void cpu_idle (void) | |||
115 | idle = default_idle; | 115 | idle = default_idle; |
116 | idle(); | 116 | idle(); |
117 | } | 117 | } |
118 | preempt_enable_no_resched(); | 118 | schedule_preempt_disabled(); |
119 | schedule(); | ||
120 | preempt_disable(); | ||
121 | } | 119 | } |
122 | } | 120 | } |
123 | 121 | ||
diff --git a/arch/frv/include/asm/perf_event.h b/arch/frv/include/asm/perf_event.h index a69e0155d146..c52ea5546b5b 100644 --- a/arch/frv/include/asm/perf_event.h +++ b/arch/frv/include/asm/perf_event.h | |||
@@ -12,6 +12,4 @@ | |||
12 | #ifndef _ASM_PERF_EVENT_H | 12 | #ifndef _ASM_PERF_EVENT_H |
13 | #define _ASM_PERF_EVENT_H | 13 | #define _ASM_PERF_EVENT_H |
14 | 14 | ||
15 | #define PERF_EVENT_INDEX_OFFSET 0 | ||
16 | |||
17 | #endif /* _ASM_PERF_EVENT_H */ | 15 | #endif /* _ASM_PERF_EVENT_H */ |
diff --git a/arch/frv/include/asm/socket.h b/arch/frv/include/asm/socket.h index ce80fdadcce5..a5b1d7dbb205 100644 --- a/arch/frv/include/asm/socket.h +++ b/arch/frv/include/asm/socket.h | |||
@@ -64,6 +64,10 @@ | |||
64 | 64 | ||
65 | #define SO_WIFI_STATUS 41 | 65 | #define SO_WIFI_STATUS 41 |
66 | #define SCM_WIFI_STATUS SO_WIFI_STATUS | 66 | #define SCM_WIFI_STATUS SO_WIFI_STATUS |
67 | #define SO_PEEK_OFF 42 | ||
68 | |||
69 | /* Instruct lower device to use last 4-bytes of skb data as FCS */ | ||
70 | #define SO_NOFCS 43 | ||
67 | 71 | ||
68 | #endif /* _ASM_SOCKET_H */ | 72 | #endif /* _ASM_SOCKET_H */ |
69 | 73 | ||
diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c index 3901df1213c0..29cc49783787 100644 --- a/arch/frv/kernel/process.c +++ b/arch/frv/kernel/process.c | |||
@@ -92,9 +92,7 @@ void cpu_idle(void) | |||
92 | idle(); | 92 | idle(); |
93 | } | 93 | } |
94 | 94 | ||
95 | preempt_enable_no_resched(); | 95 | schedule_preempt_disabled(); |
96 | schedule(); | ||
97 | preempt_disable(); | ||
98 | } | 96 | } |
99 | } | 97 | } |
100 | 98 | ||
diff --git a/arch/h8300/include/asm/socket.h b/arch/h8300/include/asm/socket.h index cf1daab6f27e..ec4554e7b04b 100644 --- a/arch/h8300/include/asm/socket.h +++ b/arch/h8300/include/asm/socket.h | |||
@@ -64,5 +64,9 @@ | |||
64 | 64 | ||
65 | #define SO_WIFI_STATUS 41 | 65 | #define SO_WIFI_STATUS 41 |
66 | #define SCM_WIFI_STATUS SO_WIFI_STATUS | 66 | #define SCM_WIFI_STATUS SO_WIFI_STATUS |
67 | #define SO_PEEK_OFF 42 | ||
68 | |||
69 | /* Instruct lower device to use last 4-bytes of skb data as FCS */ | ||
70 | #define SO_NOFCS 43 | ||
67 | 71 | ||
68 | #endif /* _ASM_SOCKET_H */ | 72 | #endif /* _ASM_SOCKET_H */ |
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c index 933bd388efb2..1a173b35f475 100644 --- a/arch/h8300/kernel/process.c +++ b/arch/h8300/kernel/process.c | |||
@@ -81,9 +81,7 @@ void cpu_idle(void) | |||
81 | while (1) { | 81 | while (1) { |
82 | while (!need_resched()) | 82 | while (!need_resched()) |
83 | idle(); | 83 | idle(); |
84 | preempt_enable_no_resched(); | 84 | schedule_preempt_disabled(); |
85 | schedule(); | ||
86 | preempt_disable(); | ||
87 | } | 85 | } |
88 | } | 86 | } |
89 | 87 | ||
diff --git a/arch/hexagon/include/asm/perf_event.h b/arch/hexagon/include/asm/perf_event.h index 6c2910f91180..8b8526b491c7 100644 --- a/arch/hexagon/include/asm/perf_event.h +++ b/arch/hexagon/include/asm/perf_event.h | |||
@@ -19,6 +19,4 @@ | |||
19 | #ifndef _ASM_PERF_EVENT_H | 19 | #ifndef _ASM_PERF_EVENT_H |
20 | #define _ASM_PERF_EVENT_H | 20 | #define _ASM_PERF_EVENT_H |
21 | 21 | ||
22 | #define PERF_EVENT_INDEX_OFFSET 0 | ||
23 | |||
24 | #endif /* _ASM_PERF_EVENT_H */ | 22 | #endif /* _ASM_PERF_EVENT_H */ |
diff --git a/arch/hexagon/kernel/smp.c b/arch/hexagon/kernel/smp.c index c871a2cffaef..0123c63e9a3a 100644 --- a/arch/hexagon/kernel/smp.c +++ b/arch/hexagon/kernel/smp.c | |||
@@ -179,8 +179,6 @@ void __cpuinit start_secondary(void) | |||
179 | printk(KERN_INFO "%s cpu %d\n", __func__, current_thread_info()->cpu); | 179 | printk(KERN_INFO "%s cpu %d\n", __func__, current_thread_info()->cpu); |
180 | 180 | ||
181 | set_cpu_online(cpu, true); | 181 | set_cpu_online(cpu, true); |
182 | while (!cpumask_test_cpu(cpu, cpu_active_mask)) | ||
183 | cpu_relax(); | ||
184 | local_irq_enable(); | 182 | local_irq_enable(); |
185 | 183 | ||
186 | cpu_idle(); | 184 | cpu_idle(); |
diff --git a/arch/ia64/hp/sim/boot/fw-emu.c b/arch/ia64/hp/sim/boot/fw-emu.c index bf6d9d8c802f..0216e28300fa 100644 --- a/arch/ia64/hp/sim/boot/fw-emu.c +++ b/arch/ia64/hp/sim/boot/fw-emu.c | |||
@@ -160,28 +160,19 @@ sal_emulator (long index, unsigned long in1, unsigned long in2, | |||
160 | */ | 160 | */ |
161 | status = 0; | 161 | status = 0; |
162 | if (index == SAL_FREQ_BASE) { | 162 | if (index == SAL_FREQ_BASE) { |
163 | switch (in1) { | 163 | if (in1 == SAL_FREQ_BASE_PLATFORM) |
164 | case SAL_FREQ_BASE_PLATFORM: | ||
165 | r9 = 200000000; | 164 | r9 = 200000000; |
166 | break; | 165 | else if (in1 == SAL_FREQ_BASE_INTERVAL_TIMER) { |
167 | |||
168 | case SAL_FREQ_BASE_INTERVAL_TIMER: | ||
169 | /* | 166 | /* |
170 | * Is this supposed to be the cr.itc frequency | 167 | * Is this supposed to be the cr.itc frequency |
171 | * or something platform specific? The SAL | 168 | * or something platform specific? The SAL |
172 | * doc ain't exactly clear on this... | 169 | * doc ain't exactly clear on this... |
173 | */ | 170 | */ |
174 | r9 = 700000000; | 171 | r9 = 700000000; |
175 | break; | 172 | } else if (in1 == SAL_FREQ_BASE_REALTIME_CLOCK) |
176 | |||
177 | case SAL_FREQ_BASE_REALTIME_CLOCK: | ||
178 | r9 = 1; | 173 | r9 = 1; |
179 | break; | 174 | else |
180 | |||
181 | default: | ||
182 | status = -1; | 175 | status = -1; |
183 | break; | ||
184 | } | ||
185 | } else if (index == SAL_SET_VECTORS) { | 176 | } else if (index == SAL_SET_VECTORS) { |
186 | ; | 177 | ; |
187 | } else if (index == SAL_GET_STATE_INFO) { | 178 | } else if (index == SAL_GET_STATE_INFO) { |
diff --git a/arch/ia64/hp/sim/hpsim_irq.c b/arch/ia64/hp/sim/hpsim_irq.c index 4bd9a63260ee..0aa70ebda49d 100644 --- a/arch/ia64/hp/sim/hpsim_irq.c +++ b/arch/ia64/hp/sim/hpsim_irq.c | |||
@@ -10,6 +10,8 @@ | |||
10 | #include <linux/sched.h> | 10 | #include <linux/sched.h> |
11 | #include <linux/irq.h> | 11 | #include <linux/irq.h> |
12 | 12 | ||
13 | #include "hpsim_ssc.h" | ||
14 | |||
13 | static unsigned int | 15 | static unsigned int |
14 | hpsim_irq_startup(struct irq_data *data) | 16 | hpsim_irq_startup(struct irq_data *data) |
15 | { | 17 | { |
@@ -37,15 +39,37 @@ static struct irq_chip irq_type_hp_sim = { | |||
37 | .irq_set_affinity = hpsim_set_affinity_noop, | 39 | .irq_set_affinity = hpsim_set_affinity_noop, |
38 | }; | 40 | }; |
39 | 41 | ||
42 | static void hpsim_irq_set_chip(int irq) | ||
43 | { | ||
44 | struct irq_chip *chip = irq_get_chip(irq); | ||
45 | |||
46 | if (chip == &no_irq_chip) | ||
47 | irq_set_chip(irq, &irq_type_hp_sim); | ||
48 | } | ||
49 | |||
50 | static void hpsim_connect_irq(int intr, int irq) | ||
51 | { | ||
52 | ia64_ssc(intr, irq, 0, 0, SSC_CONNECT_INTERRUPT); | ||
53 | } | ||
54 | |||
55 | int hpsim_get_irq(int intr) | ||
56 | { | ||
57 | int irq = assign_irq_vector(AUTO_ASSIGN); | ||
58 | |||
59 | if (irq >= 0) { | ||
60 | hpsim_irq_set_chip(irq); | ||
61 | irq_set_handler(irq, handle_simple_irq); | ||
62 | hpsim_connect_irq(intr, irq); | ||
63 | } | ||
64 | |||
65 | return irq; | ||
66 | } | ||
67 | |||
40 | void __init | 68 | void __init |
41 | hpsim_irq_init (void) | 69 | hpsim_irq_init (void) |
42 | { | 70 | { |
43 | int i; | 71 | int i; |
44 | 72 | ||
45 | for_each_active_irq(i) { | 73 | for_each_active_irq(i) |
46 | struct irq_chip *chip = irq_get_chip(i); | 74 | hpsim_irq_set_chip(i); |
47 | |||
48 | if (chip == &no_irq_chip) | ||
49 | irq_set_chip(i, &irq_type_hp_sim); | ||
50 | } | ||
51 | } | 75 | } |
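Note on hpsim_irq.c: the new hpsim_get_irq() folds vector assignment, irq_chip setup and the simulator SSC connect into one call, which simeth and simserial below then consume. A hedged sketch of the intended caller pattern (the device name and error policy here are illustrative, not from this patch):

	/* Sketch only: mirrors how a simulator device attaches its interrupt. */
	static int sketch_attach_sim_irq(int intr_source, irq_handler_t handler,
					 void *dev_id)
	{
		int irq, ret;

		irq = hpsim_get_irq(intr_source);	/* vector + chip + SSC connect */
		if (irq < 0)
			return irq;			/* out of interrupt vectors */

		ret = request_irq(irq, handler, 0, "hpsim-dev", dev_id);
		return ret ? ret : irq;
	}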
diff --git a/arch/ia64/hp/sim/hpsim_setup.c b/arch/ia64/hp/sim/hpsim_setup.c index f629e903ebc7..664a5402a695 100644 --- a/arch/ia64/hp/sim/hpsim_setup.c +++ b/arch/ia64/hp/sim/hpsim_setup.c | |||
@@ -26,12 +26,6 @@ | |||
26 | #include "hpsim_ssc.h" | 26 | #include "hpsim_ssc.h" |
27 | 27 | ||
28 | void | 28 | void |
29 | ia64_ssc_connect_irq (long intr, long irq) | ||
30 | { | ||
31 | ia64_ssc(intr, irq, 0, 0, SSC_CONNECT_INTERRUPT); | ||
32 | } | ||
33 | |||
34 | void | ||
35 | ia64_ctl_trace (long on) | 29 | ia64_ctl_trace (long on) |
36 | { | 30 | { |
37 | ia64_ssc(on, 0, 0, 0, SSC_CTL_TRACE); | 31 | ia64_ssc(on, 0, 0, 0, SSC_CTL_TRACE); |
diff --git a/arch/ia64/hp/sim/simeth.c b/arch/ia64/hp/sim/simeth.c index 47afcc61f6e5..a63218e1f6c9 100644 --- a/arch/ia64/hp/sim/simeth.c +++ b/arch/ia64/hp/sim/simeth.c | |||
@@ -129,17 +129,6 @@ netdev_probe(char *name, unsigned char *ether) | |||
129 | 129 | ||
130 | 130 | ||
131 | static inline int | 131 | static inline int |
132 | netdev_connect(int irq) | ||
133 | { | ||
134 | /* XXX Fix me | ||
135 | * this does not support multiple cards | ||
136 | * also no return value | ||
137 | */ | ||
138 | ia64_ssc_connect_irq(NETWORK_INTR, irq); | ||
139 | return 0; | ||
140 | } | ||
141 | |||
142 | static inline int | ||
143 | netdev_attach(int fd, int irq, unsigned int ipaddr) | 132 | netdev_attach(int fd, int irq, unsigned int ipaddr) |
144 | { | 133 | { |
145 | /* this puts the host interface in the right mode (start interrupting) */ | 134 | /* this puts the host interface in the right mode (start interrupting) */ |
@@ -193,7 +182,7 @@ simeth_probe1(void) | |||
193 | unsigned char mac_addr[ETH_ALEN]; | 182 | unsigned char mac_addr[ETH_ALEN]; |
194 | struct simeth_local *local; | 183 | struct simeth_local *local; |
195 | struct net_device *dev; | 184 | struct net_device *dev; |
196 | int fd, i, err, rc; | 185 | int fd, err, rc; |
197 | 186 | ||
198 | /* | 187 | /* |
199 | * XXX Fix me | 188 | * XXX Fix me |
@@ -226,22 +215,16 @@ simeth_probe1(void) | |||
226 | return err; | 215 | return err; |
227 | } | 216 | } |
228 | 217 | ||
229 | if ((rc = assign_irq_vector(AUTO_ASSIGN)) < 0) | ||
230 | panic("%s: out of interrupt vectors!\n", __func__); | ||
231 | dev->irq = rc; | ||
232 | |||
233 | /* | 218 | /* |
234 | * attach the interrupt in the simulator, this does enable interrupts | 219 | * attach the interrupt in the simulator, this does enable interrupts |
235 | * until a netdev_attach() is called | 220 | * until a netdev_attach() is called |
236 | */ | 221 | */ |
237 | netdev_connect(dev->irq); | 222 | if ((rc = hpsim_get_irq(NETWORK_INTR)) < 0) |
223 | panic("%s: out of interrupt vectors!\n", __func__); | ||
224 | dev->irq = rc; | ||
238 | 225 | ||
239 | printk(KERN_INFO "%s: hosteth=%s simfd=%d, HwAddr", | 226 | printk(KERN_INFO "%s: hosteth=%s simfd=%d, HwAddr=%pm, IRQ %d\n", |
240 | dev->name, simeth_device, local->simfd); | 227 | dev->name, simeth_device, local->simfd, dev->dev_addr, dev->irq); |
241 | for(i = 0; i < ETH_ALEN; i++) { | ||
242 | printk(" %2.2x", dev->dev_addr[i]); | ||
243 | } | ||
244 | printk(", IRQ %d\n", dev->irq); | ||
245 | 228 | ||
246 | return 0; | 229 | return 0; |
247 | } | 230 | } |
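Note on the simeth probe message: the per-byte MAC loop is replaced by one printk using the kernel's %p MAC extensions. A short sketch of the difference between the two forms (both take a pointer to the six address bytes):

	unsigned char mac[6] = { 0x00, 0x16, 0x3e, 0x12, 0x34, 0x56 };

	printk(KERN_INFO "HwAddr=%pm\n", mac);	/* 00163e123456      */
	printk(KERN_INFO "HwAddr=%pM\n", mac);	/* 00:16:3e:12:34:56 */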
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c index bff0824cf8a4..c34785dca92b 100644 --- a/arch/ia64/hp/sim/simserial.c +++ b/arch/ia64/hp/sim/simserial.c | |||
@@ -4,16 +4,11 @@ | |||
4 | * This driver is mostly used for bringup purposes and will go away. | 4 | * This driver is mostly used for bringup purposes and will go away. |
5 | * It has a strong dependency on the system console. All outputs | 5 | * It has a strong dependency on the system console. All outputs |
6 | * are rerouted to the same facility as the one used by printk which, in our | 6 | * are rerouted to the same facility as the one used by printk which, in our |
7 | * case means sys_sim.c console (goes via the simulator). The code hereafter | 7 | * case means sys_sim.c console (goes via the simulator). |
8 | * is completely leveraged from the serial.c driver. | ||
9 | * | 8 | * |
10 | * Copyright (C) 1999-2000, 2002-2003 Hewlett-Packard Co | 9 | * Copyright (C) 1999-2000, 2002-2003 Hewlett-Packard Co |
11 | * Stephane Eranian <eranian@hpl.hp.com> | 10 | * Stephane Eranian <eranian@hpl.hp.com> |
12 | * David Mosberger-Tang <davidm@hpl.hp.com> | 11 | * David Mosberger-Tang <davidm@hpl.hp.com> |
13 | * | ||
14 | * 02/04/00 D. Mosberger Merged in serial.c bug fixes in rs_close(). | ||
15 | * 02/25/00 D. Mosberger Synced up with 2.3.99pre-5 version of serial.c. | ||
16 | * 07/30/02 D. Mosberger Replace sti()/cli() with explicit spinlocks & local irq masking | ||
17 | */ | 12 | */ |
18 | 13 | ||
19 | #include <linux/init.h> | 14 | #include <linux/init.h> |
@@ -27,15 +22,17 @@ | |||
27 | #include <linux/seq_file.h> | 22 | #include <linux/seq_file.h> |
28 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
29 | #include <linux/capability.h> | 24 | #include <linux/capability.h> |
25 | #include <linux/circ_buf.h> | ||
30 | #include <linux/console.h> | 26 | #include <linux/console.h> |
27 | #include <linux/irq.h> | ||
31 | #include <linux/module.h> | 28 | #include <linux/module.h> |
32 | #include <linux/serial.h> | 29 | #include <linux/serial.h> |
33 | #include <linux/serialP.h> | ||
34 | #include <linux/sysrq.h> | 30 | #include <linux/sysrq.h> |
31 | #include <linux/uaccess.h> | ||
32 | |||
33 | #include <asm/hpsim.h> | ||
35 | 34 | ||
36 | #include <asm/irq.h> | 35 | #include "hpsim_ssc.h" |
37 | #include <asm/hw_irq.h> | ||
38 | #include <asm/uaccess.h> | ||
39 | 36 | ||
40 | #undef SIMSERIAL_DEBUG /* define this to get some debug information */ | 37 | #undef SIMSERIAL_DEBUG /* define this to get some debug information */ |
41 | 38 | ||
@@ -43,118 +40,44 @@ | |||
43 | 40 | ||
44 | #define NR_PORTS 1 /* only one port for now */ | 41 | #define NR_PORTS 1 /* only one port for now */ |
45 | 42 | ||
46 | #define IRQ_T(info) ((info->flags & ASYNC_SHARE_IRQ) ? IRQF_SHARED : IRQF_DISABLED) | 43 | struct serial_state { |
47 | 44 | struct tty_port port; | |
48 | #define SSC_GETCHAR 21 | 45 | struct circ_buf xmit; |
49 | 46 | int irq; | |
50 | extern long ia64_ssc (long, long, long, long, int); | 47 | int x_char; |
51 | extern void ia64_ssc_connect_irq (long intr, long irq); | ||
52 | |||
53 | static char *serial_name = "SimSerial driver"; | ||
54 | static char *serial_version = "0.6"; | ||
55 | |||
56 | /* | ||
57 | * This has been extracted from asm/serial.h. We need one eventually but | ||
58 | * I don't know exactly what we're going to put in it so just fake one | ||
59 | * for now. | ||
60 | */ | ||
61 | #define BASE_BAUD ( 1843200 / 16 ) | ||
62 | |||
63 | #define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST) | ||
64 | |||
65 | /* | ||
66 | * Most of the values here are meaningless to this particular driver. | ||
67 | * However some values must be preserved for the code (leveraged from serial.c | ||
68 | * to work correctly). | ||
69 | * port must not be 0 | ||
70 | * type must not be UNKNOWN | ||
71 | * So I picked arbitrary (guess from where?) values instead | ||
72 | */ | ||
73 | static struct serial_state rs_table[NR_PORTS]={ | ||
74 | /* UART CLK PORT IRQ FLAGS */ | ||
75 | { 0, BASE_BAUD, 0x3F8, 0, STD_COM_FLAGS,0,PORT_16550 } /* ttyS0 */ | ||
76 | }; | 48 | }; |
77 | 49 | ||
78 | /* | 50 | static struct serial_state rs_table[NR_PORTS]; |
79 | * Just for the fun of it ! | ||
80 | */ | ||
81 | static struct serial_uart_config uart_config[] = { | ||
82 | { "unknown", 1, 0 }, | ||
83 | { "8250", 1, 0 }, | ||
84 | { "16450", 1, 0 }, | ||
85 | { "16550", 1, 0 }, | ||
86 | { "16550A", 16, UART_CLEAR_FIFO | UART_USE_FIFO }, | ||
87 | { "cirrus", 1, 0 }, | ||
88 | { "ST16650", 1, UART_CLEAR_FIFO | UART_STARTECH }, | ||
89 | { "ST16650V2", 32, UART_CLEAR_FIFO | UART_USE_FIFO | | ||
90 | UART_STARTECH }, | ||
91 | { "TI16750", 64, UART_CLEAR_FIFO | UART_USE_FIFO}, | ||
92 | { NULL, 0} | ||
93 | }; | ||
94 | 51 | ||
95 | struct tty_driver *hp_simserial_driver; | 52 | struct tty_driver *hp_simserial_driver; |
96 | 53 | ||
97 | static struct async_struct *IRQ_ports[NR_IRQS]; | ||
98 | |||
99 | static struct console *console; | 54 | static struct console *console; |
100 | 55 | ||
101 | static unsigned char *tmp_buf; | 56 | static void receive_chars(struct tty_struct *tty) |
102 | |||
103 | extern struct console *console_drivers; /* from kernel/printk.c */ | ||
104 | |||
105 | /* | ||
106 | * ------------------------------------------------------------ | ||
107 | * rs_stop() and rs_start() | ||
108 | * | ||
109 | * This routines are called before setting or resetting tty->stopped. | ||
110 | * They enable or disable transmitter interrupts, as necessary. | ||
111 | * ------------------------------------------------------------ | ||
112 | */ | ||
113 | static void rs_stop(struct tty_struct *tty) | ||
114 | { | ||
115 | #ifdef SIMSERIAL_DEBUG | ||
116 | printk("rs_stop: tty->stopped=%d tty->hw_stopped=%d tty->flow_stopped=%d\n", | ||
117 | tty->stopped, tty->hw_stopped, tty->flow_stopped); | ||
118 | #endif | ||
119 | |||
120 | } | ||
121 | |||
122 | static void rs_start(struct tty_struct *tty) | ||
123 | { | ||
124 | #ifdef SIMSERIAL_DEBUG | ||
125 | printk("rs_start: tty->stopped=%d tty->hw_stopped=%d tty->flow_stopped=%d\n", | ||
126 | tty->stopped, tty->hw_stopped, tty->flow_stopped); | ||
127 | #endif | ||
128 | } | ||
129 | |||
130 | static void receive_chars(struct tty_struct *tty) | ||
131 | { | 57 | { |
132 | unsigned char ch; | 58 | unsigned char ch; |
133 | static unsigned char seen_esc = 0; | 59 | static unsigned char seen_esc = 0; |
134 | 60 | ||
135 | while ( (ch = ia64_ssc(0, 0, 0, 0, SSC_GETCHAR)) ) { | 61 | while ( (ch = ia64_ssc(0, 0, 0, 0, SSC_GETCHAR)) ) { |
136 | if ( ch == 27 && seen_esc == 0 ) { | 62 | if (ch == 27 && seen_esc == 0) { |
137 | seen_esc = 1; | 63 | seen_esc = 1; |
138 | continue; | 64 | continue; |
139 | } else { | 65 | } else if (seen_esc == 1 && ch == 'O') { |
140 | if ( seen_esc==1 && ch == 'O' ) { | 66 | seen_esc = 2; |
141 | seen_esc = 2; | 67 | continue; |
142 | continue; | 68 | } else if (seen_esc == 2) { |
143 | } else if ( seen_esc == 2 ) { | 69 | if (ch == 'P') /* F1 */ |
144 | if ( ch == 'P' ) /* F1 */ | 70 | show_state(); |
145 | show_state(); | ||
146 | #ifdef CONFIG_MAGIC_SYSRQ | 71 | #ifdef CONFIG_MAGIC_SYSRQ |
147 | if ( ch == 'S' ) { /* F4 */ | 72 | if (ch == 'S') { /* F4 */ |
148 | do | 73 | do { |
149 | ch = ia64_ssc(0, 0, 0, 0, | 74 | ch = ia64_ssc(0, 0, 0, 0, SSC_GETCHAR); |
150 | SSC_GETCHAR); | 75 | } while (!ch); |
151 | while (!ch); | 76 | handle_sysrq(ch); |
152 | handle_sysrq(ch); | ||
153 | } | ||
154 | #endif | ||
155 | seen_esc = 0; | ||
156 | continue; | ||
157 | } | 77 | } |
78 | #endif | ||
79 | seen_esc = 0; | ||
80 | continue; | ||
158 | } | 81 | } |
159 | seen_esc = 0; | 82 | seen_esc = 0; |
160 | 83 | ||
@@ -169,22 +92,19 @@ static void receive_chars(struct tty_struct *tty) | |||
169 | */ | 92 | */ |
170 | static irqreturn_t rs_interrupt_single(int irq, void *dev_id) | 93 | static irqreturn_t rs_interrupt_single(int irq, void *dev_id) |
171 | { | 94 | { |
172 | struct async_struct * info; | 95 | struct serial_state *info = dev_id; |
96 | struct tty_struct *tty = tty_port_tty_get(&info->port); | ||
173 | 97 | ||
174 | /* | 98 | if (!tty) { |
175 | * I don't know exactly why they don't use the dev_id opaque data | 99 | printk(KERN_INFO "%s: tty=0 problem\n", __func__); |
176 | * pointer instead of this extra lookup table | ||
177 | */ | ||
178 | info = IRQ_ports[irq]; | ||
179 | if (!info || !info->tty) { | ||
180 | printk(KERN_INFO "simrs_interrupt_single: info|tty=0 info=%p problem\n", info); | ||
181 | return IRQ_NONE; | 100 | return IRQ_NONE; |
182 | } | 101 | } |
183 | /* | 102 | /* |
184 | * pretty simple in our case, because we only get interrupts | 103 | * pretty simple in our case, because we only get interrupts |
185 | * on inbound traffic | 104 | * on inbound traffic |
186 | */ | 105 | */ |
187 | receive_chars(info->tty); | 106 | receive_chars(tty); |
107 | tty_kref_put(tty); | ||
188 | return IRQ_HANDLED; | 108 | return IRQ_HANDLED; |
189 | } | 109 | } |
190 | 110 | ||
@@ -194,17 +114,12 @@ static irqreturn_t rs_interrupt_single(int irq, void *dev_id) | |||
194 | * ------------------------------------------------------------------- | 114 | * ------------------------------------------------------------------- |
195 | */ | 115 | */ |
196 | 116 | ||
197 | static void do_softint(struct work_struct *private_) | ||
198 | { | ||
199 | printk(KERN_ERR "simserial: do_softint called\n"); | ||
200 | } | ||
201 | |||
202 | static int rs_put_char(struct tty_struct *tty, unsigned char ch) | 117 | static int rs_put_char(struct tty_struct *tty, unsigned char ch) |
203 | { | 118 | { |
204 | struct async_struct *info = (struct async_struct *)tty->driver_data; | 119 | struct serial_state *info = tty->driver_data; |
205 | unsigned long flags; | 120 | unsigned long flags; |
206 | 121 | ||
207 | if (!tty || !info->xmit.buf) | 122 | if (!info->xmit.buf) |
208 | return 0; | 123 | return 0; |
209 | 124 | ||
210 | local_irq_save(flags); | 125 | local_irq_save(flags); |
@@ -218,12 +133,12 @@ static int rs_put_char(struct tty_struct *tty, unsigned char ch) | |||
218 | return 1; | 133 | return 1; |
219 | } | 134 | } |
220 | 135 | ||
221 | static void transmit_chars(struct async_struct *info, int *intr_done) | 136 | static void transmit_chars(struct tty_struct *tty, struct serial_state *info, |
137 | int *intr_done) | ||
222 | { | 138 | { |
223 | int count; | 139 | int count; |
224 | unsigned long flags; | 140 | unsigned long flags; |
225 | 141 | ||
226 | |||
227 | local_irq_save(flags); | 142 | local_irq_save(flags); |
228 | 143 | ||
229 | if (info->x_char) { | 144 | if (info->x_char) { |
@@ -231,16 +146,16 @@ static void transmit_chars(struct async_struct *info, int *intr_done) | |||
231 | 146 | ||
232 | console->write(console, &c, 1); | 147 | console->write(console, &c, 1); |
233 | 148 | ||
234 | info->state->icount.tx++; | ||
235 | info->x_char = 0; | 149 | info->x_char = 0; |
236 | 150 | ||
237 | goto out; | 151 | goto out; |
238 | } | 152 | } |
239 | 153 | ||
240 | if (info->xmit.head == info->xmit.tail || info->tty->stopped || info->tty->hw_stopped) { | 154 | if (info->xmit.head == info->xmit.tail || tty->stopped || |
155 | tty->hw_stopped) { | ||
241 | #ifdef SIMSERIAL_DEBUG | 156 | #ifdef SIMSERIAL_DEBUG |
242 | printk("transmit_chars: head=%d, tail=%d, stopped=%d\n", | 157 | printk("transmit_chars: head=%d, tail=%d, stopped=%d\n", |
243 | info->xmit.head, info->xmit.tail, info->tty->stopped); | 158 | info->xmit.head, info->xmit.tail, tty->stopped); |
244 | #endif | 159 | #endif |
245 | goto out; | 160 | goto out; |
246 | } | 161 | } |
@@ -272,24 +187,24 @@ out: | |||
272 | 187 | ||
273 | static void rs_flush_chars(struct tty_struct *tty) | 188 | static void rs_flush_chars(struct tty_struct *tty) |
274 | { | 189 | { |
275 | struct async_struct *info = (struct async_struct *)tty->driver_data; | 190 | struct serial_state *info = tty->driver_data; |
276 | 191 | ||
277 | if (info->xmit.head == info->xmit.tail || tty->stopped || tty->hw_stopped || | 192 | if (info->xmit.head == info->xmit.tail || tty->stopped || |
278 | !info->xmit.buf) | 193 | tty->hw_stopped || !info->xmit.buf) |
279 | return; | 194 | return; |
280 | 195 | ||
281 | transmit_chars(info, NULL); | 196 | transmit_chars(tty, info, NULL); |
282 | } | 197 | } |
283 | 198 | ||
284 | |||
285 | static int rs_write(struct tty_struct * tty, | 199 | static int rs_write(struct tty_struct * tty, |
286 | const unsigned char *buf, int count) | 200 | const unsigned char *buf, int count) |
287 | { | 201 | { |
202 | struct serial_state *info = tty->driver_data; | ||
288 | int c, ret = 0; | 203 | int c, ret = 0; |
289 | struct async_struct *info = (struct async_struct *)tty->driver_data; | ||
290 | unsigned long flags; | 204 | unsigned long flags; |
291 | 205 | ||
292 | if (!tty || !info->xmit.buf || !tmp_buf) return 0; | 206 | if (!info->xmit.buf) |
207 | return 0; | ||
293 | 208 | ||
294 | local_irq_save(flags); | 209 | local_irq_save(flags); |
295 | while (1) { | 210 | while (1) { |
@@ -310,30 +225,30 @@ static int rs_write(struct tty_struct * tty, | |||
310 | /* | 225 | /* |
311 | * Hey, we transmit directly from here in our case | 226 | * Hey, we transmit directly from here in our case |
312 | */ | 227 | */ |
313 | if (CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) | 228 | if (CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) && |
314 | && !tty->stopped && !tty->hw_stopped) { | 229 | !tty->stopped && !tty->hw_stopped) |
315 | transmit_chars(info, NULL); | 230 | transmit_chars(tty, info, NULL); |
316 | } | 231 | |
317 | return ret; | 232 | return ret; |
318 | } | 233 | } |
319 | 234 | ||
320 | static int rs_write_room(struct tty_struct *tty) | 235 | static int rs_write_room(struct tty_struct *tty) |
321 | { | 236 | { |
322 | struct async_struct *info = (struct async_struct *)tty->driver_data; | 237 | struct serial_state *info = tty->driver_data; |
323 | 238 | ||
324 | return CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); | 239 | return CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); |
325 | } | 240 | } |
326 | 241 | ||
327 | static int rs_chars_in_buffer(struct tty_struct *tty) | 242 | static int rs_chars_in_buffer(struct tty_struct *tty) |
328 | { | 243 | { |
329 | struct async_struct *info = (struct async_struct *)tty->driver_data; | 244 | struct serial_state *info = tty->driver_data; |
330 | 245 | ||
331 | return CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); | 246 | return CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); |
332 | } | 247 | } |
333 | 248 | ||
334 | static void rs_flush_buffer(struct tty_struct *tty) | 249 | static void rs_flush_buffer(struct tty_struct *tty) |
335 | { | 250 | { |
336 | struct async_struct *info = (struct async_struct *)tty->driver_data; | 251 | struct serial_state *info = tty->driver_data; |
337 | unsigned long flags; | 252 | unsigned long flags; |
338 | 253 | ||
339 | local_irq_save(flags); | 254 | local_irq_save(flags); |
@@ -349,7 +264,7 @@ static void rs_flush_buffer(struct tty_struct *tty) | |||
349 | */ | 264 | */ |
350 | static void rs_send_xchar(struct tty_struct *tty, char ch) | 265 | static void rs_send_xchar(struct tty_struct *tty, char ch) |
351 | { | 266 | { |
352 | struct async_struct *info = (struct async_struct *)tty->driver_data; | 267 | struct serial_state *info = tty->driver_data; |
353 | 268 | ||
354 | info->x_char = ch; | 269 | info->x_char = ch; |
355 | if (ch) { | 270 | if (ch) { |
@@ -357,7 +272,7 @@ static void rs_send_xchar(struct tty_struct *tty, char ch) | |||
357 | * I guess we could call console->write() directly but | 272 | * I guess we could call console->write() directly but |
358 | * let's do that for now. | 273 | * let's do that for now. |
359 | */ | 274 | */ |
360 | transmit_chars(info, NULL); | 275 | transmit_chars(tty, info, NULL); |
361 | } | 276 | } |
362 | } | 277 | } |
363 | 278 | ||
@@ -371,14 +286,15 @@ static void rs_send_xchar(struct tty_struct *tty, char ch) | |||
371 | */ | 286 | */ |
372 | static void rs_throttle(struct tty_struct * tty) | 287 | static void rs_throttle(struct tty_struct * tty) |
373 | { | 288 | { |
374 | if (I_IXOFF(tty)) rs_send_xchar(tty, STOP_CHAR(tty)); | 289 | if (I_IXOFF(tty)) |
290 | rs_send_xchar(tty, STOP_CHAR(tty)); | ||
375 | 291 | ||
376 | printk(KERN_INFO "simrs_throttle called\n"); | 292 | printk(KERN_INFO "simrs_throttle called\n"); |
377 | } | 293 | } |
378 | 294 | ||
379 | static void rs_unthrottle(struct tty_struct * tty) | 295 | static void rs_unthrottle(struct tty_struct * tty) |
380 | { | 296 | { |
381 | struct async_struct *info = (struct async_struct *)tty->driver_data; | 297 | struct serial_state *info = tty->driver_data; |
382 | 298 | ||
383 | if (I_IXOFF(tty)) { | 299 | if (I_IXOFF(tty)) { |
384 | if (info->x_char) | 300 | if (info->x_char) |
@@ -389,7 +305,6 @@ static void rs_unthrottle(struct tty_struct * tty) | |||
389 | printk(KERN_INFO "simrs_unthrottle called\n"); | 305 | printk(KERN_INFO "simrs_unthrottle called\n"); |
390 | } | 306 | } |
391 | 307 | ||
392 | |||
393 | static int rs_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) | 308 | static int rs_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) |
394 | { | 309 | { |
395 | if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && | 310 | if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && |
@@ -400,48 +315,21 @@ static int rs_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) | |||
400 | } | 315 | } |
401 | 316 | ||
402 | switch (cmd) { | 317 | switch (cmd) { |
403 | case TIOCGSERIAL: | 318 | case TIOCGSERIAL: |
404 | printk(KERN_INFO "simrs_ioctl TIOCGSERIAL called\n"); | 319 | case TIOCSSERIAL: |
405 | return 0; | 320 | case TIOCSERGSTRUCT: |
406 | case TIOCSSERIAL: | 321 | case TIOCMIWAIT: |
407 | printk(KERN_INFO "simrs_ioctl TIOCSSERIAL called\n"); | 322 | return 0; |
408 | return 0; | 323 | case TIOCSERCONFIG: |
409 | case TIOCSERCONFIG: | 324 | case TIOCSERGETLSR: /* Get line status register */ |
410 | printk(KERN_INFO "rs_ioctl: TIOCSERCONFIG called\n"); | 325 | return -EINVAL; |
411 | return -EINVAL; | 326 | case TIOCSERGWILD: |
412 | 327 | case TIOCSERSWILD: | |
413 | case TIOCSERGETLSR: /* Get line status register */ | 328 | /* "setserial -W" is called in Debian boot */ |
414 | printk(KERN_INFO "rs_ioctl: TIOCSERGETLSR called\n"); | 329 | printk (KERN_INFO "TIOCSER?WILD ioctl obsolete, ignored.\n"); |
415 | return -EINVAL; | 330 | return 0; |
416 | 331 | } | |
417 | case TIOCSERGSTRUCT: | 332 | return -ENOIOCTLCMD; |
418 | printk(KERN_INFO "rs_ioctl: TIOCSERGSTRUCT called\n"); | ||
419 | #if 0 | ||
420 | if (copy_to_user((struct async_struct *) arg, | ||
421 | info, sizeof(struct async_struct))) | ||
422 | return -EFAULT; | ||
423 | #endif | ||
424 | return 0; | ||
425 | |||
426 | /* | ||
427 | * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change | ||
428 | * - mask passed in arg for lines of interest | ||
429 | * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking) | ||
430 | * Caller should use TIOCGICOUNT to see which one it was | ||
431 | */ | ||
432 | case TIOCMIWAIT: | ||
433 | printk(KERN_INFO "rs_ioctl: TIOCMIWAIT: called\n"); | ||
434 | return 0; | ||
435 | case TIOCSERGWILD: | ||
436 | case TIOCSERSWILD: | ||
437 | /* "setserial -W" is called in Debian boot */ | ||
438 | printk (KERN_INFO "TIOCSER?WILD ioctl obsolete, ignored.\n"); | ||
439 | return 0; | ||
440 | |||
441 | default: | ||
442 | return -ENOIOCTLCMD; | ||
443 | } | ||
444 | return 0; | ||
445 | } | 333 | } |
446 | 334 | ||
447 | #define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK)) | 335 | #define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK)) |
@@ -452,220 +340,50 @@ static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios) | |||
452 | if ((old_termios->c_cflag & CRTSCTS) && | 340 | if ((old_termios->c_cflag & CRTSCTS) && |
453 | !(tty->termios->c_cflag & CRTSCTS)) { | 341 | !(tty->termios->c_cflag & CRTSCTS)) { |
454 | tty->hw_stopped = 0; | 342 | tty->hw_stopped = 0; |
455 | rs_start(tty); | ||
456 | } | 343 | } |
457 | } | 344 | } |
458 | /* | 345 | /* |
459 | * This routine will shutdown a serial port; interrupts are disabled, and | 346 | * This routine will shutdown a serial port; interrupts are disabled, and |
460 | * DTR is dropped if the hangup on close termio flag is on. | 347 | * DTR is dropped if the hangup on close termio flag is on. |
461 | */ | 348 | */ |
462 | static void shutdown(struct async_struct * info) | 349 | static void shutdown(struct tty_port *port) |
463 | { | 350 | { |
464 | unsigned long flags; | 351 | struct serial_state *info = container_of(port, struct serial_state, |
465 | struct serial_state *state; | 352 | port); |
466 | int retval; | 353 | unsigned long flags; |
467 | |||
468 | if (!(info->flags & ASYNC_INITIALIZED)) return; | ||
469 | |||
470 | state = info->state; | ||
471 | |||
472 | #ifdef SIMSERIAL_DEBUG | ||
473 | printk("Shutting down serial port %d (irq %d)....", info->line, | ||
474 | state->irq); | ||
475 | #endif | ||
476 | 354 | ||
477 | local_irq_save(flags); | 355 | local_irq_save(flags); |
478 | { | 356 | if (info->irq) |
479 | /* | 357 | free_irq(info->irq, info); |
480 | * First unlink the serial port from the IRQ chain... | ||
481 | */ | ||
482 | if (info->next_port) | ||
483 | info->next_port->prev_port = info->prev_port; | ||
484 | if (info->prev_port) | ||
485 | info->prev_port->next_port = info->next_port; | ||
486 | else | ||
487 | IRQ_ports[state->irq] = info->next_port; | ||
488 | |||
489 | /* | ||
490 | * Free the IRQ, if necessary | ||
491 | */ | ||
492 | if (state->irq && (!IRQ_ports[state->irq] || | ||
493 | !IRQ_ports[state->irq]->next_port)) { | ||
494 | if (IRQ_ports[state->irq]) { | ||
495 | free_irq(state->irq, NULL); | ||
496 | retval = request_irq(state->irq, rs_interrupt_single, | ||
497 | IRQ_T(info), "serial", NULL); | ||
498 | |||
499 | if (retval) | ||
500 | printk(KERN_ERR "serial shutdown: request_irq: error %d" | ||
501 | " Couldn't reacquire IRQ.\n", retval); | ||
502 | } else | ||
503 | free_irq(state->irq, NULL); | ||
504 | } | ||
505 | |||
506 | if (info->xmit.buf) { | ||
507 | free_page((unsigned long) info->xmit.buf); | ||
508 | info->xmit.buf = NULL; | ||
509 | } | ||
510 | |||
511 | if (info->tty) set_bit(TTY_IO_ERROR, &info->tty->flags); | ||
512 | 358 | ||
513 | info->flags &= ~ASYNC_INITIALIZED; | 359 | if (info->xmit.buf) { |
360 | free_page((unsigned long) info->xmit.buf); | ||
361 | info->xmit.buf = NULL; | ||
514 | } | 362 | } |
515 | local_irq_restore(flags); | 363 | local_irq_restore(flags); |
516 | } | 364 | } |
517 | 365 | ||
518 | /* | ||
519 | * ------------------------------------------------------------ | ||
520 | * rs_close() | ||
521 | * | ||
522 | * This routine is called when the serial port gets closed. First, we | ||
523 | * wait for the last remaining data to be sent. Then, we unlink its | ||
524 | * async structure from the interrupt chain if necessary, and we free | ||
525 | * that IRQ if nothing is left in the chain. | ||
526 | * ------------------------------------------------------------ | ||
527 | */ | ||
528 | static void rs_close(struct tty_struct *tty, struct file * filp) | 366 | static void rs_close(struct tty_struct *tty, struct file * filp) |
529 | { | 367 | { |
530 | struct async_struct * info = (struct async_struct *)tty->driver_data; | 368 | struct serial_state *info = tty->driver_data; |
531 | struct serial_state *state; | ||
532 | unsigned long flags; | ||
533 | |||
534 | if (!info ) return; | ||
535 | |||
536 | state = info->state; | ||
537 | |||
538 | local_irq_save(flags); | ||
539 | if (tty_hung_up_p(filp)) { | ||
540 | #ifdef SIMSERIAL_DEBUG | ||
541 | printk("rs_close: hung_up\n"); | ||
542 | #endif | ||
543 | local_irq_restore(flags); | ||
544 | return; | ||
545 | } | ||
546 | #ifdef SIMSERIAL_DEBUG | ||
547 | printk("rs_close ttys%d, count = %d\n", info->line, state->count); | ||
548 | #endif | ||
549 | if ((tty->count == 1) && (state->count != 1)) { | ||
550 | /* | ||
551 | * Uh, oh. tty->count is 1, which means that the tty | ||
552 | * structure will be freed. state->count should always | ||
553 | * be one in these conditions. If it's greater than | ||
554 | * one, we've got real problems, since it means the | ||
555 | * serial port won't be shutdown. | ||
556 | */ | ||
557 | printk(KERN_ERR "rs_close: bad serial port count; tty->count is 1, " | ||
558 | "state->count is %d\n", state->count); | ||
559 | state->count = 1; | ||
560 | } | ||
561 | if (--state->count < 0) { | ||
562 | printk(KERN_ERR "rs_close: bad serial port count for ttys%d: %d\n", | ||
563 | info->line, state->count); | ||
564 | state->count = 0; | ||
565 | } | ||
566 | if (state->count) { | ||
567 | local_irq_restore(flags); | ||
568 | return; | ||
569 | } | ||
570 | info->flags |= ASYNC_CLOSING; | ||
571 | local_irq_restore(flags); | ||
572 | 369 | ||
573 | /* | 370 | tty_port_close(&info->port, tty, filp); |
574 | * Now we wait for the transmit buffer to clear; and we notify | ||
575 | * the line discipline to only process XON/XOFF characters. | ||
576 | */ | ||
577 | shutdown(info); | ||
578 | rs_flush_buffer(tty); | ||
579 | tty_ldisc_flush(tty); | ||
580 | info->event = 0; | ||
581 | info->tty = NULL; | ||
582 | if (info->blocked_open) { | ||
583 | if (info->close_delay) | ||
584 | schedule_timeout_interruptible(info->close_delay); | ||
585 | wake_up_interruptible(&info->open_wait); | ||
586 | } | ||
587 | info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING); | ||
588 | wake_up_interruptible(&info->close_wait); | ||
589 | } | 371 | } |
590 | 372 | ||
591 | /* | ||
592 | * rs_wait_until_sent() --- wait until the transmitter is empty | ||
593 | */ | ||
594 | static void rs_wait_until_sent(struct tty_struct *tty, int timeout) | ||
595 | { | ||
596 | } | ||
597 | |||
598 | |||
599 | /* | ||
600 | * rs_hangup() --- called by tty_hangup() when a hangup is signaled. | ||
601 | */ | ||
602 | static void rs_hangup(struct tty_struct *tty) | 373 | static void rs_hangup(struct tty_struct *tty) |
603 | { | 374 | { |
604 | struct async_struct * info = (struct async_struct *)tty->driver_data; | 375 | struct serial_state *info = tty->driver_data; |
605 | struct serial_state *state = info->state; | ||
606 | |||
607 | #ifdef SIMSERIAL_DEBUG | ||
608 | printk("rs_hangup: called\n"); | ||
609 | #endif | ||
610 | |||
611 | state = info->state; | ||
612 | 376 | ||
613 | rs_flush_buffer(tty); | 377 | rs_flush_buffer(tty); |
614 | if (info->flags & ASYNC_CLOSING) | 378 | tty_port_hangup(&info->port); |
615 | return; | ||
616 | shutdown(info); | ||
617 | |||
618 | info->event = 0; | ||
619 | state->count = 0; | ||
620 | info->flags &= ~ASYNC_NORMAL_ACTIVE; | ||
621 | info->tty = NULL; | ||
622 | wake_up_interruptible(&info->open_wait); | ||
623 | } | 379 | } |
624 | 380 | ||
625 | 381 | static int activate(struct tty_port *port, struct tty_struct *tty) | |
626 | static int get_async_struct(int line, struct async_struct **ret_info) | ||
627 | { | 382 | { |
628 | struct async_struct *info; | 383 | struct serial_state *state = container_of(port, struct serial_state, |
629 | struct serial_state *sstate; | 384 | port); |
630 | 385 | unsigned long flags, page; | |
631 | sstate = rs_table + line; | 386 | int retval = 0; |
632 | sstate->count++; | ||
633 | if (sstate->info) { | ||
634 | *ret_info = sstate->info; | ||
635 | return 0; | ||
636 | } | ||
637 | info = kzalloc(sizeof(struct async_struct), GFP_KERNEL); | ||
638 | if (!info) { | ||
639 | sstate->count--; | ||
640 | return -ENOMEM; | ||
641 | } | ||
642 | init_waitqueue_head(&info->open_wait); | ||
643 | init_waitqueue_head(&info->close_wait); | ||
644 | init_waitqueue_head(&info->delta_msr_wait); | ||
645 | info->magic = SERIAL_MAGIC; | ||
646 | info->port = sstate->port; | ||
647 | info->flags = sstate->flags; | ||
648 | info->xmit_fifo_size = sstate->xmit_fifo_size; | ||
649 | info->line = line; | ||
650 | INIT_WORK(&info->work, do_softint); | ||
651 | info->state = sstate; | ||
652 | if (sstate->info) { | ||
653 | kfree(info); | ||
654 | *ret_info = sstate->info; | ||
655 | return 0; | ||
656 | } | ||
657 | *ret_info = sstate->info = info; | ||
658 | return 0; | ||
659 | } | ||
660 | |||
661 | static int | ||
662 | startup(struct async_struct *info) | ||
663 | { | ||
664 | unsigned long flags; | ||
665 | int retval=0; | ||
666 | irq_handler_t handler; | ||
667 | struct serial_state *state= info->state; | ||
668 | unsigned long page; | ||
669 | 387 | ||
670 | page = get_zeroed_page(GFP_KERNEL); | 388 | page = get_zeroed_page(GFP_KERNEL); |
671 | if (!page) | 389 | if (!page) |
@@ -673,86 +391,31 @@ startup(struct async_struct *info) | |||
673 | 391 | ||
674 | local_irq_save(flags); | 392 | local_irq_save(flags); |
675 | 393 | ||
676 | if (info->flags & ASYNC_INITIALIZED) { | 394 | if (state->xmit.buf) |
677 | free_page(page); | ||
678 | goto errout; | ||
679 | } | ||
680 | |||
681 | if (!state->port || !state->type) { | ||
682 | if (info->tty) set_bit(TTY_IO_ERROR, &info->tty->flags); | ||
683 | free_page(page); | ||
684 | goto errout; | ||
685 | } | ||
686 | if (info->xmit.buf) | ||
687 | free_page(page); | 395 | free_page(page); |
688 | else | 396 | else |
689 | info->xmit.buf = (unsigned char *) page; | 397 | state->xmit.buf = (unsigned char *) page; |
690 | |||
691 | #ifdef SIMSERIAL_DEBUG | ||
692 | printk("startup: ttys%d (irq %d)...", info->line, state->irq); | ||
693 | #endif | ||
694 | 398 | ||
695 | /* | 399 | if (state->irq) { |
696 | * Allocate the IRQ if necessary | 400 | retval = request_irq(state->irq, rs_interrupt_single, 0, |
697 | */ | 401 | "simserial", state); |
698 | if (state->irq && (!IRQ_ports[state->irq] || | 402 | if (retval) |
699 | !IRQ_ports[state->irq]->next_port)) { | ||
700 | if (IRQ_ports[state->irq]) { | ||
701 | retval = -EBUSY; | ||
702 | goto errout; | ||
703 | } else | ||
704 | handler = rs_interrupt_single; | ||
705 | |||
706 | retval = request_irq(state->irq, handler, IRQ_T(info), "simserial", NULL); | ||
707 | if (retval) { | ||
708 | if (capable(CAP_SYS_ADMIN)) { | ||
709 | if (info->tty) | ||
710 | set_bit(TTY_IO_ERROR, | ||
711 | &info->tty->flags); | ||
712 | retval = 0; | ||
713 | } | ||
714 | goto errout; | 403 | goto errout; |
715 | } | ||
716 | } | 404 | } |
717 | 405 | ||
718 | /* | 406 | state->xmit.head = state->xmit.tail = 0; |
719 | * Insert serial port into IRQ chain. | ||
720 | */ | ||
721 | info->prev_port = NULL; | ||
722 | info->next_port = IRQ_ports[state->irq]; | ||
723 | if (info->next_port) | ||
724 | info->next_port->prev_port = info; | ||
725 | IRQ_ports[state->irq] = info; | ||
726 | |||
727 | if (info->tty) clear_bit(TTY_IO_ERROR, &info->tty->flags); | ||
728 | |||
729 | info->xmit.head = info->xmit.tail = 0; | ||
730 | |||
731 | #if 0 | ||
732 | /* | ||
733 | * Set up serial timers... | ||
734 | */ | ||
735 | timer_table[RS_TIMER].expires = jiffies + 2*HZ/100; | ||
736 | timer_active |= 1 << RS_TIMER; | ||
737 | #endif | ||
738 | 407 | ||
739 | /* | 408 | /* |
740 | * Set up the tty->alt_speed kludge | 409 | * Set up the tty->alt_speed kludge |
741 | */ | 410 | */ |
742 | if (info->tty) { | 411 | if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI) |
743 | if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI) | 412 | tty->alt_speed = 57600; |
744 | info->tty->alt_speed = 57600; | 413 | if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI) |
745 | if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI) | 414 | tty->alt_speed = 115200; |
746 | info->tty->alt_speed = 115200; | 415 | if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI) |
747 | if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI) | 416 | tty->alt_speed = 230400; |
748 | info->tty->alt_speed = 230400; | 417 | if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP) |
749 | if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP) | 418 | tty->alt_speed = 460800; |
750 | info->tty->alt_speed = 460800; | ||
751 | } | ||
752 | |||
753 | info->flags |= ASYNC_INITIALIZED; | ||
754 | local_irq_restore(flags); | ||
755 | return 0; | ||
756 | 419 | ||
757 | errout: | 420 | errout: |
758 | local_irq_restore(flags); | 421 | local_irq_restore(flags); |
@@ -768,56 +431,11 @@ errout: | |||
768 | */ | 431 | */ |
769 | static int rs_open(struct tty_struct *tty, struct file * filp) | 432 | static int rs_open(struct tty_struct *tty, struct file * filp) |
770 | { | 433 | { |
771 | struct async_struct *info; | 434 | struct serial_state *info = rs_table + tty->index; |
772 | int retval, line; | 435 | struct tty_port *port = &info->port; |
773 | unsigned long page; | ||
774 | 436 | ||
775 | line = tty->index; | ||
776 | if ((line < 0) || (line >= NR_PORTS)) | ||
777 | return -ENODEV; | ||
778 | retval = get_async_struct(line, &info); | ||
779 | if (retval) | ||
780 | return retval; | ||
781 | tty->driver_data = info; | 437 | tty->driver_data = info; |
782 | info->tty = tty; | 438 | tty->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0; |
783 | |||
784 | #ifdef SIMSERIAL_DEBUG | ||
785 | printk("rs_open %s, count = %d\n", tty->name, info->state->count); | ||
786 | #endif | ||
787 | info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0; | ||
788 | |||
789 | if (!tmp_buf) { | ||
790 | page = get_zeroed_page(GFP_KERNEL); | ||
791 | if (!page) | ||
792 | return -ENOMEM; | ||
793 | if (tmp_buf) | ||
794 | free_page(page); | ||
795 | else | ||
796 | tmp_buf = (unsigned char *) page; | ||
797 | } | ||
798 | |||
799 | /* | ||
800 | * If the port is the middle of closing, bail out now | ||
801 | */ | ||
802 | if (tty_hung_up_p(filp) || | ||
803 | (info->flags & ASYNC_CLOSING)) { | ||
804 | if (info->flags & ASYNC_CLOSING) | ||
805 | interruptible_sleep_on(&info->close_wait); | ||
806 | #ifdef SERIAL_DO_RESTART | ||
807 | return ((info->flags & ASYNC_HUP_NOTIFY) ? | ||
808 | -EAGAIN : -ERESTARTSYS); | ||
809 | #else | ||
810 | return -EAGAIN; | ||
811 | #endif | ||
812 | } | ||
813 | |||
814 | /* | ||
815 | * Start up serial port | ||
816 | */ | ||
817 | retval = startup(info); | ||
818 | if (retval) { | ||
819 | return retval; | ||
820 | } | ||
821 | 439 | ||
822 | /* | 440 | /* |
823 | * figure out which console to use (should be one already) | 441 | * figure out which console to use (should be one already) |
@@ -828,30 +446,21 @@ static int rs_open(struct tty_struct *tty, struct file * filp) | |||
828 | console = console->next; | 446 | console = console->next; |
829 | } | 447 | } |
830 | 448 | ||
831 | #ifdef SIMSERIAL_DEBUG | 449 | return tty_port_open(port, tty, filp); |
832 | printk("rs_open ttys%d successful\n", info->line); | ||
833 | #endif | ||
834 | return 0; | ||
835 | } | 450 | } |
836 | 451 | ||
837 | /* | 452 | /* |
838 | * /proc fs routines.... | 453 | * /proc fs routines.... |
839 | */ | 454 | */ |
840 | 455 | ||
841 | static inline void line_info(struct seq_file *m, struct serial_state *state) | ||
842 | { | ||
843 | seq_printf(m, "%d: uart:%s port:%lX irq:%d\n", | ||
844 | state->line, uart_config[state->type].name, | ||
845 | state->port, state->irq); | ||
846 | } | ||
847 | |||
848 | static int rs_proc_show(struct seq_file *m, void *v) | 456 | static int rs_proc_show(struct seq_file *m, void *v) |
849 | { | 457 | { |
850 | int i; | 458 | int i; |
851 | 459 | ||
852 | seq_printf(m, "simserinfo:1.0 driver:%s\n", serial_version); | 460 | seq_printf(m, "simserinfo:1.0\n"); |
853 | for (i = 0; i < NR_PORTS; i++) | 461 | for (i = 0; i < NR_PORTS; i++) |
854 | line_info(m, &rs_table[i]); | 462 | seq_printf(m, "%d: uart:16550 port:3F8 irq:%d\n", |
463 | i, rs_table[i].irq); | ||
855 | return 0; | 464 | return 0; |
856 | } | 465 | } |
857 | 466 | ||
@@ -868,25 +477,6 @@ static const struct file_operations rs_proc_fops = { | |||
868 | .release = single_release, | 477 | .release = single_release, |
869 | }; | 478 | }; |
870 | 479 | ||
871 | /* | ||
872 | * --------------------------------------------------------------------- | ||
873 | * rs_init() and friends | ||
874 | * | ||
875 | * rs_init() is called at boot-time to initialize the serial driver. | ||
876 | * --------------------------------------------------------------------- | ||
877 | */ | ||
878 | |||
879 | /* | ||
880 | * This routine prints out the appropriate serial driver version | ||
881 | * number, and identifies which options were configured into this | ||
882 | * driver. | ||
883 | */ | ||
884 | static inline void show_serial_version(void) | ||
885 | { | ||
886 | printk(KERN_INFO "%s version %s with", serial_name, serial_version); | ||
887 | printk(KERN_INFO " no serial options enabled\n"); | ||
888 | } | ||
889 | |||
890 | static const struct tty_operations hp_ops = { | 480 | static const struct tty_operations hp_ops = { |
891 | .open = rs_open, | 481 | .open = rs_open, |
892 | .close = rs_close, | 482 | .close = rs_close, |
@@ -901,34 +491,31 @@ static const struct tty_operations hp_ops = { | |||
901 | .unthrottle = rs_unthrottle, | 491 | .unthrottle = rs_unthrottle, |
902 | .send_xchar = rs_send_xchar, | 492 | .send_xchar = rs_send_xchar, |
903 | .set_termios = rs_set_termios, | 493 | .set_termios = rs_set_termios, |
904 | .stop = rs_stop, | ||
905 | .start = rs_start, | ||
906 | .hangup = rs_hangup, | 494 | .hangup = rs_hangup, |
907 | .wait_until_sent = rs_wait_until_sent, | ||
908 | .proc_fops = &rs_proc_fops, | 495 | .proc_fops = &rs_proc_fops, |
909 | }; | 496 | }; |
910 | 497 | ||
911 | /* | 498 | static const struct tty_port_operations hp_port_ops = { |
912 | * The serial driver boot-time initialization code! | 499 | .activate = activate, |
913 | */ | 500 | .shutdown = shutdown, |
914 | static int __init | 501 | }; |
915 | simrs_init (void) | 502 | |
503 | static int __init simrs_init(void) | ||
916 | { | 504 | { |
917 | int i, rc; | 505 | struct serial_state *state; |
918 | struct serial_state *state; | 506 | int retval; |
919 | 507 | ||
920 | if (!ia64_platform_is("hpsim")) | 508 | if (!ia64_platform_is("hpsim")) |
921 | return -ENODEV; | 509 | return -ENODEV; |
922 | 510 | ||
923 | hp_simserial_driver = alloc_tty_driver(1); | 511 | hp_simserial_driver = alloc_tty_driver(NR_PORTS); |
924 | if (!hp_simserial_driver) | 512 | if (!hp_simserial_driver) |
925 | return -ENOMEM; | 513 | return -ENOMEM; |
926 | 514 | ||
927 | show_serial_version(); | 515 | printk(KERN_INFO "SimSerial driver with no serial options enabled\n"); |
928 | 516 | ||
929 | /* Initialize the tty_driver structure */ | 517 | /* Initialize the tty_driver structure */ |
930 | 518 | ||
931 | hp_simserial_driver->owner = THIS_MODULE; | ||
932 | hp_simserial_driver->driver_name = "simserial"; | 519 | hp_simserial_driver->driver_name = "simserial"; |
933 | hp_simserial_driver->name = "ttyS"; | 520 | hp_simserial_driver->name = "ttyS"; |
934 | hp_simserial_driver->major = TTY_MAJOR; | 521 | hp_simserial_driver->major = TTY_MAJOR; |
@@ -941,31 +528,33 @@ simrs_init (void) | |||
941 | hp_simserial_driver->flags = TTY_DRIVER_REAL_RAW; | 528 | hp_simserial_driver->flags = TTY_DRIVER_REAL_RAW; |
942 | tty_set_operations(hp_simserial_driver, &hp_ops); | 529 | tty_set_operations(hp_simserial_driver, &hp_ops); |
943 | 530 | ||
944 | /* | 531 | state = rs_table; |
945 | * Let's have a little bit of fun ! | 532 | tty_port_init(&state->port); |
946 | */ | 533 | state->port.ops = &hp_port_ops; |
947 | for (i = 0, state = rs_table; i < NR_PORTS; i++,state++) { | 534 | state->port.close_delay = 0; /* XXX really 0? */ |
948 | 535 | ||
949 | if (state->type == PORT_UNKNOWN) continue; | 536 | retval = hpsim_get_irq(KEYBOARD_INTR); |
537 | if (retval < 0) { | ||
538 | printk(KERN_ERR "%s: out of interrupt vectors!\n", | ||
539 | __func__); | ||
540 | goto err_free_tty; | ||
541 | } | ||
950 | 542 | ||
951 | if (!state->irq) { | 543 | state->irq = retval; |
952 | if ((rc = assign_irq_vector(AUTO_ASSIGN)) < 0) | ||
953 | panic("%s: out of interrupt vectors!\n", | ||
954 | __func__); | ||
955 | state->irq = rc; | ||
956 | ia64_ssc_connect_irq(KEYBOARD_INTR, state->irq); | ||
957 | } | ||
958 | 544 | ||
959 | printk(KERN_INFO "ttyS%d at 0x%04lx (irq = %d) is a %s\n", | 545 | /* the port is imaginary */ |
960 | state->line, | 546 | printk(KERN_INFO "ttyS0 at 0x03f8 (irq = %d) is a 16550\n", state->irq); |
961 | state->port, state->irq, | ||
962 | uart_config[state->type].name); | ||
963 | } | ||
964 | 547 | ||
965 | if (tty_register_driver(hp_simserial_driver)) | 548 | retval = tty_register_driver(hp_simserial_driver); |
966 | panic("Couldn't register simserial driver\n"); | 549 | if (retval) { |
550 | printk(KERN_ERR "Couldn't register simserial driver\n"); | ||
551 | goto err_free_tty; | ||
552 | } | ||
967 | 553 | ||
968 | return 0; | 554 | return 0; |
555 | err_free_tty: | ||
556 | put_tty_driver(hp_simserial_driver); | ||
557 | return retval; | ||
969 | } | 558 | } |
970 | 559 | ||
971 | #ifndef MODULE | 560 | #ifndef MODULE |
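Note on the simserial rewrite: the private async_struct bookkeeping, IRQ_ports[] chain and hand-rolled open/close logic are dropped in favour of the generic tty_port helpers; the driver now only supplies .activate (first open) and .shutdown (last close) callbacks and delegates the reference counting to tty_port_open()/tty_port_close()/tty_port_hangup(). A hedged, condensed sketch of that contract, using the names from the diff (not a drop-in driver):

	/* Sketch of the tty_port callback pattern adopted above. */
	static int activate(struct tty_port *port, struct tty_struct *tty)
	{
		/* allocate the xmit buffer, request the IRQ, set alt_speed ... */
		return 0;
	}

	static void shutdown(struct tty_port *port)
	{
		/* free the IRQ and the xmit buffer */
	}

	static const struct tty_port_operations hp_port_ops = {
		.activate = activate,	/* called by tty_port_open() on first open */
		.shutdown = shutdown,	/* called when the last reference goes away */
	};

	static int rs_open(struct tty_struct *tty, struct file *filp)
	{
		struct serial_state *info = rs_table + tty->index;

		tty->driver_data = info;
		return tty_port_open(&info->port, tty, filp);
	}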
diff --git a/arch/ia64/include/asm/hpsim.h b/arch/ia64/include/asm/hpsim.h index 892ab198a9da..0fe50225daa4 100644 --- a/arch/ia64/include/asm/hpsim.h +++ b/arch/ia64/include/asm/hpsim.h | |||
@@ -10,7 +10,7 @@ int simcons_register(void); | |||
10 | struct tty_driver; | 10 | struct tty_driver; |
11 | extern struct tty_driver *hp_simserial_driver; | 11 | extern struct tty_driver *hp_simserial_driver; |
12 | 12 | ||
13 | void ia64_ssc_connect_irq(long intr, long irq); | 13 | extern int hpsim_get_irq(int intr); |
14 | void ia64_ctl_trace(long on); | 14 | void ia64_ctl_trace(long on); |
15 | 15 | ||
16 | #endif | 16 | #endif |
diff --git a/arch/ia64/include/asm/paravirt.h b/arch/ia64/include/asm/paravirt.h index 32551d304cd7..b149b88ea795 100644 --- a/arch/ia64/include/asm/paravirt.h +++ b/arch/ia64/include/asm/paravirt.h | |||
@@ -281,9 +281,9 @@ paravirt_init_missing_ticks_accounting(int cpu) | |||
281 | pv_time_ops.init_missing_ticks_accounting(cpu); | 281 | pv_time_ops.init_missing_ticks_accounting(cpu); |
282 | } | 282 | } |
283 | 283 | ||
284 | struct jump_label_key; | 284 | struct static_key; |
285 | extern struct jump_label_key paravirt_steal_enabled; | 285 | extern struct static_key paravirt_steal_enabled; |
286 | extern struct jump_label_key paravirt_steal_rq_enabled; | 286 | extern struct static_key paravirt_steal_rq_enabled; |
287 | 287 | ||
288 | static inline int | 288 | static inline int |
289 | paravirt_do_steal_accounting(unsigned long *new_itm) | 289 | paravirt_do_steal_accounting(unsigned long *new_itm) |
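Note on the paravirt hunks: struct jump_label_key is renamed to struct static_key here and in paravirt.c below, matching the jump-label rework advertised in the Kconfig help text at the top of this series. A hedged sketch of the consumer side of the renamed API (not taken from this patch):

	#include <linux/jump_label.h>

	static struct static_key sketch_key;	/* defaults to false */

	static void fast_path(void)
	{
		if (static_key_false(&sketch_key))	/* compiles to a patched nop */
			printk(KERN_DEBUG "slow branch enabled\n");
	}

	static void enable_slow_branch(void)
	{
		static_key_slow_inc(&sketch_key);	/* patch the nop into a jump */
	}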
diff --git a/arch/ia64/include/asm/socket.h b/arch/ia64/include/asm/socket.h index 4b03664e3fb5..41fc28a4a18a 100644 --- a/arch/ia64/include/asm/socket.h +++ b/arch/ia64/include/asm/socket.h | |||
@@ -73,5 +73,9 @@ | |||
73 | 73 | ||
74 | #define SO_WIFI_STATUS 41 | 74 | #define SO_WIFI_STATUS 41 |
75 | #define SCM_WIFI_STATUS SO_WIFI_STATUS | 75 | #define SCM_WIFI_STATUS SO_WIFI_STATUS |
76 | #define SO_PEEK_OFF 42 | ||
77 | |||
78 | /* Instruct lower device to use last 4-bytes of skb data as FCS */ | ||
79 | #define SO_NOFCS 43 | ||
76 | 80 | ||
77 | #endif /* _ASM_IA64_SOCKET_H */ | 81 | #endif /* _ASM_IA64_SOCKET_H */ |
diff --git a/arch/ia64/kernel/paravirt.c b/arch/ia64/kernel/paravirt.c index 100868216c55..1b22f6de2932 100644 --- a/arch/ia64/kernel/paravirt.c +++ b/arch/ia64/kernel/paravirt.c | |||
@@ -634,8 +634,8 @@ struct pv_irq_ops pv_irq_ops = { | |||
634 | * pv_time_ops | 634 | * pv_time_ops |
635 | * time operations | 635 | * time operations |
636 | */ | 636 | */ |
637 | struct jump_label_key paravirt_steal_enabled; | 637 | struct static_key paravirt_steal_enabled; |
638 | struct jump_label_key paravirt_steal_rq_enabled; | 638 | struct static_key paravirt_steal_rq_enabled; |
639 | 639 | ||
640 | static int | 640 | static int |
641 | ia64_native_do_steal_accounting(unsigned long *new_itm) | 641 | ia64_native_do_steal_accounting(unsigned long *new_itm) |
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 6d33c5cc94f0..9dc52b63fc87 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c | |||
@@ -330,9 +330,7 @@ cpu_idle (void) | |||
330 | normal_xtp(); | 330 | normal_xtp(); |
331 | #endif | 331 | #endif |
332 | } | 332 | } |
333 | preempt_enable_no_resched(); | 333 | schedule_preempt_disabled(); |
334 | schedule(); | ||
335 | preempt_disable(); | ||
336 | check_pgt_cache(); | 334 | check_pgt_cache(); |
337 | if (cpu_is_offline(cpu)) | 335 | if (cpu_is_offline(cpu)) |
338 | play_dead(); | 336 | play_dead(); |
diff --git a/arch/ia64/xen/irq_xen.c b/arch/ia64/xen/irq_xen.c index b279e142c633..3bb12230721f 100644 --- a/arch/ia64/xen/irq_xen.c +++ b/arch/ia64/xen/irq_xen.c | |||
@@ -58,7 +58,7 @@ xen_free_irq_vector(int vector) | |||
58 | 58 | ||
59 | irq_op.vector = vector; | 59 | irq_op.vector = vector; |
60 | if (HYPERVISOR_physdev_op(PHYSDEVOP_free_irq_vector, &irq_op)) | 60 | if (HYPERVISOR_physdev_op(PHYSDEVOP_free_irq_vector, &irq_op)) |
61 | printk(KERN_WARNING "%s: xen_free_irq_vecotr fail vector=%d\n", | 61 | printk(KERN_WARNING "%s: xen_free_irq_vector fail vector=%d\n", |
62 | __func__, vector); | 62 | __func__, vector); |
63 | } | 63 | } |
64 | 64 | ||
diff --git a/arch/m32r/include/asm/socket.h b/arch/m32r/include/asm/socket.h index e8b8c5bb053c..a15f40b52783 100644 --- a/arch/m32r/include/asm/socket.h +++ b/arch/m32r/include/asm/socket.h | |||
@@ -64,5 +64,9 @@ | |||
64 | 64 | ||
65 | #define SO_WIFI_STATUS 41 | 65 | #define SO_WIFI_STATUS 41 |
66 | #define SCM_WIFI_STATUS SO_WIFI_STATUS | 66 | #define SCM_WIFI_STATUS SO_WIFI_STATUS |
67 | #define SO_PEEK_OFF 42 | ||
68 | |||
69 | /* Instruct lower device to use last 4-bytes of skb data as FCS */ | ||
70 | #define SO_NOFCS 43 | ||
67 | 71 | ||
68 | #endif /* _ASM_M32R_SOCKET_H */ | 72 | #endif /* _ASM_M32R_SOCKET_H */ |
diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c index 422bea9f1dbc..3a4a32b27208 100644 --- a/arch/m32r/kernel/process.c +++ b/arch/m32r/kernel/process.c | |||
@@ -90,9 +90,7 @@ void cpu_idle (void) | |||
90 | 90 | ||
91 | idle(); | 91 | idle(); |
92 | } | 92 | } |
93 | preempt_enable_no_resched(); | 93 | schedule_preempt_disabled(); |
94 | schedule(); | ||
95 | preempt_disable(); | ||
96 | } | 94 | } |
97 | } | 95 | } |
98 | 96 | ||
diff --git a/arch/m68k/emu/nfcon.c b/arch/m68k/emu/nfcon.c index ab20dc0ff63b..8db25e806947 100644 --- a/arch/m68k/emu/nfcon.c +++ b/arch/m68k/emu/nfcon.c | |||
@@ -127,7 +127,6 @@ static int __init nfcon_init(void) | |||
127 | if (!nfcon_tty_driver) | 127 | if (!nfcon_tty_driver) |
128 | return -ENOMEM; | 128 | return -ENOMEM; |
129 | 129 | ||
130 | nfcon_tty_driver->owner = THIS_MODULE; | ||
131 | nfcon_tty_driver->driver_name = "nfcon"; | 130 | nfcon_tty_driver->driver_name = "nfcon"; |
132 | nfcon_tty_driver->name = "nfcon"; | 131 | nfcon_tty_driver->name = "nfcon"; |
133 | nfcon_tty_driver->type = TTY_DRIVER_TYPE_SYSTEM; | 132 | nfcon_tty_driver->type = TTY_DRIVER_TYPE_SYSTEM; |
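Note on the owner removal (here and in simserial above): the explicit "driver->owner = THIS_MODULE" assignments go away on the assumption that alloc_tty_driver() now records the owning module itself. A hedged sketch of the wrapper this relies on, as understood from the tty core of this era rather than from this patch:

	/* Assumed expansion; the allocator passes THIS_MODULE down itself,
	 * so setting ->owner again at the call site is redundant. */
	#define alloc_tty_driver(lines) __alloc_tty_driver(lines, THIS_MODULE)

	static int __init sketch_init(void)
	{
		struct tty_driver *drv = alloc_tty_driver(1);	/* owner set internally */

		if (!drv)
			return -ENOMEM;
		/* ... set name, type, ops, then tty_register_driver(drv) ... */
		return 0;
	}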
diff --git a/arch/m68k/include/asm/socket.h b/arch/m68k/include/asm/socket.h index d4708ce466e0..d1be684edf97 100644 --- a/arch/m68k/include/asm/socket.h +++ b/arch/m68k/include/asm/socket.h | |||
@@ -64,5 +64,9 @@ | |||
64 | 64 | ||
65 | #define SO_WIFI_STATUS 41 | 65 | #define SO_WIFI_STATUS 41 |
66 | #define SCM_WIFI_STATUS SO_WIFI_STATUS | 66 | #define SCM_WIFI_STATUS SO_WIFI_STATUS |
67 | #define SO_PEEK_OFF 42 | ||
68 | |||
69 | /* Instruct lower device to use last 4-bytes of skb data as FCS */ | ||
70 | #define SO_NOFCS 43 | ||
67 | 71 | ||
68 | #endif /* _ASM_SOCKET_H */ | 72 | #endif /* _ASM_SOCKET_H */ |
diff --git a/arch/m68k/kernel/process_mm.c b/arch/m68k/kernel/process_mm.c index 099283ee1a8f..fe4186b5fc32 100644 --- a/arch/m68k/kernel/process_mm.c +++ b/arch/m68k/kernel/process_mm.c | |||
@@ -78,9 +78,7 @@ void cpu_idle(void) | |||
78 | while (1) { | 78 | while (1) { |
79 | while (!need_resched()) | 79 | while (!need_resched()) |
80 | idle(); | 80 | idle(); |
81 | preempt_enable_no_resched(); | 81 | schedule_preempt_disabled(); |
82 | schedule(); | ||
83 | preempt_disable(); | ||
84 | } | 82 | } |
85 | } | 83 | } |
86 | 84 | ||
diff --git a/arch/m68k/kernel/process_no.c b/arch/m68k/kernel/process_no.c index 5e1078cabe0e..f7fe6c348595 100644 --- a/arch/m68k/kernel/process_no.c +++ b/arch/m68k/kernel/process_no.c | |||
@@ -73,9 +73,7 @@ void cpu_idle(void) | |||
73 | /* endless idle loop with no priority at all */ | 73 | /* endless idle loop with no priority at all */ |
74 | while (1) { | 74 | while (1) { |
75 | idle(); | 75 | idle(); |
76 | preempt_enable_no_resched(); | 76 | schedule_preempt_disabled(); |
77 | schedule(); | ||
78 | preempt_disable(); | ||
79 | } | 77 | } |
80 | } | 78 | } |
81 | 79 | ||
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c index 7dcb5bfffb75..9155f7d92669 100644 --- a/arch/microblaze/kernel/process.c +++ b/arch/microblaze/kernel/process.c | |||
@@ -110,9 +110,7 @@ void cpu_idle(void) | |||
110 | rcu_idle_exit(); | 110 | rcu_idle_exit(); |
111 | tick_nohz_idle_exit(); | 111 | tick_nohz_idle_exit(); |
112 | 112 | ||
113 | preempt_enable_no_resched(); | 113 | schedule_preempt_disabled(); |
114 | schedule(); | ||
115 | preempt_disable(); | ||
116 | check_pgt_cache(); | 114 | check_pgt_cache(); |
117 | } | 115 | } |
118 | } | 116 | } |
diff --git a/arch/mips/ath79/dev-usb.c b/arch/mips/ath79/dev-usb.c index 002d6d2afe04..36e9570e7bc4 100644 --- a/arch/mips/ath79/dev-usb.c +++ b/arch/mips/ath79/dev-usb.c | |||
@@ -17,6 +17,8 @@ | |||
17 | #include <linux/irq.h> | 17 | #include <linux/irq.h> |
18 | #include <linux/dma-mapping.h> | 18 | #include <linux/dma-mapping.h> |
19 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
20 | #include <linux/usb/ehci_pdriver.h> | ||
21 | #include <linux/usb/ohci_pdriver.h> | ||
20 | 22 | ||
21 | #include <asm/mach-ath79/ath79.h> | 23 | #include <asm/mach-ath79/ath79.h> |
22 | #include <asm/mach-ath79/ar71xx_regs.h> | 24 | #include <asm/mach-ath79/ar71xx_regs.h> |
@@ -36,14 +38,19 @@ static struct resource ath79_ohci_resources[] = { | |||
36 | }; | 38 | }; |
37 | 39 | ||
38 | static u64 ath79_ohci_dmamask = DMA_BIT_MASK(32); | 40 | static u64 ath79_ohci_dmamask = DMA_BIT_MASK(32); |
41 | |||
42 | static struct usb_ohci_pdata ath79_ohci_pdata = { | ||
43 | }; | ||
44 | |||
39 | static struct platform_device ath79_ohci_device = { | 45 | static struct platform_device ath79_ohci_device = { |
40 | .name = "ath79-ohci", | 46 | .name = "ohci-platform", |
41 | .id = -1, | 47 | .id = -1, |
42 | .resource = ath79_ohci_resources, | 48 | .resource = ath79_ohci_resources, |
43 | .num_resources = ARRAY_SIZE(ath79_ohci_resources), | 49 | .num_resources = ARRAY_SIZE(ath79_ohci_resources), |
44 | .dev = { | 50 | .dev = { |
45 | .dma_mask = &ath79_ohci_dmamask, | 51 | .dma_mask = &ath79_ohci_dmamask, |
46 | .coherent_dma_mask = DMA_BIT_MASK(32), | 52 | .coherent_dma_mask = DMA_BIT_MASK(32), |
53 | .platform_data = &ath79_ohci_pdata, | ||
47 | }, | 54 | }, |
48 | }; | 55 | }; |
49 | 56 | ||
@@ -60,8 +67,20 @@ static struct resource ath79_ehci_resources[] = { | |||
60 | }; | 67 | }; |
61 | 68 | ||
62 | static u64 ath79_ehci_dmamask = DMA_BIT_MASK(32); | 69 | static u64 ath79_ehci_dmamask = DMA_BIT_MASK(32); |
70 | |||
71 | static struct usb_ehci_pdata ath79_ehci_pdata_v1 = { | ||
72 | .has_synopsys_hc_bug = 1, | ||
73 | .port_power_off = 1, | ||
74 | }; | ||
75 | |||
76 | static struct usb_ehci_pdata ath79_ehci_pdata_v2 = { | ||
77 | .caps_offset = 0x100, | ||
78 | .has_tt = 1, | ||
79 | .port_power_off = 1, | ||
80 | }; | ||
81 | |||
63 | static struct platform_device ath79_ehci_device = { | 82 | static struct platform_device ath79_ehci_device = { |
64 | .name = "ath79-ehci", | 83 | .name = "ehci-platform", |
65 | .id = -1, | 84 | .id = -1, |
66 | .resource = ath79_ehci_resources, | 85 | .resource = ath79_ehci_resources, |
67 | .num_resources = ARRAY_SIZE(ath79_ehci_resources), | 86 | .num_resources = ARRAY_SIZE(ath79_ehci_resources), |
@@ -101,7 +120,7 @@ static void __init ath79_usb_setup(void) | |||
101 | 120 | ||
102 | ath79_ehci_resources[0].start = AR71XX_EHCI_BASE; | 121 | ath79_ehci_resources[0].start = AR71XX_EHCI_BASE; |
103 | ath79_ehci_resources[0].end = AR71XX_EHCI_BASE + AR71XX_EHCI_SIZE - 1; | 122 | ath79_ehci_resources[0].end = AR71XX_EHCI_BASE + AR71XX_EHCI_SIZE - 1; |
104 | ath79_ehci_device.name = "ar71xx-ehci"; | 123 | ath79_ehci_device.dev.platform_data = &ath79_ehci_pdata_v1; |
105 | platform_device_register(&ath79_ehci_device); | 124 | platform_device_register(&ath79_ehci_device); |
106 | } | 125 | } |
107 | 126 | ||
@@ -142,7 +161,7 @@ static void __init ar724x_usb_setup(void) | |||
142 | 161 | ||
143 | ath79_ehci_resources[0].start = AR724X_EHCI_BASE; | 162 | ath79_ehci_resources[0].start = AR724X_EHCI_BASE; |
144 | ath79_ehci_resources[0].end = AR724X_EHCI_BASE + AR724X_EHCI_SIZE - 1; | 163 | ath79_ehci_resources[0].end = AR724X_EHCI_BASE + AR724X_EHCI_SIZE - 1; |
145 | ath79_ehci_device.name = "ar724x-ehci"; | 164 | ath79_ehci_device.dev.platform_data = &ath79_ehci_pdata_v2; |
146 | platform_device_register(&ath79_ehci_device); | 165 | platform_device_register(&ath79_ehci_device); |
147 | } | 166 | } |
148 | 167 | ||
@@ -159,7 +178,7 @@ static void __init ar913x_usb_setup(void) | |||
159 | 178 | ||
160 | ath79_ehci_resources[0].start = AR913X_EHCI_BASE; | 179 | ath79_ehci_resources[0].start = AR913X_EHCI_BASE; |
161 | ath79_ehci_resources[0].end = AR913X_EHCI_BASE + AR913X_EHCI_SIZE - 1; | 180 | ath79_ehci_resources[0].end = AR913X_EHCI_BASE + AR913X_EHCI_SIZE - 1; |
162 | ath79_ehci_device.name = "ar913x-ehci"; | 181 | ath79_ehci_device.dev.platform_data = &ath79_ehci_pdata_v2; |
163 | platform_device_register(&ath79_ehci_device); | 182 | platform_device_register(&ath79_ehci_device); |
164 | } | 183 | } |
165 | 184 | ||
@@ -176,7 +195,7 @@ static void __init ar933x_usb_setup(void) | |||
176 | 195 | ||
177 | ath79_ehci_resources[0].start = AR933X_EHCI_BASE; | 196 | ath79_ehci_resources[0].start = AR933X_EHCI_BASE; |
178 | ath79_ehci_resources[0].end = AR933X_EHCI_BASE + AR933X_EHCI_SIZE - 1; | 197 | ath79_ehci_resources[0].end = AR933X_EHCI_BASE + AR933X_EHCI_SIZE - 1; |
179 | ath79_ehci_device.name = "ar933x-ehci"; | 198 | ath79_ehci_device.dev.platform_data = &ath79_ehci_pdata_v2; |
180 | platform_device_register(&ath79_ehci_device); | 199 | platform_device_register(&ath79_ehci_device); |
181 | } | 200 | } |
182 | 201 | ||
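The net effect of the dev-usb.c hunks: the ath79 code stops binding to SoC-specific driver names ("ath79-ehci", "ar71xx-ehci", ...) and instead registers the generic "ehci-platform"/"ohci-platform" drivers, describing controller quirks through platform data. A minimal board-file sketch of the same pattern (field and header names as in the diff; the example_* identifiers are made up for illustration):

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/usb/ehci_pdriver.h>

static struct usb_ehci_pdata example_ehci_pdata = {
        .caps_offset    = 0x100,        /* capability registers start at +0x100 */
        .has_tt         = 1,            /* root hub includes a transaction translator */
        .port_power_off = 1,            /* ports may be powered down when unused */
};

static u64 example_ehci_dmamask = DMA_BIT_MASK(32);

static struct platform_device example_ehci_device = {
        .name   = "ehci-platform",      /* bind to the generic EHCI platform driver */
        .id     = -1,
        .dev    = {
                .dma_mask               = &example_ehci_dmamask,
                .coherent_dma_mask      = DMA_BIT_MASK(32),
                .platform_data          = &example_ehci_pdata,
        },
};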
diff --git a/arch/mips/bcm47xx/Makefile b/arch/mips/bcm47xx/Makefile index 4add17349ff9..4389de182eb4 100644 --- a/arch/mips/bcm47xx/Makefile +++ b/arch/mips/bcm47xx/Makefile | |||
@@ -3,5 +3,5 @@ | |||
3 | # under Linux. | 3 | # under Linux. |
4 | # | 4 | # |
5 | 5 | ||
6 | obj-y += gpio.o irq.o nvram.o prom.o serial.o setup.o time.o | 6 | obj-y += gpio.o irq.o nvram.o prom.o serial.o setup.o time.o sprom.o |
7 | obj-$(CONFIG_BCM47XX_SSB) += wgt634u.o | 7 | obj-$(CONFIG_BCM47XX_SSB) += wgt634u.o |
diff --git a/arch/mips/bcm47xx/nvram.c b/arch/mips/bcm47xx/nvram.c index a84e3bb7387f..d43ceff5be47 100644 --- a/arch/mips/bcm47xx/nvram.c +++ b/arch/mips/bcm47xx/nvram.c | |||
@@ -107,8 +107,7 @@ int nvram_getenv(char *name, char *val, size_t val_len) | |||
107 | value = eq + 1; | 107 | value = eq + 1; |
108 | if ((eq - var) == strlen(name) && | 108 | if ((eq - var) == strlen(name) && |
109 | strncmp(var, name, (eq - var)) == 0) { | 109 | strncmp(var, name, (eq - var)) == 0) { |
110 | snprintf(val, val_len, "%s", value); | 110 | return snprintf(val, val_len, "%s", value); |
111 | return 0; | ||
112 | } | 111 | } |
113 | } | 112 | } |
114 | return NVRAM_ERR_ENVNOTFOUND; | 113 | return NVRAM_ERR_ENVNOTFOUND; |
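With this change a successful nvram_getenv() no longer flattens the result to 0: it propagates snprintf()'s return value, i.e. the length the value needs (excluding the trailing NUL), while a miss still returns the negative NVRAM_ERR_ENVNOTFOUND. A hedged caller sketch under that assumption ("boardtype" is just an example key):

        char buf[100];
        int len = nvram_getenv("boardtype", buf, sizeof(buf));

        if (len < 0)
                pr_warn("boardtype not set in nvram\n");
        else if (len >= (int)sizeof(buf))
                pr_warn("boardtype value truncated (%d bytes needed)\n", len);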
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c index aab6b0c40a75..19780aa91708 100644 --- a/arch/mips/bcm47xx/setup.c +++ b/arch/mips/bcm47xx/setup.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org> | 3 | * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org> |
4 | * Copyright (C) 2006 Michael Buesch <m@bues.ch> | 4 | * Copyright (C) 2006 Michael Buesch <m@bues.ch> |
5 | * Copyright (C) 2010 Waldemar Brodkorb <wbx@openadk.org> | 5 | * Copyright (C) 2010 Waldemar Brodkorb <wbx@openadk.org> |
6 | * Copyright (C) 2010-2011 Hauke Mehrtens <hauke@hauke-m.de> | 6 | * Copyright (C) 2010-2012 Hauke Mehrtens <hauke@hauke-m.de> |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify it | 8 | * This program is free software; you can redistribute it and/or modify it |
9 | * under the terms of the GNU General Public License as published by the | 9 | * under the terms of the GNU General Public License as published by the |
@@ -85,156 +85,7 @@ static void bcm47xx_machine_halt(void) | |||
85 | } | 85 | } |
86 | 86 | ||
87 | #ifdef CONFIG_BCM47XX_SSB | 87 | #ifdef CONFIG_BCM47XX_SSB |
88 | #define READ_FROM_NVRAM(_outvar, name, buf) \ | 88 | static int bcm47xx_get_sprom_ssb(struct ssb_bus *bus, struct ssb_sprom *out) |
89 | if (nvram_getprefix(prefix, name, buf, sizeof(buf)) >= 0)\ | ||
90 | sprom->_outvar = simple_strtoul(buf, NULL, 0); | ||
91 | |||
92 | #define READ_FROM_NVRAM2(_outvar, name1, name2, buf) \ | ||
93 | if (nvram_getprefix(prefix, name1, buf, sizeof(buf)) >= 0 || \ | ||
94 | nvram_getprefix(prefix, name2, buf, sizeof(buf)) >= 0)\ | ||
95 | sprom->_outvar = simple_strtoul(buf, NULL, 0); | ||
96 | |||
97 | static inline int nvram_getprefix(const char *prefix, char *name, | ||
98 | char *buf, int len) | ||
99 | { | ||
100 | if (prefix) { | ||
101 | char key[100]; | ||
102 | |||
103 | snprintf(key, sizeof(key), "%s%s", prefix, name); | ||
104 | return nvram_getenv(key, buf, len); | ||
105 | } | ||
106 | |||
107 | return nvram_getenv(name, buf, len); | ||
108 | } | ||
109 | |||
110 | static u32 nvram_getu32(const char *name, char *buf, int len) | ||
111 | { | ||
112 | int rv; | ||
113 | char key[100]; | ||
114 | u16 var0, var1; | ||
115 | |||
116 | snprintf(key, sizeof(key), "%s0", name); | ||
117 | rv = nvram_getenv(key, buf, len); | ||
118 | /* return 0 here so this looks like unset */ | ||
119 | if (rv < 0) | ||
120 | return 0; | ||
121 | var0 = simple_strtoul(buf, NULL, 0); | ||
122 | |||
123 | snprintf(key, sizeof(key), "%s1", name); | ||
124 | rv = nvram_getenv(key, buf, len); | ||
125 | if (rv < 0) | ||
126 | return 0; | ||
127 | var1 = simple_strtoul(buf, NULL, 0); | ||
128 | return var1 << 16 | var0; | ||
129 | } | ||
130 | |||
131 | static void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix) | ||
132 | { | ||
133 | char buf[100]; | ||
134 | u32 boardflags; | ||
135 | |||
136 | memset(sprom, 0, sizeof(struct ssb_sprom)); | ||
137 | |||
138 | sprom->revision = 1; /* Fallback: Old hardware does not define this. */ | ||
139 | READ_FROM_NVRAM(revision, "sromrev", buf); | ||
140 | if (nvram_getprefix(prefix, "il0macaddr", buf, sizeof(buf)) >= 0 || | ||
141 | nvram_getprefix(prefix, "macaddr", buf, sizeof(buf)) >= 0) | ||
142 | nvram_parse_macaddr(buf, sprom->il0mac); | ||
143 | if (nvram_getprefix(prefix, "et0macaddr", buf, sizeof(buf)) >= 0) | ||
144 | nvram_parse_macaddr(buf, sprom->et0mac); | ||
145 | if (nvram_getprefix(prefix, "et1macaddr", buf, sizeof(buf)) >= 0) | ||
146 | nvram_parse_macaddr(buf, sprom->et1mac); | ||
147 | READ_FROM_NVRAM(et0phyaddr, "et0phyaddr", buf); | ||
148 | READ_FROM_NVRAM(et1phyaddr, "et1phyaddr", buf); | ||
149 | READ_FROM_NVRAM(et0mdcport, "et0mdcport", buf); | ||
150 | READ_FROM_NVRAM(et1mdcport, "et1mdcport", buf); | ||
151 | READ_FROM_NVRAM(board_rev, "boardrev", buf); | ||
152 | READ_FROM_NVRAM(country_code, "ccode", buf); | ||
153 | READ_FROM_NVRAM(ant_available_a, "aa5g", buf); | ||
154 | READ_FROM_NVRAM(ant_available_bg, "aa2g", buf); | ||
155 | READ_FROM_NVRAM(pa0b0, "pa0b0", buf); | ||
156 | READ_FROM_NVRAM(pa0b1, "pa0b1", buf); | ||
157 | READ_FROM_NVRAM(pa0b2, "pa0b2", buf); | ||
158 | READ_FROM_NVRAM(pa1b0, "pa1b0", buf); | ||
159 | READ_FROM_NVRAM(pa1b1, "pa1b1", buf); | ||
160 | READ_FROM_NVRAM(pa1b2, "pa1b2", buf); | ||
161 | READ_FROM_NVRAM(pa1lob0, "pa1lob0", buf); | ||
162 | READ_FROM_NVRAM(pa1lob2, "pa1lob1", buf); | ||
163 | READ_FROM_NVRAM(pa1lob1, "pa1lob2", buf); | ||
164 | READ_FROM_NVRAM(pa1hib0, "pa1hib0", buf); | ||
165 | READ_FROM_NVRAM(pa1hib2, "pa1hib1", buf); | ||
166 | READ_FROM_NVRAM(pa1hib1, "pa1hib2", buf); | ||
167 | READ_FROM_NVRAM2(gpio0, "ledbh0", "wl0gpio0", buf); | ||
168 | READ_FROM_NVRAM2(gpio1, "ledbh1", "wl0gpio1", buf); | ||
169 | READ_FROM_NVRAM2(gpio2, "ledbh2", "wl0gpio2", buf); | ||
170 | READ_FROM_NVRAM2(gpio3, "ledbh3", "wl0gpio3", buf); | ||
171 | READ_FROM_NVRAM2(maxpwr_bg, "maxp2ga0", "pa0maxpwr", buf); | ||
172 | READ_FROM_NVRAM2(maxpwr_al, "maxp5gla0", "pa1lomaxpwr", buf); | ||
173 | READ_FROM_NVRAM2(maxpwr_a, "maxp5ga0", "pa1maxpwr", buf); | ||
174 | READ_FROM_NVRAM2(maxpwr_ah, "maxp5gha0", "pa1himaxpwr", buf); | ||
175 | READ_FROM_NVRAM2(itssi_bg, "itt5ga0", "pa0itssit", buf); | ||
176 | READ_FROM_NVRAM2(itssi_a, "itt2ga0", "pa1itssit", buf); | ||
177 | READ_FROM_NVRAM(tri2g, "tri2g", buf); | ||
178 | READ_FROM_NVRAM(tri5gl, "tri5gl", buf); | ||
179 | READ_FROM_NVRAM(tri5g, "tri5g", buf); | ||
180 | READ_FROM_NVRAM(tri5gh, "tri5gh", buf); | ||
181 | READ_FROM_NVRAM(txpid2g[0], "txpid2ga0", buf); | ||
182 | READ_FROM_NVRAM(txpid2g[1], "txpid2ga1", buf); | ||
183 | READ_FROM_NVRAM(txpid2g[2], "txpid2ga2", buf); | ||
184 | READ_FROM_NVRAM(txpid2g[3], "txpid2ga3", buf); | ||
185 | READ_FROM_NVRAM(txpid5g[0], "txpid5ga0", buf); | ||
186 | READ_FROM_NVRAM(txpid5g[1], "txpid5ga1", buf); | ||
187 | READ_FROM_NVRAM(txpid5g[2], "txpid5ga2", buf); | ||
188 | READ_FROM_NVRAM(txpid5g[3], "txpid5ga3", buf); | ||
189 | READ_FROM_NVRAM(txpid5gl[0], "txpid5gla0", buf); | ||
190 | READ_FROM_NVRAM(txpid5gl[1], "txpid5gla1", buf); | ||
191 | READ_FROM_NVRAM(txpid5gl[2], "txpid5gla2", buf); | ||
192 | READ_FROM_NVRAM(txpid5gl[3], "txpid5gla3", buf); | ||
193 | READ_FROM_NVRAM(txpid5gh[0], "txpid5gha0", buf); | ||
194 | READ_FROM_NVRAM(txpid5gh[1], "txpid5gha1", buf); | ||
195 | READ_FROM_NVRAM(txpid5gh[2], "txpid5gha2", buf); | ||
196 | READ_FROM_NVRAM(txpid5gh[3], "txpid5gha3", buf); | ||
197 | READ_FROM_NVRAM(rxpo2g, "rxpo2g", buf); | ||
198 | READ_FROM_NVRAM(rxpo5g, "rxpo5g", buf); | ||
199 | READ_FROM_NVRAM(rssisav2g, "rssisav2g", buf); | ||
200 | READ_FROM_NVRAM(rssismc2g, "rssismc2g", buf); | ||
201 | READ_FROM_NVRAM(rssismf2g, "rssismf2g", buf); | ||
202 | READ_FROM_NVRAM(bxa2g, "bxa2g", buf); | ||
203 | READ_FROM_NVRAM(rssisav5g, "rssisav5g", buf); | ||
204 | READ_FROM_NVRAM(rssismc5g, "rssismc5g", buf); | ||
205 | READ_FROM_NVRAM(rssismf5g, "rssismf5g", buf); | ||
206 | READ_FROM_NVRAM(bxa5g, "bxa5g", buf); | ||
207 | READ_FROM_NVRAM(cck2gpo, "cck2gpo", buf); | ||
208 | |||
209 | sprom->ofdm2gpo = nvram_getu32("ofdm2gpo", buf, sizeof(buf)); | ||
210 | sprom->ofdm5glpo = nvram_getu32("ofdm5glpo", buf, sizeof(buf)); | ||
211 | sprom->ofdm5gpo = nvram_getu32("ofdm5gpo", buf, sizeof(buf)); | ||
212 | sprom->ofdm5ghpo = nvram_getu32("ofdm5ghpo", buf, sizeof(buf)); | ||
213 | |||
214 | READ_FROM_NVRAM(antenna_gain.ghz24.a0, "ag0", buf); | ||
215 | READ_FROM_NVRAM(antenna_gain.ghz24.a1, "ag1", buf); | ||
216 | READ_FROM_NVRAM(antenna_gain.ghz24.a2, "ag2", buf); | ||
217 | READ_FROM_NVRAM(antenna_gain.ghz24.a3, "ag3", buf); | ||
218 | memcpy(&sprom->antenna_gain.ghz5, &sprom->antenna_gain.ghz24, | ||
219 | sizeof(sprom->antenna_gain.ghz5)); | ||
220 | |||
221 | if (nvram_getprefix(prefix, "boardflags", buf, sizeof(buf)) >= 0) { | ||
222 | boardflags = simple_strtoul(buf, NULL, 0); | ||
223 | if (boardflags) { | ||
224 | sprom->boardflags_lo = (boardflags & 0x0000FFFFU); | ||
225 | sprom->boardflags_hi = (boardflags & 0xFFFF0000U) >> 16; | ||
226 | } | ||
227 | } | ||
228 | if (nvram_getprefix(prefix, "boardflags2", buf, sizeof(buf)) >= 0) { | ||
229 | boardflags = simple_strtoul(buf, NULL, 0); | ||
230 | if (boardflags) { | ||
231 | sprom->boardflags2_lo = (boardflags & 0x0000FFFFU); | ||
232 | sprom->boardflags2_hi = (boardflags & 0xFFFF0000U) >> 16; | ||
233 | } | ||
234 | } | ||
235 | } | ||
236 | |||
237 | int bcm47xx_get_sprom(struct ssb_bus *bus, struct ssb_sprom *out) | ||
238 | { | 89 | { |
239 | char prefix[10]; | 90 | char prefix[10]; |
240 | 91 | ||
@@ -251,7 +102,7 @@ int bcm47xx_get_sprom(struct ssb_bus *bus, struct ssb_sprom *out) | |||
251 | } | 102 | } |
252 | 103 | ||
253 | static int bcm47xx_get_invariants(struct ssb_bus *bus, | 104 | static int bcm47xx_get_invariants(struct ssb_bus *bus, |
254 | struct ssb_init_invariants *iv) | 105 | struct ssb_init_invariants *iv) |
255 | { | 106 | { |
256 | char buf[20]; | 107 | char buf[20]; |
257 | 108 | ||
@@ -281,7 +132,7 @@ static void __init bcm47xx_register_ssb(void) | |||
281 | char buf[100]; | 132 | char buf[100]; |
282 | struct ssb_mipscore *mcore; | 133 | struct ssb_mipscore *mcore; |
283 | 134 | ||
284 | err = ssb_arch_register_fallback_sprom(&bcm47xx_get_sprom); | 135 | err = ssb_arch_register_fallback_sprom(&bcm47xx_get_sprom_ssb); |
285 | if (err) | 136 | if (err) |
286 | printk(KERN_WARNING "bcm47xx: someone else already registered" | 137 | printk(KERN_WARNING "bcm47xx: someone else already registered" |
287 | " a ssb SPROM callback handler (err %d)\n", err); | 138 | " a ssb SPROM callback handler (err %d)\n", err); |
@@ -308,10 +159,41 @@ static void __init bcm47xx_register_ssb(void) | |||
308 | #endif | 159 | #endif |
309 | 160 | ||
310 | #ifdef CONFIG_BCM47XX_BCMA | 161 | #ifdef CONFIG_BCM47XX_BCMA |
162 | static int bcm47xx_get_sprom_bcma(struct bcma_bus *bus, struct ssb_sprom *out) | ||
163 | { | ||
164 | char prefix[10]; | ||
165 | struct bcma_device *core; | ||
166 | |||
167 | switch (bus->hosttype) { | ||
168 | case BCMA_HOSTTYPE_PCI: | ||
169 | snprintf(prefix, sizeof(prefix), "pci/%u/%u/", | ||
170 | bus->host_pci->bus->number + 1, | ||
171 | PCI_SLOT(bus->host_pci->devfn)); | ||
172 | bcm47xx_fill_sprom(out, prefix); | ||
173 | return 0; | ||
174 | case BCMA_HOSTTYPE_SOC: | ||
175 | bcm47xx_fill_sprom_ethernet(out, NULL); | ||
176 | core = bcma_find_core(bus, BCMA_CORE_80211); | ||
177 | if (core) { | ||
178 | snprintf(prefix, sizeof(prefix), "sb/%u/", | ||
179 | core->core_index); | ||
180 | bcm47xx_fill_sprom(out, prefix); | ||
181 | } | ||
182 | return 0; | ||
183 | default: | ||
184 | pr_warn("bcm47xx: unable to fill SPROM for given bustype.\n"); | ||
185 | return -EINVAL; | ||
186 | } | ||
187 | } | ||
188 | |||
311 | static void __init bcm47xx_register_bcma(void) | 189 | static void __init bcm47xx_register_bcma(void) |
312 | { | 190 | { |
313 | int err; | 191 | int err; |
314 | 192 | ||
193 | err = bcma_arch_register_fallback_sprom(&bcm47xx_get_sprom_bcma); | ||
194 | if (err) | ||
195 | pr_warn("bcm47xx: someone else already registered a bcma SPROM callback handler (err %d)\n", err); | ||
196 | |||
315 | err = bcma_host_soc_register(&bcm47xx_bus.bcma); | 197 | err = bcma_host_soc_register(&bcm47xx_bus.bcma); |
316 | if (err) | 198 | if (err) |
317 | panic("Failed to initialize BCMA bus (err %d)", err); | 199 | panic("Failed to initialize BCMA bus (err %d)", err); |
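Both bcm47xx_register_ssb() and bcm47xx_register_bcma() now hand the bus layer a fallback SPROM callback, which the bus core invokes only when no hardware SPROM is present; the callback fills a struct ssb_sprom from NVRAM and returns 0 or a negative errno. A minimal sketch of that contract (the example_* name is invented; the real callbacks also derive a per-device NVRAM prefix, as shown above):

static int example_get_sprom_bcma(struct bcma_bus *bus, struct ssb_sprom *out)
{
        if (bus->hosttype != BCMA_HOSTTYPE_SOC)
                return -EINVAL;                 /* only handle the on-SoC bus here */

        bcm47xx_fill_sprom(out, NULL);          /* fall back to un-prefixed NVRAM keys */
        return 0;
}

/* Registered early in board setup, before the bus is scanned: */
/*      bcma_arch_register_fallback_sprom(&example_get_sprom_bcma);    */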
diff --git a/arch/mips/bcm47xx/sprom.c b/arch/mips/bcm47xx/sprom.c new file mode 100644 index 000000000000..5c8dcd2a8a93 --- /dev/null +++ b/arch/mips/bcm47xx/sprom.c | |||
@@ -0,0 +1,620 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2004 Florian Schirmer <jolt@tuxbox.org> | ||
3 | * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org> | ||
4 | * Copyright (C) 2006 Michael Buesch <m@bues.ch> | ||
5 | * Copyright (C) 2010 Waldemar Brodkorb <wbx@openadk.org> | ||
6 | * Copyright (C) 2010-2012 Hauke Mehrtens <hauke@hauke-m.de> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | * | ||
13 | * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED | ||
14 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
15 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | ||
16 | * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
17 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
18 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
19 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | ||
20 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
21 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
22 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
23 | * | ||
24 | * You should have received a copy of the GNU General Public License along | ||
25 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
26 | * 675 Mass Ave, Cambridge, MA 02139, USA. | ||
27 | */ | ||
28 | |||
29 | #include <bcm47xx.h> | ||
30 | #include <nvram.h> | ||
31 | |||
32 | static void create_key(const char *prefix, const char *postfix, | ||
33 | const char *name, char *buf, int len) | ||
34 | { | ||
35 | if (prefix && postfix) | ||
36 | snprintf(buf, len, "%s%s%s", prefix, name, postfix); | ||
37 | else if (prefix) | ||
38 | snprintf(buf, len, "%s%s", prefix, name); | ||
39 | else if (postfix) | ||
40 | snprintf(buf, len, "%s%s", name, postfix); | ||
41 | else | ||
42 | snprintf(buf, len, "%s", name); | ||
43 | } | ||
44 | |||
45 | #define NVRAM_READ_VAL(type) \ | ||
46 | static void nvram_read_ ## type (const char *prefix, \ | ||
47 | const char *postfix, const char *name, \ | ||
48 | type *val, type allset) \ | ||
49 | { \ | ||
50 | char buf[100]; \ | ||
51 | char key[40]; \ | ||
52 | int err; \ | ||
53 | type var; \ | ||
54 | \ | ||
55 | create_key(prefix, postfix, name, key, sizeof(key)); \ | ||
56 | \ | ||
57 | err = nvram_getenv(key, buf, sizeof(buf)); \ | ||
58 | if (err < 0) \ | ||
59 | return; \ | ||
60 | err = kstrto ## type (buf, 0, &var); \ | ||
61 | if (err) { \ | ||
62 | pr_warn("can not parse nvram name %s with value %s" \ | ||
63 | " got %i", key, buf, err); \ | ||
64 | return; \ | ||
65 | } \ | ||
66 | if (allset && var == allset) \ | ||
67 | return; \ | ||
68 | *val = var; \ | ||
69 | } | ||
70 | |||
71 | NVRAM_READ_VAL(u8) | ||
72 | NVRAM_READ_VAL(s8) | ||
73 | NVRAM_READ_VAL(u16) | ||
74 | NVRAM_READ_VAL(u32) | ||
75 | |||
76 | #undef NVRAM_READ_VAL | ||
77 | |||
78 | static void nvram_read_u32_2(const char *prefix, const char *name, | ||
79 | u16 *val_lo, u16 *val_hi) | ||
80 | { | ||
81 | char buf[100]; | ||
82 | char key[40]; | ||
83 | int err; | ||
84 | u32 val; | ||
85 | |||
86 | create_key(prefix, NULL, name, key, sizeof(key)); | ||
87 | |||
88 | err = nvram_getenv(key, buf, sizeof(buf)); | ||
89 | if (err < 0) | ||
90 | return; | ||
91 | err = kstrtou32(buf, 0, &val); | ||
92 | if (err) { | ||
93 | pr_warn("can not parse nvram name %s with value %s got %i", | ||
94 | key, buf, err); | ||
95 | return; | ||
96 | } | ||
97 | *val_lo = (val & 0x0000FFFFU); | ||
98 | *val_hi = (val & 0xFFFF0000U) >> 16; | ||
99 | } | ||
100 | |||
101 | static void nvram_read_leddc(const char *prefix, const char *name, | ||
102 | u8 *leddc_on_time, u8 *leddc_off_time) | ||
103 | { | ||
104 | char buf[100]; | ||
105 | char key[40]; | ||
106 | int err; | ||
107 | u32 val; | ||
108 | |||
109 | create_key(prefix, NULL, name, key, sizeof(key)); | ||
110 | |||
111 | err = nvram_getenv(key, buf, sizeof(buf)); | ||
112 | if (err < 0) | ||
113 | return; | ||
114 | err = kstrtou32(buf, 0, &val); | ||
115 | if (err) { | ||
116 | pr_warn("can not parse nvram name %s with value %s got %i", | ||
117 | key, buf, err); | ||
118 | return; | ||
119 | } | ||
120 | |||
121 | if (val == 0xffff || val == 0xffffffff) | ||
122 | return; | ||
123 | |||
124 | *leddc_on_time = val & 0xff; | ||
125 | *leddc_off_time = (val >> 16) & 0xff; | ||
126 | } | ||
127 | |||
128 | static void nvram_read_macaddr(const char *prefix, const char *name, | ||
129 | u8 (*val)[6]) | ||
130 | { | ||
131 | char buf[100]; | ||
132 | char key[40]; | ||
133 | int err; | ||
134 | |||
135 | create_key(prefix, NULL, name, key, sizeof(key)); | ||
136 | |||
137 | err = nvram_getenv(key, buf, sizeof(buf)); | ||
138 | if (err < 0) | ||
139 | return; | ||
140 | nvram_parse_macaddr(buf, *val); | ||
141 | } | ||
142 | |||
143 | static void nvram_read_alpha2(const char *prefix, const char *name, | ||
144 | char (*val)[2]) | ||
145 | { | ||
146 | char buf[10]; | ||
147 | char key[40]; | ||
148 | int err; | ||
149 | |||
150 | create_key(prefix, NULL, name, key, sizeof(key)); | ||
151 | |||
152 | err = nvram_getenv(key, buf, sizeof(buf)); | ||
153 | if (err < 0) | ||
154 | return; | ||
155 | if (buf[0] == '0') | ||
156 | return; | ||
157 | if (strlen(buf) > 2) { | ||
158 | pr_warn("alpha2 is too long %s", buf); | ||
159 | return; | ||
160 | } | ||
161 | memcpy(val, buf, sizeof(val)); | ||
162 | } | ||
163 | |||
164 | static void bcm47xx_fill_sprom_r1234589(struct ssb_sprom *sprom, | ||
165 | const char *prefix) | ||
166 | { | ||
167 | nvram_read_u16(prefix, NULL, "boardrev", &sprom->board_rev, 0); | ||
168 | nvram_read_u16(prefix, NULL, "boardnum", &sprom->board_num, 0); | ||
169 | nvram_read_u8(prefix, NULL, "ledbh0", &sprom->gpio0, 0xff); | ||
170 | nvram_read_u8(prefix, NULL, "ledbh1", &sprom->gpio1, 0xff); | ||
171 | nvram_read_u8(prefix, NULL, "ledbh2", &sprom->gpio2, 0xff); | ||
172 | nvram_read_u8(prefix, NULL, "ledbh3", &sprom->gpio3, 0xff); | ||
173 | nvram_read_u8(prefix, NULL, "aa2g", &sprom->ant_available_bg, 0); | ||
174 | nvram_read_u8(prefix, NULL, "aa5g", &sprom->ant_available_a, 0); | ||
175 | nvram_read_s8(prefix, NULL, "ag0", &sprom->antenna_gain.a0, 0); | ||
176 | nvram_read_s8(prefix, NULL, "ag1", &sprom->antenna_gain.a1, 0); | ||
177 | nvram_read_alpha2(prefix, "ccode", &sprom->alpha2); | ||
178 | } | ||
179 | |||
180 | static void bcm47xx_fill_sprom_r12389(struct ssb_sprom *sprom, | ||
181 | const char *prefix) | ||
182 | { | ||
183 | nvram_read_u16(prefix, NULL, "pa0b0", &sprom->pa0b0, 0); | ||
184 | nvram_read_u16(prefix, NULL, "pa0b1", &sprom->pa0b1, 0); | ||
185 | nvram_read_u16(prefix, NULL, "pa0b2", &sprom->pa0b2, 0); | ||
186 | nvram_read_u8(prefix, NULL, "pa0itssit", &sprom->itssi_bg, 0); | ||
187 | nvram_read_u8(prefix, NULL, "pa0maxpwr", &sprom->maxpwr_bg, 0); | ||
188 | nvram_read_u16(prefix, NULL, "pa1b0", &sprom->pa1b0, 0); | ||
189 | nvram_read_u16(prefix, NULL, "pa1b1", &sprom->pa1b1, 0); | ||
190 | nvram_read_u16(prefix, NULL, "pa1b2", &sprom->pa1b2, 0); | ||
191 | nvram_read_u8(prefix, NULL, "pa1itssit", &sprom->itssi_a, 0); | ||
192 | nvram_read_u8(prefix, NULL, "pa1maxpwr", &sprom->maxpwr_a, 0); | ||
193 | } | ||
194 | |||
195 | static void bcm47xx_fill_sprom_r1(struct ssb_sprom *sprom, const char *prefix) | ||
196 | { | ||
197 | nvram_read_u16(prefix, NULL, "boardflags", &sprom->boardflags_lo, 0); | ||
198 | nvram_read_u8(prefix, NULL, "cc", &sprom->country_code, 0); | ||
199 | } | ||
200 | |||
201 | static void bcm47xx_fill_sprom_r2389(struct ssb_sprom *sprom, | ||
202 | const char *prefix) | ||
203 | { | ||
204 | nvram_read_u8(prefix, NULL, "opo", &sprom->opo, 0); | ||
205 | nvram_read_u16(prefix, NULL, "pa1lob0", &sprom->pa1lob0, 0); | ||
206 | nvram_read_u16(prefix, NULL, "pa1lob1", &sprom->pa1lob1, 0); | ||
207 | nvram_read_u16(prefix, NULL, "pa1lob2", &sprom->pa1lob2, 0); | ||
208 | nvram_read_u16(prefix, NULL, "pa1hib0", &sprom->pa1hib0, 0); | ||
209 | nvram_read_u16(prefix, NULL, "pa1hib1", &sprom->pa1hib1, 0); | ||
210 | nvram_read_u16(prefix, NULL, "pa1hib2", &sprom->pa1hib2, 0); | ||
211 | nvram_read_u8(prefix, NULL, "pa1lomaxpwr", &sprom->maxpwr_al, 0); | ||
212 | nvram_read_u8(prefix, NULL, "pa1himaxpwr", &sprom->maxpwr_ah, 0); | ||
213 | } | ||
214 | |||
215 | static void bcm47xx_fill_sprom_r2(struct ssb_sprom *sprom, const char *prefix) | ||
216 | { | ||
217 | nvram_read_u32_2(prefix, "boardflags", &sprom->boardflags_lo, | ||
218 | &sprom->boardflags_hi); | ||
219 | nvram_read_u16(prefix, NULL, "boardtype", &sprom->board_type, 0); | ||
220 | } | ||
221 | |||
222 | static void bcm47xx_fill_sprom_r389(struct ssb_sprom *sprom, const char *prefix) | ||
223 | { | ||
224 | nvram_read_u8(prefix, NULL, "bxa2g", &sprom->bxa2g, 0); | ||
225 | nvram_read_u8(prefix, NULL, "rssisav2g", &sprom->rssisav2g, 0); | ||
226 | nvram_read_u8(prefix, NULL, "rssismc2g", &sprom->rssismc2g, 0); | ||
227 | nvram_read_u8(prefix, NULL, "rssismf2g", &sprom->rssismf2g, 0); | ||
228 | nvram_read_u8(prefix, NULL, "bxa5g", &sprom->bxa5g, 0); | ||
229 | nvram_read_u8(prefix, NULL, "rssisav5g", &sprom->rssisav5g, 0); | ||
230 | nvram_read_u8(prefix, NULL, "rssismc5g", &sprom->rssismc5g, 0); | ||
231 | nvram_read_u8(prefix, NULL, "rssismf5g", &sprom->rssismf5g, 0); | ||
232 | nvram_read_u8(prefix, NULL, "tri2g", &sprom->tri2g, 0); | ||
233 | nvram_read_u8(prefix, NULL, "tri5g", &sprom->tri5g, 0); | ||
234 | nvram_read_u8(prefix, NULL, "tri5gl", &sprom->tri5gl, 0); | ||
235 | nvram_read_u8(prefix, NULL, "tri5gh", &sprom->tri5gh, 0); | ||
236 | nvram_read_s8(prefix, NULL, "rxpo2g", &sprom->rxpo2g, 0); | ||
237 | nvram_read_s8(prefix, NULL, "rxpo5g", &sprom->rxpo5g, 0); | ||
238 | } | ||
239 | |||
240 | static void bcm47xx_fill_sprom_r3(struct ssb_sprom *sprom, const char *prefix) | ||
241 | { | ||
242 | nvram_read_u32_2(prefix, "boardflags", &sprom->boardflags_lo, | ||
243 | &sprom->boardflags_hi); | ||
244 | nvram_read_u16(prefix, NULL, "boardtype", &sprom->board_type, 0); | ||
245 | nvram_read_u8(prefix, NULL, "regrev", &sprom->regrev, 0); | ||
246 | nvram_read_leddc(prefix, "leddc", &sprom->leddc_on_time, | ||
247 | &sprom->leddc_off_time); | ||
248 | } | ||
249 | |||
250 | static void bcm47xx_fill_sprom_r4589(struct ssb_sprom *sprom, | ||
251 | const char *prefix) | ||
252 | { | ||
253 | nvram_read_u32_2(prefix, "boardflags", &sprom->boardflags_lo, | ||
254 | &sprom->boardflags_hi); | ||
255 | nvram_read_u32_2(prefix, "boardflags2", &sprom->boardflags2_lo, | ||
256 | &sprom->boardflags2_hi); | ||
257 | nvram_read_u16(prefix, NULL, "boardtype", &sprom->board_type, 0); | ||
258 | nvram_read_u8(prefix, NULL, "regrev", &sprom->regrev, 0); | ||
259 | nvram_read_s8(prefix, NULL, "ag2", &sprom->antenna_gain.a2, 0); | ||
260 | nvram_read_s8(prefix, NULL, "ag3", &sprom->antenna_gain.a3, 0); | ||
261 | nvram_read_u8(prefix, NULL, "txchain", &sprom->txchain, 0xf); | ||
262 | nvram_read_u8(prefix, NULL, "rxchain", &sprom->rxchain, 0xf); | ||
263 | nvram_read_u8(prefix, NULL, "antswitch", &sprom->antswitch, 0xff); | ||
264 | nvram_read_leddc(prefix, "leddc", &sprom->leddc_on_time, | ||
265 | &sprom->leddc_off_time); | ||
266 | } | ||
267 | |||
268 | static void bcm47xx_fill_sprom_r458(struct ssb_sprom *sprom, const char *prefix) | ||
269 | { | ||
270 | nvram_read_u16(prefix, NULL, "cck2gpo", &sprom->cck2gpo, 0); | ||
271 | nvram_read_u32(prefix, NULL, "ofdm2gpo", &sprom->ofdm2gpo, 0); | ||
272 | nvram_read_u32(prefix, NULL, "ofdm5gpo", &sprom->ofdm5gpo, 0); | ||
273 | nvram_read_u32(prefix, NULL, "ofdm5glpo", &sprom->ofdm5glpo, 0); | ||
274 | nvram_read_u32(prefix, NULL, "ofdm5ghpo", &sprom->ofdm5ghpo, 0); | ||
275 | nvram_read_u16(prefix, NULL, "cddpo", &sprom->cddpo, 0); | ||
276 | nvram_read_u16(prefix, NULL, "stbcpo", &sprom->stbcpo, 0); | ||
277 | nvram_read_u16(prefix, NULL, "bw40po", &sprom->bw40po, 0); | ||
278 | nvram_read_u16(prefix, NULL, "bwduppo", &sprom->bwduppo, 0); | ||
279 | nvram_read_u16(prefix, NULL, "mcs2gpo0", &sprom->mcs2gpo[0], 0); | ||
280 | nvram_read_u16(prefix, NULL, "mcs2gpo1", &sprom->mcs2gpo[1], 0); | ||
281 | nvram_read_u16(prefix, NULL, "mcs2gpo2", &sprom->mcs2gpo[2], 0); | ||
282 | nvram_read_u16(prefix, NULL, "mcs2gpo3", &sprom->mcs2gpo[3], 0); | ||
283 | nvram_read_u16(prefix, NULL, "mcs2gpo4", &sprom->mcs2gpo[4], 0); | ||
284 | nvram_read_u16(prefix, NULL, "mcs2gpo5", &sprom->mcs2gpo[5], 0); | ||
285 | nvram_read_u16(prefix, NULL, "mcs2gpo6", &sprom->mcs2gpo[6], 0); | ||
286 | nvram_read_u16(prefix, NULL, "mcs2gpo7", &sprom->mcs2gpo[7], 0); | ||
287 | nvram_read_u16(prefix, NULL, "mcs5gpo0", &sprom->mcs5gpo[0], 0); | ||
288 | nvram_read_u16(prefix, NULL, "mcs5gpo1", &sprom->mcs5gpo[1], 0); | ||
289 | nvram_read_u16(prefix, NULL, "mcs5gpo2", &sprom->mcs5gpo[2], 0); | ||
290 | nvram_read_u16(prefix, NULL, "mcs5gpo3", &sprom->mcs5gpo[3], 0); | ||
291 | nvram_read_u16(prefix, NULL, "mcs5gpo4", &sprom->mcs5gpo[4], 0); | ||
292 | nvram_read_u16(prefix, NULL, "mcs5gpo5", &sprom->mcs5gpo[5], 0); | ||
293 | nvram_read_u16(prefix, NULL, "mcs5gpo6", &sprom->mcs5gpo[6], 0); | ||
294 | nvram_read_u16(prefix, NULL, "mcs5gpo7", &sprom->mcs5gpo[7], 0); | ||
295 | nvram_read_u16(prefix, NULL, "mcs5glpo0", &sprom->mcs5glpo[0], 0); | ||
296 | nvram_read_u16(prefix, NULL, "mcs5glpo1", &sprom->mcs5glpo[1], 0); | ||
297 | nvram_read_u16(prefix, NULL, "mcs5glpo2", &sprom->mcs5glpo[2], 0); | ||
298 | nvram_read_u16(prefix, NULL, "mcs5glpo3", &sprom->mcs5glpo[3], 0); | ||
299 | nvram_read_u16(prefix, NULL, "mcs5glpo4", &sprom->mcs5glpo[4], 0); | ||
300 | nvram_read_u16(prefix, NULL, "mcs5glpo5", &sprom->mcs5glpo[5], 0); | ||
301 | nvram_read_u16(prefix, NULL, "mcs5glpo6", &sprom->mcs5glpo[6], 0); | ||
302 | nvram_read_u16(prefix, NULL, "mcs5glpo7", &sprom->mcs5glpo[7], 0); | ||
303 | nvram_read_u16(prefix, NULL, "mcs5ghpo0", &sprom->mcs5ghpo[0], 0); | ||
304 | nvram_read_u16(prefix, NULL, "mcs5ghpo1", &sprom->mcs5ghpo[1], 0); | ||
305 | nvram_read_u16(prefix, NULL, "mcs5ghpo2", &sprom->mcs5ghpo[2], 0); | ||
306 | nvram_read_u16(prefix, NULL, "mcs5ghpo3", &sprom->mcs5ghpo[3], 0); | ||
307 | nvram_read_u16(prefix, NULL, "mcs5ghpo4", &sprom->mcs5ghpo[4], 0); | ||
308 | nvram_read_u16(prefix, NULL, "mcs5ghpo5", &sprom->mcs5ghpo[5], 0); | ||
309 | nvram_read_u16(prefix, NULL, "mcs5ghpo6", &sprom->mcs5ghpo[6], 0); | ||
310 | nvram_read_u16(prefix, NULL, "mcs5ghpo7", &sprom->mcs5ghpo[7], 0); | ||
311 | } | ||
312 | |||
313 | static void bcm47xx_fill_sprom_r45(struct ssb_sprom *sprom, const char *prefix) | ||
314 | { | ||
315 | nvram_read_u8(prefix, NULL, "txpid2ga0", &sprom->txpid2g[0], 0); | ||
316 | nvram_read_u8(prefix, NULL, "txpid2ga1", &sprom->txpid2g[1], 0); | ||
317 | nvram_read_u8(prefix, NULL, "txpid2ga2", &sprom->txpid2g[2], 0); | ||
318 | nvram_read_u8(prefix, NULL, "txpid2ga3", &sprom->txpid2g[3], 0); | ||
319 | nvram_read_u8(prefix, NULL, "txpid5ga0", &sprom->txpid5g[0], 0); | ||
320 | nvram_read_u8(prefix, NULL, "txpid5ga1", &sprom->txpid5g[1], 0); | ||
321 | nvram_read_u8(prefix, NULL, "txpid5ga2", &sprom->txpid5g[2], 0); | ||
322 | nvram_read_u8(prefix, NULL, "txpid5ga3", &sprom->txpid5g[3], 0); | ||
323 | nvram_read_u8(prefix, NULL, "txpid5gla0", &sprom->txpid5gl[0], 0); | ||
324 | nvram_read_u8(prefix, NULL, "txpid5gla1", &sprom->txpid5gl[1], 0); | ||
325 | nvram_read_u8(prefix, NULL, "txpid5gla2", &sprom->txpid5gl[2], 0); | ||
326 | nvram_read_u8(prefix, NULL, "txpid5gla3", &sprom->txpid5gl[3], 0); | ||
327 | nvram_read_u8(prefix, NULL, "txpid5gha0", &sprom->txpid5gh[0], 0); | ||
328 | nvram_read_u8(prefix, NULL, "txpid5gha1", &sprom->txpid5gh[1], 0); | ||
329 | nvram_read_u8(prefix, NULL, "txpid5gha2", &sprom->txpid5gh[2], 0); | ||
330 | nvram_read_u8(prefix, NULL, "txpid5gha3", &sprom->txpid5gh[3], 0); | ||
331 | } | ||
332 | |||
333 | static void bcm47xx_fill_sprom_r89(struct ssb_sprom *sprom, const char *prefix) | ||
334 | { | ||
335 | nvram_read_u8(prefix, NULL, "tssipos2g", &sprom->fem.ghz2.tssipos, 0); | ||
336 | nvram_read_u8(prefix, NULL, "extpagain2g", | ||
337 | &sprom->fem.ghz2.extpa_gain, 0); | ||
338 | nvram_read_u8(prefix, NULL, "pdetrange2g", | ||
339 | &sprom->fem.ghz2.pdet_range, 0); | ||
340 | nvram_read_u8(prefix, NULL, "triso2g", &sprom->fem.ghz2.tr_iso, 0); | ||
341 | nvram_read_u8(prefix, NULL, "antswctl2g", &sprom->fem.ghz2.antswlut, 0); | ||
342 | nvram_read_u8(prefix, NULL, "tssipos5g", &sprom->fem.ghz5.tssipos, 0); | ||
343 | nvram_read_u8(prefix, NULL, "extpagain5g", | ||
344 | &sprom->fem.ghz5.extpa_gain, 0); | ||
345 | nvram_read_u8(prefix, NULL, "pdetrange5g", | ||
346 | &sprom->fem.ghz5.pdet_range, 0); | ||
347 | nvram_read_u8(prefix, NULL, "triso5g", &sprom->fem.ghz5.tr_iso, 0); | ||
348 | nvram_read_u8(prefix, NULL, "antswctl5g", &sprom->fem.ghz5.antswlut, 0); | ||
349 | nvram_read_u8(prefix, NULL, "tempthresh", &sprom->tempthresh, 0); | ||
350 | nvram_read_u8(prefix, NULL, "tempoffset", &sprom->tempoffset, 0); | ||
351 | nvram_read_u16(prefix, NULL, "rawtempsense", &sprom->rawtempsense, 0); | ||
352 | nvram_read_u8(prefix, NULL, "measpower", &sprom->measpower, 0); | ||
353 | nvram_read_u8(prefix, NULL, "tempsense_slope", | ||
354 | &sprom->tempsense_slope, 0); | ||
355 | nvram_read_u8(prefix, NULL, "tempcorrx", &sprom->tempcorrx, 0); | ||
356 | nvram_read_u8(prefix, NULL, "tempsense_option", | ||
357 | &sprom->tempsense_option, 0); | ||
358 | nvram_read_u8(prefix, NULL, "freqoffset_corr", | ||
359 | &sprom->freqoffset_corr, 0); | ||
360 | nvram_read_u8(prefix, NULL, "iqcal_swp_dis", &sprom->iqcal_swp_dis, 0); | ||
361 | nvram_read_u8(prefix, NULL, "hw_iqcal_en", &sprom->hw_iqcal_en, 0); | ||
362 | nvram_read_u8(prefix, NULL, "elna2g", &sprom->elna2g, 0); | ||
363 | nvram_read_u8(prefix, NULL, "elna5g", &sprom->elna5g, 0); | ||
364 | nvram_read_u8(prefix, NULL, "phycal_tempdelta", | ||
365 | &sprom->phycal_tempdelta, 0); | ||
366 | nvram_read_u8(prefix, NULL, "temps_period", &sprom->temps_period, 0); | ||
367 | nvram_read_u8(prefix, NULL, "temps_hysteresis", | ||
368 | &sprom->temps_hysteresis, 0); | ||
369 | nvram_read_u8(prefix, NULL, "measpower1", &sprom->measpower1, 0); | ||
370 | nvram_read_u8(prefix, NULL, "measpower2", &sprom->measpower2, 0); | ||
371 | nvram_read_u8(prefix, NULL, "rxgainerr2ga0", | ||
372 | &sprom->rxgainerr2ga[0], 0); | ||
373 | nvram_read_u8(prefix, NULL, "rxgainerr2ga1", | ||
374 | &sprom->rxgainerr2ga[1], 0); | ||
375 | nvram_read_u8(prefix, NULL, "rxgainerr2ga2", | ||
376 | &sprom->rxgainerr2ga[2], 0); | ||
377 | nvram_read_u8(prefix, NULL, "rxgainerr5gla0", | ||
378 | &sprom->rxgainerr5gla[0], 0); | ||
379 | nvram_read_u8(prefix, NULL, "rxgainerr5gla1", | ||
380 | &sprom->rxgainerr5gla[1], 0); | ||
381 | nvram_read_u8(prefix, NULL, "rxgainerr5gla2", | ||
382 | &sprom->rxgainerr5gla[2], 0); | ||
383 | nvram_read_u8(prefix, NULL, "rxgainerr5gma0", | ||
384 | &sprom->rxgainerr5gma[0], 0); | ||
385 | nvram_read_u8(prefix, NULL, "rxgainerr5gma1", | ||
386 | &sprom->rxgainerr5gma[1], 0); | ||
387 | nvram_read_u8(prefix, NULL, "rxgainerr5gma2", | ||
388 | &sprom->rxgainerr5gma[2], 0); | ||
389 | nvram_read_u8(prefix, NULL, "rxgainerr5gha0", | ||
390 | &sprom->rxgainerr5gha[0], 0); | ||
391 | nvram_read_u8(prefix, NULL, "rxgainerr5gha1", | ||
392 | &sprom->rxgainerr5gha[1], 0); | ||
393 | nvram_read_u8(prefix, NULL, "rxgainerr5gha2", | ||
394 | &sprom->rxgainerr5gha[2], 0); | ||
395 | nvram_read_u8(prefix, NULL, "rxgainerr5gua0", | ||
396 | &sprom->rxgainerr5gua[0], 0); | ||
397 | nvram_read_u8(prefix, NULL, "rxgainerr5gua1", | ||
398 | &sprom->rxgainerr5gua[1], 0); | ||
399 | nvram_read_u8(prefix, NULL, "rxgainerr5gua2", | ||
400 | &sprom->rxgainerr5gua[2], 0); | ||
401 | nvram_read_u8(prefix, NULL, "noiselvl2ga0", &sprom->noiselvl2ga[0], 0); | ||
402 | nvram_read_u8(prefix, NULL, "noiselvl2ga1", &sprom->noiselvl2ga[1], 0); | ||
403 | nvram_read_u8(prefix, NULL, "noiselvl2ga2", &sprom->noiselvl2ga[2], 0); | ||
404 | nvram_read_u8(prefix, NULL, "noiselvl5gla0", | ||
405 | &sprom->noiselvl5gla[0], 0); | ||
406 | nvram_read_u8(prefix, NULL, "noiselvl5gla1", | ||
407 | &sprom->noiselvl5gla[1], 0); | ||
408 | nvram_read_u8(prefix, NULL, "noiselvl5gla2", | ||
409 | &sprom->noiselvl5gla[2], 0); | ||
410 | nvram_read_u8(prefix, NULL, "noiselvl5gma0", | ||
411 | &sprom->noiselvl5gma[0], 0); | ||
412 | nvram_read_u8(prefix, NULL, "noiselvl5gma1", | ||
413 | &sprom->noiselvl5gma[1], 0); | ||
414 | nvram_read_u8(prefix, NULL, "noiselvl5gma2", | ||
415 | &sprom->noiselvl5gma[2], 0); | ||
416 | nvram_read_u8(prefix, NULL, "noiselvl5gha0", | ||
417 | &sprom->noiselvl5gha[0], 0); | ||
418 | nvram_read_u8(prefix, NULL, "noiselvl5gha1", | ||
419 | &sprom->noiselvl5gha[1], 0); | ||
420 | nvram_read_u8(prefix, NULL, "noiselvl5gha2", | ||
421 | &sprom->noiselvl5gha[2], 0); | ||
422 | nvram_read_u8(prefix, NULL, "noiselvl5gua0", | ||
423 | &sprom->noiselvl5gua[0], 0); | ||
424 | nvram_read_u8(prefix, NULL, "noiselvl5gua1", | ||
425 | &sprom->noiselvl5gua[1], 0); | ||
426 | nvram_read_u8(prefix, NULL, "noiselvl5gua2", | ||
427 | &sprom->noiselvl5gua[2], 0); | ||
428 | nvram_read_u8(prefix, NULL, "pcieingress_war", | ||
429 | &sprom->pcieingress_war, 0); | ||
430 | } | ||
431 | |||
432 | static void bcm47xx_fill_sprom_r9(struct ssb_sprom *sprom, const char *prefix) | ||
433 | { | ||
434 | nvram_read_u16(prefix, NULL, "cckbw202gpo", &sprom->cckbw202gpo, 0); | ||
435 | nvram_read_u16(prefix, NULL, "cckbw20ul2gpo", &sprom->cckbw20ul2gpo, 0); | ||
436 | nvram_read_u32(prefix, NULL, "legofdmbw202gpo", | ||
437 | &sprom->legofdmbw202gpo, 0); | ||
438 | nvram_read_u32(prefix, NULL, "legofdmbw20ul2gpo", | ||
439 | &sprom->legofdmbw20ul2gpo, 0); | ||
440 | nvram_read_u32(prefix, NULL, "legofdmbw205glpo", | ||
441 | &sprom->legofdmbw205glpo, 0); | ||
442 | nvram_read_u32(prefix, NULL, "legofdmbw20ul5glpo", | ||
443 | &sprom->legofdmbw20ul5glpo, 0); | ||
444 | nvram_read_u32(prefix, NULL, "legofdmbw205gmpo", | ||
445 | &sprom->legofdmbw205gmpo, 0); | ||
446 | nvram_read_u32(prefix, NULL, "legofdmbw20ul5gmpo", | ||
447 | &sprom->legofdmbw20ul5gmpo, 0); | ||
448 | nvram_read_u32(prefix, NULL, "legofdmbw205ghpo", | ||
449 | &sprom->legofdmbw205ghpo, 0); | ||
450 | nvram_read_u32(prefix, NULL, "legofdmbw20ul5ghpo", | ||
451 | &sprom->legofdmbw20ul5ghpo, 0); | ||
452 | nvram_read_u32(prefix, NULL, "mcsbw202gpo", &sprom->mcsbw202gpo, 0); | ||
453 | nvram_read_u32(prefix, NULL, "mcsbw20ul2gpo", &sprom->mcsbw20ul2gpo, 0); | ||
454 | nvram_read_u32(prefix, NULL, "mcsbw402gpo", &sprom->mcsbw402gpo, 0); | ||
455 | nvram_read_u32(prefix, NULL, "mcsbw205glpo", &sprom->mcsbw205glpo, 0); | ||
456 | nvram_read_u32(prefix, NULL, "mcsbw20ul5glpo", | ||
457 | &sprom->mcsbw20ul5glpo, 0); | ||
458 | nvram_read_u32(prefix, NULL, "mcsbw405glpo", &sprom->mcsbw405glpo, 0); | ||
459 | nvram_read_u32(prefix, NULL, "mcsbw205gmpo", &sprom->mcsbw205gmpo, 0); | ||
460 | nvram_read_u32(prefix, NULL, "mcsbw20ul5gmpo", | ||
461 | &sprom->mcsbw20ul5gmpo, 0); | ||
462 | nvram_read_u32(prefix, NULL, "mcsbw405gmpo", &sprom->mcsbw405gmpo, 0); | ||
463 | nvram_read_u32(prefix, NULL, "mcsbw205ghpo", &sprom->mcsbw205ghpo, 0); | ||
464 | nvram_read_u32(prefix, NULL, "mcsbw20ul5ghpo", | ||
465 | &sprom->mcsbw20ul5ghpo, 0); | ||
466 | nvram_read_u32(prefix, NULL, "mcsbw405ghpo", &sprom->mcsbw405ghpo, 0); | ||
467 | nvram_read_u16(prefix, NULL, "mcs32po", &sprom->mcs32po, 0); | ||
468 | nvram_read_u16(prefix, NULL, "legofdm40duppo", | ||
469 | &sprom->legofdm40duppo, 0); | ||
470 | nvram_read_u8(prefix, NULL, "sar2g", &sprom->sar2g, 0); | ||
471 | nvram_read_u8(prefix, NULL, "sar5g", &sprom->sar5g, 0); | ||
472 | } | ||
473 | |||
474 | static void bcm47xx_fill_sprom_path_r4589(struct ssb_sprom *sprom, | ||
475 | const char *prefix) | ||
476 | { | ||
477 | char postfix[2]; | ||
478 | int i; | ||
479 | |||
480 | for (i = 0; i < ARRAY_SIZE(sprom->core_pwr_info); i++) { | ||
481 | struct ssb_sprom_core_pwr_info *pwr_info = &sprom->core_pwr_info[i]; | ||
482 | snprintf(postfix, sizeof(postfix), "%i", i); | ||
483 | nvram_read_u8(prefix, postfix, "maxp2ga", | ||
484 | &pwr_info->maxpwr_2g, 0); | ||
485 | nvram_read_u8(prefix, postfix, "itt2ga", | ||
486 | &pwr_info->itssi_2g, 0); | ||
487 | nvram_read_u8(prefix, postfix, "itt5ga", | ||
488 | &pwr_info->itssi_5g, 0); | ||
489 | nvram_read_u16(prefix, postfix, "pa2gw0a", | ||
490 | &pwr_info->pa_2g[0], 0); | ||
491 | nvram_read_u16(prefix, postfix, "pa2gw1a", | ||
492 | &pwr_info->pa_2g[1], 0); | ||
493 | nvram_read_u16(prefix, postfix, "pa2gw2a", | ||
494 | &pwr_info->pa_2g[2], 0); | ||
495 | nvram_read_u8(prefix, postfix, "maxp5ga", | ||
496 | &pwr_info->maxpwr_5g, 0); | ||
497 | nvram_read_u8(prefix, postfix, "maxp5gha", | ||
498 | &pwr_info->maxpwr_5gh, 0); | ||
499 | nvram_read_u8(prefix, postfix, "maxp5gla", | ||
500 | &pwr_info->maxpwr_5gl, 0); | ||
501 | nvram_read_u16(prefix, postfix, "pa5gw0a", | ||
502 | &pwr_info->pa_5g[0], 0); | ||
503 | nvram_read_u16(prefix, postfix, "pa5gw1a", | ||
504 | &pwr_info->pa_5g[1], 0); | ||
505 | nvram_read_u16(prefix, postfix, "pa5gw2a", | ||
506 | &pwr_info->pa_5g[2], 0); | ||
507 | nvram_read_u16(prefix, postfix, "pa5glw0a", | ||
508 | &pwr_info->pa_5gl[0], 0); | ||
509 | nvram_read_u16(prefix, postfix, "pa5glw1a", | ||
510 | &pwr_info->pa_5gl[1], 0); | ||
511 | nvram_read_u16(prefix, postfix, "pa5glw2a", | ||
512 | &pwr_info->pa_5gl[2], 0); | ||
513 | nvram_read_u16(prefix, postfix, "pa5ghw0a", | ||
514 | &pwr_info->pa_5gh[0], 0); | ||
515 | nvram_read_u16(prefix, postfix, "pa5ghw1a", | ||
516 | &pwr_info->pa_5gh[1], 0); | ||
517 | nvram_read_u16(prefix, postfix, "pa5ghw2a", | ||
518 | &pwr_info->pa_5gh[2], 0); | ||
519 | } | ||
520 | } | ||
521 | |||
522 | static void bcm47xx_fill_sprom_path_r45(struct ssb_sprom *sprom, | ||
523 | const char *prefix) | ||
524 | { | ||
525 | char postfix[2]; | ||
526 | int i; | ||
527 | |||
528 | for (i = 0; i < ARRAY_SIZE(sprom->core_pwr_info); i++) { | ||
529 | struct ssb_sprom_core_pwr_info *pwr_info = &sprom->core_pwr_info[i]; | ||
530 | snprintf(postfix, sizeof(postfix), "%i", i); | ||
531 | nvram_read_u16(prefix, postfix, "pa2gw3a", | ||
532 | &pwr_info->pa_2g[3], 0); | ||
533 | nvram_read_u16(prefix, postfix, "pa5gw3a", | ||
534 | &pwr_info->pa_5g[3], 0); | ||
535 | nvram_read_u16(prefix, postfix, "pa5glw3a", | ||
536 | &pwr_info->pa_5gl[3], 0); | ||
537 | nvram_read_u16(prefix, postfix, "pa5ghw3a", | ||
538 | &pwr_info->pa_5gh[3], 0); | ||
539 | } | ||
540 | } | ||
541 | |||
542 | void bcm47xx_fill_sprom_ethernet(struct ssb_sprom *sprom, const char *prefix) | ||
543 | { | ||
544 | nvram_read_macaddr(prefix, "et0macaddr", &sprom->et0mac); | ||
545 | nvram_read_u8(prefix, NULL, "et0mdcport", &sprom->et0mdcport, 0); | ||
546 | nvram_read_u8(prefix, NULL, "et0phyaddr", &sprom->et0phyaddr, 0); | ||
547 | |||
548 | nvram_read_macaddr(prefix, "et1macaddr", &sprom->et1mac); | ||
549 | nvram_read_u8(prefix, NULL, "et1mdcport", &sprom->et1mdcport, 0); | ||
550 | nvram_read_u8(prefix, NULL, "et1phyaddr", &sprom->et1phyaddr, 0); | ||
551 | |||
552 | nvram_read_macaddr(prefix, "macaddr", &sprom->il0mac); | ||
553 | nvram_read_macaddr(prefix, "il0macaddr", &sprom->il0mac); | ||
554 | } | ||
555 | |||
556 | void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix) | ||
557 | { | ||
558 | memset(sprom, 0, sizeof(struct ssb_sprom)); | ||
559 | |||
560 | bcm47xx_fill_sprom_ethernet(sprom, prefix); | ||
561 | |||
562 | nvram_read_u8(prefix, NULL, "sromrev", &sprom->revision, 0); | ||
563 | |||
564 | switch (sprom->revision) { | ||
565 | case 1: | ||
566 | bcm47xx_fill_sprom_r1234589(sprom, prefix); | ||
567 | bcm47xx_fill_sprom_r12389(sprom, prefix); | ||
568 | bcm47xx_fill_sprom_r1(sprom, prefix); | ||
569 | break; | ||
570 | case 2: | ||
571 | bcm47xx_fill_sprom_r1234589(sprom, prefix); | ||
572 | bcm47xx_fill_sprom_r12389(sprom, prefix); | ||
573 | bcm47xx_fill_sprom_r2389(sprom, prefix); | ||
574 | bcm47xx_fill_sprom_r2(sprom, prefix); | ||
575 | break; | ||
576 | case 3: | ||
577 | bcm47xx_fill_sprom_r1234589(sprom, prefix); | ||
578 | bcm47xx_fill_sprom_r12389(sprom, prefix); | ||
579 | bcm47xx_fill_sprom_r2389(sprom, prefix); | ||
580 | bcm47xx_fill_sprom_r389(sprom, prefix); | ||
581 | bcm47xx_fill_sprom_r3(sprom, prefix); | ||
582 | break; | ||
583 | case 4: | ||
584 | case 5: | ||
585 | bcm47xx_fill_sprom_r1234589(sprom, prefix); | ||
586 | bcm47xx_fill_sprom_r4589(sprom, prefix); | ||
587 | bcm47xx_fill_sprom_r458(sprom, prefix); | ||
588 | bcm47xx_fill_sprom_r45(sprom, prefix); | ||
589 | bcm47xx_fill_sprom_path_r4589(sprom, prefix); | ||
590 | bcm47xx_fill_sprom_path_r45(sprom, prefix); | ||
591 | break; | ||
592 | case 8: | ||
593 | bcm47xx_fill_sprom_r1234589(sprom, prefix); | ||
594 | bcm47xx_fill_sprom_r12389(sprom, prefix); | ||
595 | bcm47xx_fill_sprom_r2389(sprom, prefix); | ||
596 | bcm47xx_fill_sprom_r389(sprom, prefix); | ||
597 | bcm47xx_fill_sprom_r4589(sprom, prefix); | ||
598 | bcm47xx_fill_sprom_r458(sprom, prefix); | ||
599 | bcm47xx_fill_sprom_r89(sprom, prefix); | ||
600 | bcm47xx_fill_sprom_path_r4589(sprom, prefix); | ||
601 | break; | ||
602 | case 9: | ||
603 | bcm47xx_fill_sprom_r1234589(sprom, prefix); | ||
604 | bcm47xx_fill_sprom_r12389(sprom, prefix); | ||
605 | bcm47xx_fill_sprom_r2389(sprom, prefix); | ||
606 | bcm47xx_fill_sprom_r389(sprom, prefix); | ||
607 | bcm47xx_fill_sprom_r4589(sprom, prefix); | ||
608 | bcm47xx_fill_sprom_r89(sprom, prefix); | ||
609 | bcm47xx_fill_sprom_r9(sprom, prefix); | ||
610 | bcm47xx_fill_sprom_path_r4589(sprom, prefix); | ||
611 | break; | ||
612 | default: | ||
613 | pr_warn("Unsupported SPROM revision %d detected. Will extract" | ||
614 | " v1\n", sprom->revision); | ||
615 | sprom->revision = 1; | ||
616 | bcm47xx_fill_sprom_r1234589(sprom, prefix); | ||
617 | bcm47xx_fill_sprom_r12389(sprom, prefix); | ||
618 | bcm47xx_fill_sprom_r1(sprom, prefix); | ||
619 | } | ||
620 | } | ||
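To make the NVRAM_READ_VAL() macro near the top of sprom.c easier to follow: each instantiation generates a typed reader that builds the key from prefix/postfix, parses the value with the matching kstrto helper, and ignores values equal to the "all bits set means unset" sentinel. Roughly, NVRAM_READ_VAL(u8) expands to:

static void nvram_read_u8(const char *prefix, const char *postfix,
                          const char *name, u8 *val, u8 allset)
{
        char buf[100];
        char key[40];
        int err;
        u8 var;

        create_key(prefix, postfix, name, key, sizeof(key));

        err = nvram_getenv(key, buf, sizeof(buf));
        if (err < 0)
                return;
        err = kstrtou8(buf, 0, &var);
        if (err) {
                pr_warn("can not parse nvram name %s with value %s got %i",
                        key, buf, err);
                return;
        }
        if (allset && var == allset)
                return;
        *val = var;
}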
diff --git a/arch/mips/bcm63xx/setup.c b/arch/mips/bcm63xx/setup.c index d209f85d87bb..356b05583e14 100644 --- a/arch/mips/bcm63xx/setup.c +++ b/arch/mips/bcm63xx/setup.c | |||
@@ -33,7 +33,7 @@ static void bcm6348_a1_reboot(void) | |||
33 | u32 reg; | 33 | u32 reg; |
34 | 34 | ||
35 | /* soft reset all blocks */ | 35 | /* soft reset all blocks */ |
36 | printk(KERN_INFO "soft-reseting all blocks ...\n"); | 36 | printk(KERN_INFO "soft-resetting all blocks ...\n"); |
37 | reg = bcm_perf_readl(PERF_SOFTRESET_REG); | 37 | reg = bcm_perf_readl(PERF_SOFTRESET_REG); |
38 | reg &= ~SOFTRESET_6348_ALL; | 38 | reg &= ~SOFTRESET_6348_ALL; |
39 | bcm_perf_writel(reg, PERF_SOFTRESET_REG); | 39 | bcm_perf_writel(reg, PERF_SOFTRESET_REG); |
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h index 1881b316ca45..4d6d77ed9b9d 100644 --- a/arch/mips/include/asm/jump_label.h +++ b/arch/mips/include/asm/jump_label.h | |||
@@ -20,7 +20,7 @@ | |||
20 | #define WORD_INSN ".word" | 20 | #define WORD_INSN ".word" |
21 | #endif | 21 | #endif |
22 | 22 | ||
23 | static __always_inline bool arch_static_branch(struct jump_label_key *key) | 23 | static __always_inline bool arch_static_branch(struct static_key *key) |
24 | { | 24 | { |
25 | asm goto("1:\tnop\n\t" | 25 | asm goto("1:\tnop\n\t" |
26 | "nop\n\t" | 26 | "nop\n\t" |
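This hunk only tracks the tree-wide rename of struct jump_label_key to struct static_key; the MIPS asm-goto stub is otherwise unchanged. For reference, a hedged sketch of how such a key is typically consumed through the generic API (the example_* identifiers are invented for illustration):

#include <linux/jump_label.h>

static struct static_key example_key = STATIC_KEY_INIT_FALSE;

static void example_rare_work(void)
{
        /* the almost-never-taken slow path */
}

static void example_hot_path(void)
{
        if (static_key_false(&example_key))     /* compiles down to the nop above */
                example_rare_work();
}

static void example_enable(void)
{
        static_key_slow_inc(&example_key);      /* patches the nop into a jump */
}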
diff --git a/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h index de95e0723e2b..5ecaf47b34d2 100644 --- a/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h +++ b/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h | |||
@@ -44,4 +44,7 @@ union bcm47xx_bus { | |||
44 | extern union bcm47xx_bus bcm47xx_bus; | 44 | extern union bcm47xx_bus bcm47xx_bus; |
45 | extern enum bcm47xx_bus_type bcm47xx_bus_type; | 45 | extern enum bcm47xx_bus_type bcm47xx_bus_type; |
46 | 46 | ||
47 | void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix); | ||
48 | void bcm47xx_fill_sprom_ethernet(struct ssb_sprom *sprom, const char *prefix); | ||
49 | |||
47 | #endif /* __ASM_BCM47XX_H */ | 50 | #endif /* __ASM_BCM47XX_H */ |
diff --git a/arch/mips/include/asm/mach-bcm47xx/nvram.h b/arch/mips/include/asm/mach-bcm47xx/nvram.h index 184d5ecb5f51..69ef3efe06e7 100644 --- a/arch/mips/include/asm/mach-bcm47xx/nvram.h +++ b/arch/mips/include/asm/mach-bcm47xx/nvram.h | |||
@@ -37,7 +37,7 @@ struct nvram_header { | |||
37 | 37 | ||
38 | extern int nvram_getenv(char *name, char *val, size_t val_len); | 38 | extern int nvram_getenv(char *name, char *val, size_t val_len); |
39 | 39 | ||
40 | static inline void nvram_parse_macaddr(char *buf, u8 *macaddr) | 40 | static inline void nvram_parse_macaddr(char *buf, u8 macaddr[6]) |
41 | { | 41 | { |
42 | if (strchr(buf, ':')) | 42 | if (strchr(buf, ':')) |
43 | sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0], | 43 | sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0], |
diff --git a/arch/mips/include/asm/socket.h b/arch/mips/include/asm/socket.h index ad5c0a7a02a7..a2ed6fdad4e0 100644 --- a/arch/mips/include/asm/socket.h +++ b/arch/mips/include/asm/socket.h | |||
@@ -84,6 +84,10 @@ To add: #define SO_REUSEPORT 0x0200 /* Allow local address and port reuse. */ | |||
84 | 84 | ||
85 | #define SO_WIFI_STATUS 41 | 85 | #define SO_WIFI_STATUS 41 |
86 | #define SCM_WIFI_STATUS SO_WIFI_STATUS | 86 | #define SCM_WIFI_STATUS SO_WIFI_STATUS |
87 | #define SO_PEEK_OFF 42 | ||
88 | |||
89 | /* Instruct lower device to use last 4-bytes of skb data as FCS */ | ||
90 | #define SO_NOFCS 43 | ||
87 | 91 | ||
88 | #ifdef __KERNEL__ | 92 | #ifdef __KERNEL__ |
89 | 93 | ||
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index e3b897acfbc0..811084f4e422 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c | |||
@@ -606,6 +606,10 @@ static int mipspmu_event_init(struct perf_event *event) | |||
606 | { | 606 | { |
607 | int err = 0; | 607 | int err = 0; |
608 | 608 | ||
609 | /* does not support taken branch sampling */ | ||
610 | if (has_branch_stack(event)) | ||
611 | return -EOPNOTSUPP; | ||
612 | |||
609 | switch (event->attr.type) { | 613 | switch (event->attr.type) { |
610 | case PERF_TYPE_RAW: | 614 | case PERF_TYPE_RAW: |
611 | case PERF_TYPE_HARDWARE: | 615 | case PERF_TYPE_HARDWARE: |
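The new check rejects events that request taken-branch sampling, since the MIPS PMU has no branch-stack support. A hedged user-space sketch of an attribute that would now be refused with EOPNOTSUPP by perf_event_open() (values are illustrative):

#include <string.h>
#include <linux/perf_event.h>

static void example_fill_attr(struct perf_event_attr *attr)
{
        memset(attr, 0, sizeof(*attr));
        attr->type               = PERF_TYPE_HARDWARE;
        attr->config             = PERF_COUNT_HW_CPU_CYCLES;
        attr->sample_period      = 100000;
        /* PERF_SAMPLE_BRANCH_STACK is what has_branch_stack() tests for */
        attr->sample_type        = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK;
        attr->branch_sample_type = PERF_SAMPLE_BRANCH_ANY;
}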
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 7955409051c4..61f1cb45a1d5 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c | |||
@@ -80,9 +80,7 @@ void __noreturn cpu_idle(void) | |||
80 | #endif | 80 | #endif |
81 | rcu_idle_exit(); | 81 | rcu_idle_exit(); |
82 | tick_nohz_idle_exit(); | 82 | tick_nohz_idle_exit(); |
83 | preempt_enable_no_resched(); | 83 | schedule_preempt_disabled(); |
84 | schedule(); | ||
85 | preempt_disable(); | ||
86 | } | 84 | } |
87 | } | 85 | } |
88 | 86 | ||
diff --git a/arch/mips/pci/pci-bcm47xx.c b/arch/mips/pci/pci-bcm47xx.c index 400535a955d0..c682468010c5 100644 --- a/arch/mips/pci/pci-bcm47xx.c +++ b/arch/mips/pci/pci-bcm47xx.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/types.h> | 25 | #include <linux/types.h> |
26 | #include <linux/pci.h> | 26 | #include <linux/pci.h> |
27 | #include <linux/ssb/ssb.h> | 27 | #include <linux/ssb/ssb.h> |
28 | #include <linux/bcma/bcma.h> | ||
28 | #include <bcm47xx.h> | 29 | #include <bcm47xx.h> |
29 | 30 | ||
30 | int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 31 | int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
@@ -32,15 +33,12 @@ int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | |||
32 | return 0; | 33 | return 0; |
33 | } | 34 | } |
34 | 35 | ||
35 | int pcibios_plat_dev_init(struct pci_dev *dev) | ||
36 | { | ||
37 | #ifdef CONFIG_BCM47XX_SSB | 36 | #ifdef CONFIG_BCM47XX_SSB |
37 | static int bcm47xx_pcibios_plat_dev_init_ssb(struct pci_dev *dev) | ||
38 | { | ||
38 | int res; | 39 | int res; |
39 | u8 slot, pin; | 40 | u8 slot, pin; |
40 | 41 | ||
41 | if (bcm47xx_bus_type != BCM47XX_BUS_TYPE_SSB) | ||
42 | return 0; | ||
43 | |||
44 | res = ssb_pcibios_plat_dev_init(dev); | 42 | res = ssb_pcibios_plat_dev_init(dev); |
45 | if (res < 0) { | 43 | if (res < 0) { |
46 | printk(KERN_ALERT "PCI: Failed to init device %s\n", | 44 | printk(KERN_ALERT "PCI: Failed to init device %s\n", |
@@ -60,6 +58,47 @@ int pcibios_plat_dev_init(struct pci_dev *dev) | |||
60 | } | 58 | } |
61 | 59 | ||
62 | dev->irq = res; | 60 | dev->irq = res; |
61 | return 0; | ||
62 | } | ||
63 | #endif | 63 | #endif |
64 | |||
65 | #ifdef CONFIG_BCM47XX_BCMA | ||
66 | static int bcm47xx_pcibios_plat_dev_init_bcma(struct pci_dev *dev) | ||
67 | { | ||
68 | int res; | ||
69 | |||
70 | res = bcma_core_pci_plat_dev_init(dev); | ||
71 | if (res < 0) { | ||
72 | printk(KERN_ALERT "PCI: Failed to init device %s\n", | ||
73 | pci_name(dev)); | ||
74 | return res; | ||
75 | } | ||
76 | |||
77 | res = bcma_core_pci_pcibios_map_irq(dev); | ||
78 | |||
79 | /* IRQ-0 and IRQ-1 are software interrupts. */ | ||
80 | if (res < 2) { | ||
81 | printk(KERN_ALERT "PCI: Failed to map IRQ of device %s\n", | ||
82 | pci_name(dev)); | ||
83 | return res; | ||
84 | } | ||
85 | |||
86 | dev->irq = res; | ||
64 | return 0; | 87 | return 0; |
65 | } | 88 | } |
89 | #endif | ||
90 | |||
91 | int pcibios_plat_dev_init(struct pci_dev *dev) | ||
92 | { | ||
93 | #ifdef CONFIG_BCM47XX_SSB | ||
94 | if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_SSB) | ||
95 | return bcm47xx_pcibios_plat_dev_init_ssb(dev); | ||
96 | else | ||
97 | #endif | ||
98 | #ifdef CONFIG_BCM47XX_BCMA | ||
99 | if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) | ||
100 | return bcm47xx_pcibios_plat_dev_init_bcma(dev); | ||
101 | else | ||
102 | #endif | ||
103 | return 0; | ||
104 | } | ||
diff --git a/arch/mn10300/include/asm/socket.h b/arch/mn10300/include/asm/socket.h index 876356d78522..820463a484b8 100644 --- a/arch/mn10300/include/asm/socket.h +++ b/arch/mn10300/include/asm/socket.h | |||
@@ -64,5 +64,9 @@ | |||
64 | 64 | ||
65 | #define SO_WIFI_STATUS 41 | 65 | #define SO_WIFI_STATUS 41 |
66 | #define SCM_WIFI_STATUS SO_WIFI_STATUS | 66 | #define SCM_WIFI_STATUS SO_WIFI_STATUS |
67 | #define SO_PEEK_OFF 42 | ||
68 | |||
69 | /* Instruct lower device to use last 4-bytes of skb data as FCS */ | ||
70 | #define SO_NOFCS 43 | ||
67 | 71 | ||
68 | #endif /* _ASM_SOCKET_H */ | 72 | #endif /* _ASM_SOCKET_H */ |
diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c index 28eec3102535..cac401d37f75 100644 --- a/arch/mn10300/kernel/process.c +++ b/arch/mn10300/kernel/process.c | |||
@@ -123,9 +123,7 @@ void cpu_idle(void) | |||
123 | idle(); | 123 | idle(); |
124 | } | 124 | } |
125 | 125 | ||
126 | preempt_enable_no_resched(); | 126 | schedule_preempt_disabled(); |
127 | schedule(); | ||
128 | preempt_disable(); | ||
129 | } | 127 | } |
130 | } | 128 | } |
131 | 129 | ||
diff --git a/arch/parisc/include/asm/socket.h b/arch/parisc/include/asm/socket.h index d28c51b61067..1b52c2c31a7a 100644 --- a/arch/parisc/include/asm/socket.h +++ b/arch/parisc/include/asm/socket.h | |||
@@ -63,6 +63,11 @@ | |||
63 | 63 | ||
64 | #define SO_WIFI_STATUS 0x4022 | 64 | #define SO_WIFI_STATUS 0x4022 |
65 | #define SCM_WIFI_STATUS SO_WIFI_STATUS | 65 | #define SCM_WIFI_STATUS SO_WIFI_STATUS |
66 | #define SO_PEEK_OFF 0x4023 | ||
67 | |||
68 | /* Instruct lower device to use last 4-bytes of skb data as FCS */ | ||
69 | #define SO_NOFCS 0x4024 | ||
70 | |||
66 | 71 | ||
67 | /* O_NONBLOCK clashes with the bits used for socket types. Therefore we | 72 | /* O_NONBLOCK clashes with the bits used for socket types. Therefore we |
68 | * have to define SOCK_NONBLOCK to a different value here. | 73 | * have to define SOCK_NONBLOCK to a different value here. |
diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c index fc770be465ff..4f004596a6e7 100644 --- a/arch/parisc/kernel/pdc_cons.c +++ b/arch/parisc/kernel/pdc_cons.c | |||
@@ -90,11 +90,13 @@ static int pdc_console_setup(struct console *co, char *options) | |||
90 | 90 | ||
91 | #define PDC_CONS_POLL_DELAY (30 * HZ / 1000) | 91 | #define PDC_CONS_POLL_DELAY (30 * HZ / 1000) |
92 | 92 | ||
93 | static struct timer_list pdc_console_timer; | 93 | static void pdc_console_poll(unsigned long unused); |
94 | static DEFINE_TIMER(pdc_console_timer, pdc_console_poll, 0, 0); | ||
95 | static struct tty_port tty_port; | ||
94 | 96 | ||
95 | static int pdc_console_tty_open(struct tty_struct *tty, struct file *filp) | 97 | static int pdc_console_tty_open(struct tty_struct *tty, struct file *filp) |
96 | { | 98 | { |
97 | 99 | tty_port_tty_set(&tty_port, tty); | |
98 | mod_timer(&pdc_console_timer, jiffies + PDC_CONS_POLL_DELAY); | 100 | mod_timer(&pdc_console_timer, jiffies + PDC_CONS_POLL_DELAY); |
99 | 101 | ||
100 | return 0; | 102 | return 0; |
@@ -102,8 +104,10 @@ static int pdc_console_tty_open(struct tty_struct *tty, struct file *filp) | |||
102 | 104 | ||
103 | static void pdc_console_tty_close(struct tty_struct *tty, struct file *filp) | 105 | static void pdc_console_tty_close(struct tty_struct *tty, struct file *filp) |
104 | { | 106 | { |
105 | if (!tty->count) | 107 | if (!tty->count) { |
106 | del_timer(&pdc_console_timer); | 108 | del_timer_sync(&pdc_console_timer); |
109 | tty_port_tty_set(&tty_port, NULL); | ||
110 | } | ||
107 | } | 111 | } |
108 | 112 | ||
109 | static int pdc_console_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) | 113 | static int pdc_console_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) |
@@ -122,8 +126,6 @@ static int pdc_console_tty_chars_in_buffer(struct tty_struct *tty) | |||
122 | return 0; /* no buffer */ | 126 | return 0; /* no buffer */ |
123 | } | 127 | } |
124 | 128 | ||
125 | static struct tty_driver *pdc_console_tty_driver; | ||
126 | |||
127 | static const struct tty_operations pdc_console_tty_ops = { | 129 | static const struct tty_operations pdc_console_tty_ops = { |
128 | .open = pdc_console_tty_open, | 130 | .open = pdc_console_tty_open, |
129 | .close = pdc_console_tty_close, | 131 | .close = pdc_console_tty_close, |
@@ -134,10 +136,8 @@ static const struct tty_operations pdc_console_tty_ops = { | |||
134 | 136 | ||
135 | static void pdc_console_poll(unsigned long unused) | 137 | static void pdc_console_poll(unsigned long unused) |
136 | { | 138 | { |
137 | |||
138 | int data, count = 0; | 139 | int data, count = 0; |
139 | 140 | struct tty_struct *tty = tty_port_tty_get(&tty_port); | |
140 | struct tty_struct *tty = pdc_console_tty_driver->ttys[0]; | ||
141 | 141 | ||
142 | if (!tty) | 142 | if (!tty) |
143 | return; | 143 | return; |
@@ -153,15 +153,17 @@ static void pdc_console_poll(unsigned long unused) | |||
153 | if (count) | 153 | if (count) |
154 | tty_flip_buffer_push(tty); | 154 | tty_flip_buffer_push(tty); |
155 | 155 | ||
156 | if (tty->count && (pdc_cons.flags & CON_ENABLED)) | 156 | tty_kref_put(tty); |
157 | |||
158 | if (pdc_cons.flags & CON_ENABLED) | ||
157 | mod_timer(&pdc_console_timer, jiffies + PDC_CONS_POLL_DELAY); | 159 | mod_timer(&pdc_console_timer, jiffies + PDC_CONS_POLL_DELAY); |
158 | } | 160 | } |
159 | 161 | ||
162 | static struct tty_driver *pdc_console_tty_driver; | ||
163 | |||
160 | static int __init pdc_console_tty_driver_init(void) | 164 | static int __init pdc_console_tty_driver_init(void) |
161 | { | 165 | { |
162 | |||
163 | int err; | 166 | int err; |
164 | struct tty_driver *drv; | ||
165 | 167 | ||
166 | /* Check if the console driver is still registered. | 168 | /* Check if the console driver is still registered. |
167 | * It is unregistered if the pdc console was not selected as the | 169 | * It is unregistered if the pdc console was not selected as the |
@@ -183,32 +185,29 @@ static int __init pdc_console_tty_driver_init(void) | |||
183 | printk(KERN_INFO "The PDC console driver is still registered, removing CON_BOOT flag\n"); | 185 | printk(KERN_INFO "The PDC console driver is still registered, removing CON_BOOT flag\n"); |
184 | pdc_cons.flags &= ~CON_BOOT; | 186 | pdc_cons.flags &= ~CON_BOOT; |
185 | 187 | ||
186 | drv = alloc_tty_driver(1); | 188 | tty_port_init(&tty_port); |
187 | 189 | ||
188 | if (!drv) | 190 | pdc_console_tty_driver = alloc_tty_driver(1); |
189 | return -ENOMEM; | ||
190 | 191 | ||
191 | drv->driver_name = "pdc_cons"; | 192 | if (!pdc_console_tty_driver) |
192 | drv->name = "ttyB"; | 193 | return -ENOMEM; |
193 | drv->major = MUX_MAJOR; | ||
194 | drv->minor_start = 0; | ||
195 | drv->type = TTY_DRIVER_TYPE_SYSTEM; | ||
196 | drv->init_termios = tty_std_termios; | ||
197 | drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS; | ||
198 | tty_set_operations(drv, &pdc_console_tty_ops); | ||
199 | 194 | ||
200 | err = tty_register_driver(drv); | 195 | pdc_console_tty_driver->driver_name = "pdc_cons"; |
196 | pdc_console_tty_driver->name = "ttyB"; | ||
197 | pdc_console_tty_driver->major = MUX_MAJOR; | ||
198 | pdc_console_tty_driver->minor_start = 0; | ||
199 | pdc_console_tty_driver->type = TTY_DRIVER_TYPE_SYSTEM; | ||
200 | pdc_console_tty_driver->init_termios = tty_std_termios; | ||
201 | pdc_console_tty_driver->flags = TTY_DRIVER_REAL_RAW | | ||
202 | TTY_DRIVER_RESET_TERMIOS; | ||
203 | tty_set_operations(pdc_console_tty_driver, &pdc_console_tty_ops); | ||
204 | |||
205 | err = tty_register_driver(pdc_console_tty_driver); | ||
201 | if (err) { | 206 | if (err) { |
202 | printk(KERN_ERR "Unable to register the PDC console TTY driver\n"); | 207 | printk(KERN_ERR "Unable to register the PDC console TTY driver\n"); |
203 | return err; | 208 | return err; |
204 | } | 209 | } |
205 | 210 | ||
206 | pdc_console_tty_driver = drv; | ||
207 | |||
208 | /* No need to initialize the pdc_console_timer if tty isn't allocated */ | ||
209 | init_timer(&pdc_console_timer); | ||
210 | pdc_console_timer.function = pdc_console_poll; | ||
211 | |||
212 | return 0; | 211 | return 0; |
213 | } | 212 | } |
214 | 213 | ||
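The poll routine now looks the tty up through the tty_port instead of reaching into the driver's ttys[] array; a minimal sketch of the reference-counting contract it relies on (illustrative names only):

	struct tty_struct *tty = tty_port_tty_get(&port);	/* takes a kref, may return NULL */

	if (tty) {
		/* ... feed characters, tty_flip_buffer_push(tty) ... */
		tty_kref_put(tty);				/* drop the reference taken above */
	}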
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index 62c60b87d039..d4b94b395c16 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c | |||
@@ -71,9 +71,7 @@ void cpu_idle(void) | |||
71 | while (1) { | 71 | while (1) { |
72 | while (!need_resched()) | 72 | while (!need_resched()) |
73 | barrier(); | 73 | barrier(); |
74 | preempt_enable_no_resched(); | 74 | schedule_preempt_disabled(); |
75 | schedule(); | ||
76 | preempt_disable(); | ||
77 | check_pgt_cache(); | 75 | check_pgt_cache(); |
78 | } | 76 | } |
79 | } | 77 | } |
diff --git a/arch/powerpc/boot/dts/bluestone.dts b/arch/powerpc/boot/dts/bluestone.dts index 2a56a0dbd1f7..74876f737407 100644 --- a/arch/powerpc/boot/dts/bluestone.dts +++ b/arch/powerpc/boot/dts/bluestone.dts | |||
@@ -222,7 +222,7 @@ | |||
222 | 222 | ||
223 | EMAC0: ethernet@ef600c00 { | 223 | EMAC0: ethernet@ef600c00 { |
224 | device_type = "network"; | 224 | device_type = "network"; |
225 | compatible = "ibm,emac4sync"; | 225 | compatible = "ibm,emac-apm821xx", "ibm,emac4sync"; |
226 | interrupt-parent = <&EMAC0>; | 226 | interrupt-parent = <&EMAC0>; |
227 | interrupts = <0x0 0x1>; | 227 | interrupts = <0x0 0x1>; |
228 | #interrupt-cells = <1>; | 228 | #interrupt-cells = <1>; |
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h index 938986e412f1..ae098c438f00 100644 --- a/arch/powerpc/include/asm/jump_label.h +++ b/arch/powerpc/include/asm/jump_label.h | |||
@@ -17,7 +17,7 @@ | |||
17 | #define JUMP_ENTRY_TYPE stringify_in_c(FTR_ENTRY_LONG) | 17 | #define JUMP_ENTRY_TYPE stringify_in_c(FTR_ENTRY_LONG) |
18 | #define JUMP_LABEL_NOP_SIZE 4 | 18 | #define JUMP_LABEL_NOP_SIZE 4 |
19 | 19 | ||
20 | static __always_inline bool arch_static_branch(struct jump_label_key *key) | 20 | static __always_inline bool arch_static_branch(struct static_key *key) |
21 | { | 21 | { |
22 | asm goto("1:\n\t" | 22 | asm goto("1:\n\t" |
23 | "nop\n\t" | 23 | "nop\n\t" |
diff --git a/arch/powerpc/include/asm/keylargo.h b/arch/powerpc/include/asm/keylargo.h index fc195d0b3c34..2156315d8a90 100644 --- a/arch/powerpc/include/asm/keylargo.h +++ b/arch/powerpc/include/asm/keylargo.h | |||
@@ -21,7 +21,7 @@ | |||
21 | #define KEYLARGO_FCR4 0x48 | 21 | #define KEYLARGO_FCR4 0x48 |
22 | #define KEYLARGO_FCR5 0x4c /* Pangea only */ | 22 | #define KEYLARGO_FCR5 0x4c /* Pangea only */ |
23 | 23 | ||
24 | /* K2 aditional FCRs */ | 24 | /* K2 additional FCRs */ |
25 | #define K2_FCR6 0x34 | 25 | #define K2_FCR6 0x34 |
26 | #define K2_FCR7 0x30 | 26 | #define K2_FCR7 0x30 |
27 | #define K2_FCR8 0x2c | 27 | #define K2_FCR8 0x2c |
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h index 8f1df1208d23..1a8093fa8f71 100644 --- a/arch/powerpc/include/asm/perf_event_server.h +++ b/arch/powerpc/include/asm/perf_event_server.h | |||
@@ -61,8 +61,6 @@ struct pt_regs; | |||
61 | extern unsigned long perf_misc_flags(struct pt_regs *regs); | 61 | extern unsigned long perf_misc_flags(struct pt_regs *regs); |
62 | extern unsigned long perf_instruction_pointer(struct pt_regs *regs); | 62 | extern unsigned long perf_instruction_pointer(struct pt_regs *regs); |
63 | 63 | ||
64 | #define PERF_EVENT_INDEX_OFFSET 1 | ||
65 | |||
66 | /* | 64 | /* |
67 | * Only override the default definitions in include/linux/perf_event.h | 65 | * Only override the default definitions in include/linux/perf_event.h |
68 | * if we have hardware PMU support. | 66 | * if we have hardware PMU support. |
diff --git a/arch/powerpc/include/asm/socket.h b/arch/powerpc/include/asm/socket.h index 2fc2af8fbf59..3d5179bb122f 100644 --- a/arch/powerpc/include/asm/socket.h +++ b/arch/powerpc/include/asm/socket.h | |||
@@ -71,5 +71,9 @@ | |||
71 | 71 | ||
72 | #define SO_WIFI_STATUS 41 | 72 | #define SO_WIFI_STATUS 41 |
73 | #define SCM_WIFI_STATUS SO_WIFI_STATUS | 73 | #define SCM_WIFI_STATUS SO_WIFI_STATUS |
74 | #define SO_PEEK_OFF 42 | ||
75 | |||
76 | /* Instruct lower device to use last 4-bytes of skb data as FCS */ | ||
77 | #define SO_NOFCS 43 | ||
74 | 78 | ||
75 | #endif /* _ASM_POWERPC_SOCKET_H */ | 79 | #endif /* _ASM_POWERPC_SOCKET_H */ |
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c index 0a48bf5db6c8..c97fc60c790c 100644 --- a/arch/powerpc/kernel/idle.c +++ b/arch/powerpc/kernel/idle.c | |||
@@ -101,11 +101,11 @@ void cpu_idle(void) | |||
101 | ppc64_runlatch_on(); | 101 | ppc64_runlatch_on(); |
102 | rcu_idle_exit(); | 102 | rcu_idle_exit(); |
103 | tick_nohz_idle_exit(); | 103 | tick_nohz_idle_exit(); |
104 | preempt_enable_no_resched(); | 104 | if (cpu_should_die()) { |
105 | if (cpu_should_die()) | 105 | sched_preempt_enable_no_resched(); |
106 | cpu_die(); | 106 | cpu_die(); |
107 | schedule(); | 107 | } |
108 | preempt_disable(); | 108 | schedule_preempt_disabled(); |
109 | } | 109 | } |
110 | } | 110 | } |
111 | 111 | ||
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index 64483fde95c6..c2e27ede07ec 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c | |||
@@ -1084,6 +1084,10 @@ static int power_pmu_event_init(struct perf_event *event) | |||
1084 | if (!ppmu) | 1084 | if (!ppmu) |
1085 | return -ENOENT; | 1085 | return -ENOENT; |
1086 | 1086 | ||
1087 | /* does not support taken branch sampling */ | ||
1088 | if (has_branch_stack(event)) | ||
1089 | return -EOPNOTSUPP; | ||
1090 | |||
1087 | switch (event->attr.type) { | 1091 | switch (event->attr.type) { |
1088 | case PERF_TYPE_HARDWARE: | 1092 | case PERF_TYPE_HARDWARE: |
1089 | ev = event->attr.config; | 1093 | ev = event->attr.config; |
@@ -1193,6 +1197,11 @@ static int power_pmu_event_init(struct perf_event *event) | |||
1193 | return err; | 1197 | return err; |
1194 | } | 1198 | } |
1195 | 1199 | ||
1200 | static int power_pmu_event_idx(struct perf_event *event) | ||
1201 | { | ||
1202 | return event->hw.idx; | ||
1203 | } | ||
1204 | |||
1196 | struct pmu power_pmu = { | 1205 | struct pmu power_pmu = { |
1197 | .pmu_enable = power_pmu_enable, | 1206 | .pmu_enable = power_pmu_enable, |
1198 | .pmu_disable = power_pmu_disable, | 1207 | .pmu_disable = power_pmu_disable, |
@@ -1205,6 +1214,7 @@ struct pmu power_pmu = { | |||
1205 | .start_txn = power_pmu_start_txn, | 1214 | .start_txn = power_pmu_start_txn, |
1206 | .cancel_txn = power_pmu_cancel_txn, | 1215 | .cancel_txn = power_pmu_cancel_txn, |
1207 | .commit_txn = power_pmu_commit_txn, | 1216 | .commit_txn = power_pmu_commit_txn, |
1217 | .event_idx = power_pmu_event_idx, | ||
1208 | }; | 1218 | }; |
1209 | 1219 | ||
1210 | /* | 1220 | /* |
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c index 8fc62586a973..a5fbf4cb6329 100644 --- a/arch/powerpc/platforms/iseries/setup.c +++ b/arch/powerpc/platforms/iseries/setup.c | |||
@@ -584,9 +584,7 @@ static void iseries_shared_idle(void) | |||
584 | if (hvlpevent_is_pending()) | 584 | if (hvlpevent_is_pending()) |
585 | process_iSeries_events(); | 585 | process_iSeries_events(); |
586 | 586 | ||
587 | preempt_enable_no_resched(); | 587 | schedule_preempt_disabled(); |
588 | schedule(); | ||
589 | preempt_disable(); | ||
590 | } | 588 | } |
591 | } | 589 | } |
592 | 590 | ||
@@ -615,9 +613,7 @@ static void iseries_dedicated_idle(void) | |||
615 | ppc64_runlatch_on(); | 613 | ppc64_runlatch_on(); |
616 | rcu_idle_exit(); | 614 | rcu_idle_exit(); |
617 | tick_nohz_idle_exit(); | 615 | tick_nohz_idle_exit(); |
618 | preempt_enable_no_resched(); | 616 | schedule_preempt_disabled(); |
619 | schedule(); | ||
620 | preempt_disable(); | ||
621 | } | 617 | } |
622 | } | 618 | } |
623 | 619 | ||
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h index 95a6cf2b5b67..6c32190dc73e 100644 --- a/arch/s390/include/asm/jump_label.h +++ b/arch/s390/include/asm/jump_label.h | |||
@@ -13,7 +13,7 @@ | |||
13 | #define ASM_ALIGN ".balign 4" | 13 | #define ASM_ALIGN ".balign 4" |
14 | #endif | 14 | #endif |
15 | 15 | ||
16 | static __always_inline bool arch_static_branch(struct jump_label_key *key) | 16 | static __always_inline bool arch_static_branch(struct static_key *key) |
17 | { | 17 | { |
18 | asm goto("0: brcl 0,0\n" | 18 | asm goto("0: brcl 0,0\n" |
19 | ".pushsection __jump_table, \"aw\"\n" | 19 | ".pushsection __jump_table, \"aw\"\n" |
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h index a75f168d2718..4eb444edbe49 100644 --- a/arch/s390/include/asm/perf_event.h +++ b/arch/s390/include/asm/perf_event.h | |||
@@ -6,4 +6,3 @@ | |||
6 | 6 | ||
7 | /* Empty, just to avoid compiling error */ | 7 | /* Empty, just to avoid compiling error */ |
8 | 8 | ||
9 | #define PERF_EVENT_INDEX_OFFSET 0 | ||
diff --git a/arch/s390/include/asm/qeth.h b/arch/s390/include/asm/qeth.h index 90efda0b137d..2c7c898c03e4 100644 --- a/arch/s390/include/asm/qeth.h +++ b/arch/s390/include/asm/qeth.h | |||
@@ -20,6 +20,7 @@ | |||
20 | #define SIOC_QETH_ARP_FLUSH_CACHE (SIOCDEVPRIVATE + 4) | 20 | #define SIOC_QETH_ARP_FLUSH_CACHE (SIOCDEVPRIVATE + 4) |
21 | #define SIOC_QETH_ADP_SET_SNMP_CONTROL (SIOCDEVPRIVATE + 5) | 21 | #define SIOC_QETH_ADP_SET_SNMP_CONTROL (SIOCDEVPRIVATE + 5) |
22 | #define SIOC_QETH_GET_CARD_TYPE (SIOCDEVPRIVATE + 6) | 22 | #define SIOC_QETH_GET_CARD_TYPE (SIOCDEVPRIVATE + 6) |
23 | #define SIOC_QETH_QUERY_OAT (SIOCDEVPRIVATE + 7) | ||
23 | 24 | ||
24 | struct qeth_arp_cache_entry { | 25 | struct qeth_arp_cache_entry { |
25 | __u8 macaddr[6]; | 26 | __u8 macaddr[6]; |
@@ -107,4 +108,10 @@ struct qeth_arp_query_user_data { | |||
107 | char *entries; | 108 | char *entries; |
108 | } __attribute__((packed)); | 109 | } __attribute__((packed)); |
109 | 110 | ||
111 | struct qeth_query_oat_data { | ||
112 | __u32 command; | ||
113 | __u32 buffer_len; | ||
114 | __u32 response_len; | ||
115 | __u64 ptr; | ||
116 | }; | ||
110 | #endif /* __ASM_S390_QETH_IOCTL_H__ */ | 117 | #endif /* __ASM_S390_QETH_IOCTL_H__ */ |
diff --git a/arch/s390/include/asm/socket.h b/arch/s390/include/asm/socket.h index 67b5c1b14b51..c91b720965c0 100644 --- a/arch/s390/include/asm/socket.h +++ b/arch/s390/include/asm/socket.h | |||
@@ -72,5 +72,9 @@ | |||
72 | 72 | ||
73 | #define SO_WIFI_STATUS 41 | 73 | #define SO_WIFI_STATUS 41 |
74 | #define SCM_WIFI_STATUS SO_WIFI_STATUS | 74 | #define SCM_WIFI_STATUS SO_WIFI_STATUS |
75 | #define SO_PEEK_OFF 42 | ||
76 | |||
77 | /* Instruct lower device to use last 4-bytes of skb data as FCS */ | ||
78 | #define SO_NOFCS 43 | ||
75 | 79 | ||
76 | #endif /* _ASM_SOCKET_H */ | 80 | #endif /* _ASM_SOCKET_H */ |
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index b9a7fdd9c814..e30b2dfa8ba0 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c | |||
@@ -165,13 +165,6 @@ static inline int ext_hash(u16 code) | |||
165 | return (code + (code >> 9)) & 0xff; | 165 | return (code + (code >> 9)) & 0xff; |
166 | } | 166 | } |
167 | 167 | ||
168 | static void ext_int_hash_update(struct rcu_head *head) | ||
169 | { | ||
170 | struct ext_int_info *p = container_of(head, struct ext_int_info, rcu); | ||
171 | |||
172 | kfree(p); | ||
173 | } | ||
174 | |||
175 | int register_external_interrupt(u16 code, ext_int_handler_t handler) | 168 | int register_external_interrupt(u16 code, ext_int_handler_t handler) |
176 | { | 169 | { |
177 | struct ext_int_info *p; | 170 | struct ext_int_info *p; |
@@ -202,7 +195,7 @@ int unregister_external_interrupt(u16 code, ext_int_handler_t handler) | |||
202 | list_for_each_entry_rcu(p, &ext_int_hash[index], entry) | 195 | list_for_each_entry_rcu(p, &ext_int_hash[index], entry) |
203 | if (p->code == code && p->handler == handler) { | 196 | if (p->code == code && p->handler == handler) { |
204 | list_del_rcu(&p->entry); | 197 | list_del_rcu(&p->entry); |
205 | call_rcu(&p->rcu, ext_int_hash_update); | 198 | kfree_rcu(p, rcu); |
206 | } | 199 | } |
207 | spin_unlock_irqrestore(&ext_int_hash_lock, flags); | 200 | spin_unlock_irqrestore(&ext_int_hash_lock, flags); |
208 | return 0; | 201 | return 0; |
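kfree_rcu() removes the need for a call_rcu() callback whose only job is kfree(): the struct just has to embed a struct rcu_head, and the second argument names that member. A sketch with an illustrative type:

	struct foo {
		struct list_head entry;
		struct rcu_head rcu;
	};

	static void foo_remove(struct foo *p)
	{
		list_del_rcu(&p->entry);
		kfree_rcu(p, rcu);	/* freed after a grace period, no callback needed */
	}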
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index e795933eb2cb..7618085b4164 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c | |||
@@ -97,9 +97,7 @@ void cpu_idle(void) | |||
97 | tick_nohz_idle_exit(); | 97 | tick_nohz_idle_exit(); |
98 | if (test_thread_flag(TIF_MCCK_PENDING)) | 98 | if (test_thread_flag(TIF_MCCK_PENDING)) |
99 | s390_handle_mcck(); | 99 | s390_handle_mcck(); |
100 | preempt_enable_no_resched(); | 100 | schedule_preempt_disabled(); |
101 | schedule(); | ||
102 | preempt_disable(); | ||
103 | } | 101 | } |
104 | } | 102 | } |
105 | 103 | ||
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 2398ce6b15ae..b0e28c47ab83 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -550,12 +550,6 @@ int __cpuinit start_secondary(void *cpuvoid) | |||
550 | S390_lowcore.restart_psw.addr = | 550 | S390_lowcore.restart_psw.addr = |
551 | PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; | 551 | PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; |
552 | __ctl_set_bit(0, 28); /* Enable lowcore protection */ | 552 | __ctl_set_bit(0, 28); /* Enable lowcore protection */ |
553 | /* | ||
554 | * Wait until the cpu which brought this one up marked it | ||
555 | * active before enabling interrupts. | ||
556 | */ | ||
557 | while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask)) | ||
558 | cpu_relax(); | ||
559 | local_irq_enable(); | 553 | local_irq_enable(); |
560 | /* cpu_idle will call schedule for us */ | 554 | /* cpu_idle will call schedule for us */ |
561 | cpu_idle(); | 555 | cpu_idle(); |
diff --git a/arch/score/Kconfig.debug b/arch/score/Kconfig.debug index a1f346df0a71..d8a9b2d146ee 100644 --- a/arch/score/Kconfig.debug +++ b/arch/score/Kconfig.debug | |||
@@ -22,7 +22,7 @@ config RUNTIME_DEBUG | |||
22 | help | 22 | help |
23 | If you say Y here, some debugging macros will do run-time checking. | 23 | If you say Y here, some debugging macros will do run-time checking. |
24 | If you say N here, those macros will mostly turn to no-ops. See | 24 | If you say N here, those macros will mostly turn to no-ops. See |
25 | include/asm-score/debug.h for debuging macros. | 25 | include/asm-score/debug.h for debugging macros. |
26 | If unsure, say N. | 26 | If unsure, say N. |
27 | 27 | ||
28 | endmenu | 28 | endmenu |
diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c index 25d08030a883..2707023c7563 100644 --- a/arch/score/kernel/process.c +++ b/arch/score/kernel/process.c | |||
@@ -53,9 +53,7 @@ void __noreturn cpu_idle(void) | |||
53 | while (!need_resched()) | 53 | while (!need_resched()) |
54 | barrier(); | 54 | barrier(); |
55 | 55 | ||
56 | preempt_enable_no_resched(); | 56 | schedule_preempt_disabled(); |
57 | schedule(); | ||
58 | preempt_disable(); | ||
59 | } | 57 | } |
60 | } | 58 | } |
61 | 59 | ||
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c index 406508d4ce74..7e4892826563 100644 --- a/arch/sh/kernel/idle.c +++ b/arch/sh/kernel/idle.c | |||
@@ -114,9 +114,7 @@ void cpu_idle(void) | |||
114 | 114 | ||
115 | rcu_idle_exit(); | 115 | rcu_idle_exit(); |
116 | tick_nohz_idle_exit(); | 116 | tick_nohz_idle_exit(); |
117 | preempt_enable_no_resched(); | 117 | schedule_preempt_disabled(); |
118 | schedule(); | ||
119 | preempt_disable(); | ||
120 | } | 118 | } |
121 | } | 119 | } |
122 | 120 | ||
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c index 10b14e3a7eb8..068b8a2759b5 100644 --- a/arch/sh/kernel/perf_event.c +++ b/arch/sh/kernel/perf_event.c | |||
@@ -310,6 +310,10 @@ static int sh_pmu_event_init(struct perf_event *event) | |||
310 | { | 310 | { |
311 | int err; | 311 | int err; |
312 | 312 | ||
313 | /* does not support taken branch sampling */ | ||
314 | if (has_branch_stack(event)) | ||
315 | return -EOPNOTSUPP; | ||
316 | |||
313 | switch (event->attr.type) { | 317 | switch (event->attr.type) { |
314 | case PERF_TYPE_RAW: | 318 | case PERF_TYPE_RAW: |
315 | case PERF_TYPE_HW_CACHE: | 319 | case PERF_TYPE_HW_CACHE: |
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h index fc73a82366f8..5080d16a832f 100644 --- a/arch/sparc/include/asm/jump_label.h +++ b/arch/sparc/include/asm/jump_label.h | |||
@@ -7,7 +7,7 @@ | |||
7 | 7 | ||
8 | #define JUMP_LABEL_NOP_SIZE 4 | 8 | #define JUMP_LABEL_NOP_SIZE 4 |
9 | 9 | ||
10 | static __always_inline bool arch_static_branch(struct jump_label_key *key) | 10 | static __always_inline bool arch_static_branch(struct static_key *key) |
11 | { | 11 | { |
12 | asm goto("1:\n\t" | 12 | asm goto("1:\n\t" |
13 | "nop\n\t" | 13 | "nop\n\t" |
diff --git a/arch/sparc/include/asm/socket.h b/arch/sparc/include/asm/socket.h index 8af1b64168b3..bea1568ae4af 100644 --- a/arch/sparc/include/asm/socket.h +++ b/arch/sparc/include/asm/socket.h | |||
@@ -60,6 +60,11 @@ | |||
60 | 60 | ||
61 | #define SO_WIFI_STATUS 0x0025 | 61 | #define SO_WIFI_STATUS 0x0025 |
62 | #define SCM_WIFI_STATUS SO_WIFI_STATUS | 62 | #define SCM_WIFI_STATUS SO_WIFI_STATUS |
63 | #define SO_PEEK_OFF 0x0026 | ||
64 | |||
65 | /* Instruct lower device to use last 4-bytes of skb data as FCS */ | ||
66 | #define SO_NOFCS 0x0027 | ||
67 | |||
63 | 68 | ||
64 | /* Security levels - as per NRL IPv6 - don't actually do anything */ | 69 | /* Security levels - as per NRL IPv6 - don't actually do anything */ |
65 | #define SO_SECURITY_AUTHENTICATION 0x5001 | 70 | #define SO_SECURITY_AUTHENTICATION 0x5001 |
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 614da624330c..8e16a4a21582 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c | |||
@@ -1105,6 +1105,10 @@ static int sparc_pmu_event_init(struct perf_event *event) | |||
1105 | if (atomic_read(&nmi_active) < 0) | 1105 | if (atomic_read(&nmi_active) < 0) |
1106 | return -ENODEV; | 1106 | return -ENODEV; |
1107 | 1107 | ||
1108 | /* does not support taken branch sampling */ | ||
1109 | if (has_branch_stack(event)) | ||
1110 | return -EOPNOTSUPP; | ||
1111 | |||
1108 | switch (attr->type) { | 1112 | switch (attr->type) { |
1109 | case PERF_TYPE_HARDWARE: | 1113 | case PERF_TYPE_HARDWARE: |
1110 | if (attr->config >= sparc_pmu->max_events) | 1114 | if (attr->config >= sparc_pmu->max_events) |
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c index f793742eec2b..935fdbcd88c2 100644 --- a/arch/sparc/kernel/process_32.c +++ b/arch/sparc/kernel/process_32.c | |||
@@ -113,9 +113,7 @@ void cpu_idle(void) | |||
113 | while (!need_resched()) | 113 | while (!need_resched()) |
114 | cpu_relax(); | 114 | cpu_relax(); |
115 | } | 115 | } |
116 | preempt_enable_no_resched(); | 116 | schedule_preempt_disabled(); |
117 | schedule(); | ||
118 | preempt_disable(); | ||
119 | check_pgt_cache(); | 117 | check_pgt_cache(); |
120 | } | 118 | } |
121 | } | 119 | } |
@@ -138,9 +136,7 @@ void cpu_idle(void) | |||
138 | while (!need_resched()) | 136 | while (!need_resched()) |
139 | cpu_relax(); | 137 | cpu_relax(); |
140 | } | 138 | } |
141 | preempt_enable_no_resched(); | 139 | schedule_preempt_disabled(); |
142 | schedule(); | ||
143 | preempt_disable(); | ||
144 | check_pgt_cache(); | 140 | check_pgt_cache(); |
145 | } | 141 | } |
146 | } | 142 | } |
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index 39d8b05201a2..06b5b5fc20c7 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c | |||
@@ -104,15 +104,13 @@ void cpu_idle(void) | |||
104 | rcu_idle_exit(); | 104 | rcu_idle_exit(); |
105 | tick_nohz_idle_exit(); | 105 | tick_nohz_idle_exit(); |
106 | 106 | ||
107 | preempt_enable_no_resched(); | ||
108 | |||
109 | #ifdef CONFIG_HOTPLUG_CPU | 107 | #ifdef CONFIG_HOTPLUG_CPU |
110 | if (cpu_is_offline(cpu)) | 108 | if (cpu_is_offline(cpu)) { |
109 | sched_preempt_enable_no_resched(); | ||
111 | cpu_play_dead(); | 110 | cpu_play_dead(); |
111 | } | ||
112 | #endif | 112 | #endif |
113 | 113 | schedule_preempt_disabled(); | |
114 | schedule(); | ||
115 | preempt_disable(); | ||
116 | } | 114 | } |
117 | } | 115 | } |
118 | 116 | ||
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c index 4c1ac6e5347a..6ae495ef2b99 100644 --- a/arch/tile/kernel/process.c +++ b/arch/tile/kernel/process.c | |||
@@ -108,9 +108,7 @@ void cpu_idle(void) | |||
108 | } | 108 | } |
109 | rcu_idle_exit(); | 109 | rcu_idle_exit(); |
110 | tick_nohz_idle_exit(); | 110 | tick_nohz_idle_exit(); |
111 | preempt_enable_no_resched(); | 111 | schedule_preempt_disabled(); |
112 | schedule(); | ||
113 | preempt_disable(); | ||
114 | } | 112 | } |
115 | } | 113 | } |
116 | 114 | ||
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c index a492e59883a3..d2996183e584 100644 --- a/arch/um/drivers/net_kern.c +++ b/arch/um/drivers/net_kern.c | |||
@@ -293,7 +293,7 @@ static void uml_net_user_timer_expire(unsigned long _conn) | |||
293 | #endif | 293 | #endif |
294 | } | 294 | } |
295 | 295 | ||
296 | static void setup_etheraddr(char *str, unsigned char *addr, char *name) | 296 | static int setup_etheraddr(char *str, unsigned char *addr, char *name) |
297 | { | 297 | { |
298 | char *end; | 298 | char *end; |
299 | int i; | 299 | int i; |
@@ -334,12 +334,13 @@ static void setup_etheraddr(char *str, unsigned char *addr, char *name) | |||
334 | addr[0] | 0x02, addr[1], addr[2], addr[3], addr[4], | 334 | addr[0] | 0x02, addr[1], addr[2], addr[3], addr[4], |
335 | addr[5]); | 335 | addr[5]); |
336 | } | 336 | } |
337 | return; | 337 | return 0; |
338 | 338 | ||
339 | random: | 339 | random: |
340 | printk(KERN_INFO | 340 | printk(KERN_INFO |
341 | "Choosing a random ethernet address for device %s\n", name); | 341 | "Choosing a random ethernet address for device %s\n", name); |
342 | random_ether_addr(addr); | 342 | random_ether_addr(addr); |
343 | return 1; | ||
343 | } | 344 | } |
344 | 345 | ||
345 | static DEFINE_SPINLOCK(devices_lock); | 346 | static DEFINE_SPINLOCK(devices_lock); |
@@ -391,6 +392,7 @@ static void eth_configure(int n, void *init, char *mac, | |||
391 | struct net_device *dev; | 392 | struct net_device *dev; |
392 | struct uml_net_private *lp; | 393 | struct uml_net_private *lp; |
393 | int err, size; | 394 | int err, size; |
395 | int random_mac; | ||
394 | 396 | ||
395 | size = transport->private_size + sizeof(struct uml_net_private); | 397 | size = transport->private_size + sizeof(struct uml_net_private); |
396 | 398 | ||
@@ -417,7 +419,7 @@ static void eth_configure(int n, void *init, char *mac, | |||
417 | */ | 419 | */ |
418 | snprintf(dev->name, sizeof(dev->name), "eth%d", n); | 420 | snprintf(dev->name, sizeof(dev->name), "eth%d", n); |
419 | 421 | ||
420 | setup_etheraddr(mac, device->mac, dev->name); | 422 | random_mac = setup_etheraddr(mac, device->mac, dev->name); |
421 | 423 | ||
422 | printk(KERN_INFO "Netdevice %d (%pM) : ", n, device->mac); | 424 | printk(KERN_INFO "Netdevice %d (%pM) : ", n, device->mac); |
423 | 425 | ||
@@ -474,6 +476,9 @@ static void eth_configure(int n, void *init, char *mac, | |||
474 | 476 | ||
475 | /* don't use eth_mac_addr, it will not work here */ | 477 | /* don't use eth_mac_addr, it will not work here */ |
476 | memcpy(dev->dev_addr, device->mac, ETH_ALEN); | 478 | memcpy(dev->dev_addr, device->mac, ETH_ALEN); |
479 | if (random_mac) | ||
480 | dev->addr_assign_type |= NET_ADDR_RANDOM; | ||
481 | |||
477 | dev->mtu = transport->user->mtu; | 482 | dev->mtu = transport->user->mtu; |
478 | dev->netdev_ops = ¨_netdev_ops; | 483 | dev->netdev_ops = ¨_netdev_ops; |
479 | dev->ethtool_ops = ¨_net_ethtool_ops; | 484 | dev->ethtool_ops = ¨_net_ethtool_ops; |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 5bed94e189fa..d523a6c36835 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -82,6 +82,7 @@ config X86 | |||
82 | select CLKEVT_I8253 | 82 | select CLKEVT_I8253 |
83 | select ARCH_HAVE_NMI_SAFE_CMPXCHG | 83 | select ARCH_HAVE_NMI_SAFE_CMPXCHG |
84 | select GENERIC_IOMAP | 84 | select GENERIC_IOMAP |
85 | select DCACHE_WORD_ACCESS if !DEBUG_PAGEALLOC | ||
85 | 86 | ||
86 | config INSTRUCTION_DECODER | 87 | config INSTRUCTION_DECODER |
87 | def_bool (KPROBES || PERF_EVENTS) | 88 | def_bool (KPROBES || PERF_EVENTS) |
@@ -179,6 +180,9 @@ config ARCH_HAS_DEFAULT_IDLE | |||
179 | config ARCH_HAS_CACHE_LINE_SIZE | 180 | config ARCH_HAS_CACHE_LINE_SIZE |
180 | def_bool y | 181 | def_bool y |
181 | 182 | ||
183 | config ARCH_HAS_CPU_AUTOPROBE | ||
184 | def_bool y | ||
185 | |||
182 | config HAVE_SETUP_PER_CPU_AREA | 186 | config HAVE_SETUP_PER_CPU_AREA |
183 | def_bool y | 187 | def_bool y |
184 | 188 | ||
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 152232d2dc6a..c799352e24fc 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <crypto/aes.h> | 28 | #include <crypto/aes.h> |
29 | #include <crypto/cryptd.h> | 29 | #include <crypto/cryptd.h> |
30 | #include <crypto/ctr.h> | 30 | #include <crypto/ctr.h> |
31 | #include <asm/cpu_device_id.h> | ||
31 | #include <asm/i387.h> | 32 | #include <asm/i387.h> |
32 | #include <asm/aes.h> | 33 | #include <asm/aes.h> |
33 | #include <crypto/scatterwalk.h> | 34 | #include <crypto/scatterwalk.h> |
@@ -1253,14 +1254,19 @@ static struct crypto_alg __rfc4106_alg = { | |||
1253 | }; | 1254 | }; |
1254 | #endif | 1255 | #endif |
1255 | 1256 | ||
1257 | |||
1258 | static const struct x86_cpu_id aesni_cpu_id[] = { | ||
1259 | X86_FEATURE_MATCH(X86_FEATURE_AES), | ||
1260 | {} | ||
1261 | }; | ||
1262 | MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id); | ||
1263 | |||
1256 | static int __init aesni_init(void) | 1264 | static int __init aesni_init(void) |
1257 | { | 1265 | { |
1258 | int err; | 1266 | int err; |
1259 | 1267 | ||
1260 | if (!cpu_has_aes) { | 1268 | if (!x86_match_cpu(aesni_cpu_id)) |
1261 | printk(KERN_INFO "Intel AES-NI instructions are not detected.\n"); | ||
1262 | return -ENODEV; | 1269 | return -ENODEV; |
1263 | } | ||
1264 | 1270 | ||
1265 | if ((err = crypto_fpu_init())) | 1271 | if ((err = crypto_fpu_init())) |
1266 | goto fpu_err; | 1272 | goto fpu_err; |
diff --git a/arch/x86/crypto/crc32c-intel.c b/arch/x86/crypto/crc32c-intel.c index b9d00261703c..493f959261f7 100644 --- a/arch/x86/crypto/crc32c-intel.c +++ b/arch/x86/crypto/crc32c-intel.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <crypto/internal/hash.h> | 31 | #include <crypto/internal/hash.h> |
32 | 32 | ||
33 | #include <asm/cpufeature.h> | 33 | #include <asm/cpufeature.h> |
34 | #include <asm/cpu_device_id.h> | ||
34 | 35 | ||
35 | #define CHKSUM_BLOCK_SIZE 1 | 36 | #define CHKSUM_BLOCK_SIZE 1 |
36 | #define CHKSUM_DIGEST_SIZE 4 | 37 | #define CHKSUM_DIGEST_SIZE 4 |
@@ -173,13 +174,17 @@ static struct shash_alg alg = { | |||
173 | } | 174 | } |
174 | }; | 175 | }; |
175 | 176 | ||
177 | static const struct x86_cpu_id crc32c_cpu_id[] = { | ||
178 | X86_FEATURE_MATCH(X86_FEATURE_XMM4_2), | ||
179 | {} | ||
180 | }; | ||
181 | MODULE_DEVICE_TABLE(x86cpu, crc32c_cpu_id); | ||
176 | 182 | ||
177 | static int __init crc32c_intel_mod_init(void) | 183 | static int __init crc32c_intel_mod_init(void) |
178 | { | 184 | { |
179 | if (cpu_has_xmm4_2) | 185 | if (!x86_match_cpu(crc32c_cpu_id)) |
180 | return crypto_register_shash(&alg); | ||
181 | else | ||
182 | return -ENODEV; | 186 | return -ENODEV; |
187 | return crypto_register_shash(&alg); | ||
183 | } | 188 | } |
184 | 189 | ||
185 | static void __exit crc32c_intel_mod_fini(void) | 190 | static void __exit crc32c_intel_mod_fini(void) |
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c index 976aa64d9a20..b4bf0a63b520 100644 --- a/arch/x86/crypto/ghash-clmulni-intel_glue.c +++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <crypto/gf128mul.h> | 20 | #include <crypto/gf128mul.h> |
21 | #include <crypto/internal/hash.h> | 21 | #include <crypto/internal/hash.h> |
22 | #include <asm/i387.h> | 22 | #include <asm/i387.h> |
23 | #include <asm/cpu_device_id.h> | ||
23 | 24 | ||
24 | #define GHASH_BLOCK_SIZE 16 | 25 | #define GHASH_BLOCK_SIZE 16 |
25 | #define GHASH_DIGEST_SIZE 16 | 26 | #define GHASH_DIGEST_SIZE 16 |
@@ -294,15 +295,18 @@ static struct ahash_alg ghash_async_alg = { | |||
294 | }, | 295 | }, |
295 | }; | 296 | }; |
296 | 297 | ||
298 | static const struct x86_cpu_id pcmul_cpu_id[] = { | ||
299 | X86_FEATURE_MATCH(X86_FEATURE_PCLMULQDQ), /* Pickle-Mickle-Duck */ | ||
300 | {} | ||
301 | }; | ||
302 | MODULE_DEVICE_TABLE(x86cpu, pcmul_cpu_id); | ||
303 | |||
297 | static int __init ghash_pclmulqdqni_mod_init(void) | 304 | static int __init ghash_pclmulqdqni_mod_init(void) |
298 | { | 305 | { |
299 | int err; | 306 | int err; |
300 | 307 | ||
301 | if (!cpu_has_pclmulqdq) { | 308 | if (!x86_match_cpu(pcmul_cpu_id)) |
302 | printk(KERN_INFO "Intel PCLMULQDQ-NI instructions are not" | ||
303 | " detected.\n"); | ||
304 | return -ENODEV; | 309 | return -ENODEV; |
305 | } | ||
306 | 310 | ||
307 | err = crypto_register_shash(&ghash_alg); | 311 | err = crypto_register_shash(&ghash_alg); |
308 | if (err) | 312 | if (err) |
diff --git a/arch/x86/include/asm/cpu_device_id.h b/arch/x86/include/asm/cpu_device_id.h new file mode 100644 index 000000000000..ff501e511d91 --- /dev/null +++ b/arch/x86/include/asm/cpu_device_id.h | |||
@@ -0,0 +1,13 @@ | |||
1 | #ifndef _CPU_DEVICE_ID | ||
2 | #define _CPU_DEVICE_ID 1 | ||
3 | |||
4 | /* | ||
5 | * Declare drivers belonging to specific x86 CPUs | ||
6 | * Similar in spirit to pci_device_id and related PCI functions | ||
7 | */ | ||
8 | |||
9 | #include <linux/mod_devicetable.h> | ||
10 | |||
11 | extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match); | ||
12 | |||
13 | #endif | ||
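A sketch of how a module is expected to consume this header (hypothetical driver name; the AES-NI, CRC32C and GHASH hunks earlier in this series are the real in-tree users). The table both gates the init path via x86_match_cpu() and, through MODULE_DEVICE_TABLE, produces the modalias that lets udev autoload the module on matching CPUs:

#include <asm/cpu_device_id.h>
#include <linux/module.h>

static const struct x86_cpu_id mydrv_cpu_ids[] = {
	X86_FEATURE_MATCH(X86_FEATURE_XMM4_2),	/* any CPU advertising SSE4.2 */
	{}
};
MODULE_DEVICE_TABLE(x86cpu, mydrv_cpu_ids);

static int __init mydrv_init(void)
{
	if (!x86_match_cpu(mydrv_cpu_ids))
		return -ENODEV;
	/* ... register the driver ... */
	return 0;
}
module_init(mydrv_init);
MODULE_LICENSE("GPL");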
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 8d67d428b0f9..dcb839eebc76 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h | |||
@@ -177,6 +177,7 @@ | |||
177 | #define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */ | 177 | #define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */ |
178 | #define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */ | 178 | #define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */ |
179 | #define X86_FEATURE_DTS (7*32+ 7) /* Digital Thermal Sensor */ | 179 | #define X86_FEATURE_DTS (7*32+ 7) /* Digital Thermal Sensor */ |
180 | #define X86_FEATURE_HW_PSTATE (7*32+ 8) /* AMD HW-PState */ | ||
180 | 181 | ||
181 | /* Virtualization flags: Linux defined, word 8 */ | 182 | /* Virtualization flags: Linux defined, word 8 */ |
182 | #define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */ | 183 | #define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */ |
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h index da0b3ca815b7..382f75d735f3 100644 --- a/arch/x86/include/asm/hardirq.h +++ b/arch/x86/include/asm/hardirq.h | |||
@@ -7,7 +7,6 @@ | |||
7 | typedef struct { | 7 | typedef struct { |
8 | unsigned int __softirq_pending; | 8 | unsigned int __softirq_pending; |
9 | unsigned int __nmi_count; /* arch dependent */ | 9 | unsigned int __nmi_count; /* arch dependent */ |
10 | unsigned int irq0_irqs; | ||
11 | #ifdef CONFIG_X86_LOCAL_APIC | 10 | #ifdef CONFIG_X86_LOCAL_APIC |
12 | unsigned int apic_timer_irqs; /* arch dependent */ | 11 | unsigned int apic_timer_irqs; /* arch dependent */ |
13 | unsigned int irq_spurious_count; | 12 | unsigned int irq_spurious_count; |
diff --git a/arch/x86/include/asm/inat.h b/arch/x86/include/asm/inat.h index 205b063e3e32..74a2e312e8a2 100644 --- a/arch/x86/include/asm/inat.h +++ b/arch/x86/include/asm/inat.h | |||
@@ -97,11 +97,12 @@ | |||
97 | 97 | ||
98 | /* Attribute search APIs */ | 98 | /* Attribute search APIs */ |
99 | extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode); | 99 | extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode); |
100 | extern int inat_get_last_prefix_id(insn_byte_t last_pfx); | ||
100 | extern insn_attr_t inat_get_escape_attribute(insn_byte_t opcode, | 101 | extern insn_attr_t inat_get_escape_attribute(insn_byte_t opcode, |
101 | insn_byte_t last_pfx, | 102 | int lpfx_id, |
102 | insn_attr_t esc_attr); | 103 | insn_attr_t esc_attr); |
103 | extern insn_attr_t inat_get_group_attribute(insn_byte_t modrm, | 104 | extern insn_attr_t inat_get_group_attribute(insn_byte_t modrm, |
104 | insn_byte_t last_pfx, | 105 | int lpfx_id, |
105 | insn_attr_t esc_attr); | 106 | insn_attr_t esc_attr); |
106 | extern insn_attr_t inat_get_avx_attribute(insn_byte_t opcode, | 107 | extern insn_attr_t inat_get_avx_attribute(insn_byte_t opcode, |
107 | insn_byte_t vex_m, | 108 | insn_byte_t vex_m, |
diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h index 74df3f1eddfd..48eb30a86062 100644 --- a/arch/x86/include/asm/insn.h +++ b/arch/x86/include/asm/insn.h | |||
@@ -96,12 +96,6 @@ struct insn { | |||
96 | #define X86_VEX_P(vex) ((vex) & 0x03) /* VEX3 Byte2, VEX2 Byte1 */ | 96 | #define X86_VEX_P(vex) ((vex) & 0x03) /* VEX3 Byte2, VEX2 Byte1 */ |
97 | #define X86_VEX_M_MAX 0x1f /* VEX3.M Maximum value */ | 97 | #define X86_VEX_M_MAX 0x1f /* VEX3.M Maximum value */ |
98 | 98 | ||
99 | /* The last prefix is needed for two-byte and three-byte opcodes */ | ||
100 | static inline insn_byte_t insn_last_prefix(struct insn *insn) | ||
101 | { | ||
102 | return insn->prefixes.bytes[3]; | ||
103 | } | ||
104 | |||
105 | extern void insn_init(struct insn *insn, const void *kaddr, int x86_64); | 99 | extern void insn_init(struct insn *insn, const void *kaddr, int x86_64); |
106 | extern void insn_get_prefixes(struct insn *insn); | 100 | extern void insn_get_prefixes(struct insn *insn); |
107 | extern void insn_get_opcode(struct insn *insn); | 101 | extern void insn_get_opcode(struct insn *insn); |
@@ -160,6 +154,18 @@ static inline insn_byte_t insn_vex_p_bits(struct insn *insn) | |||
160 | return X86_VEX_P(insn->vex_prefix.bytes[2]); | 154 | return X86_VEX_P(insn->vex_prefix.bytes[2]); |
161 | } | 155 | } |
162 | 156 | ||
157 | /* Get the last prefix id from last prefix or VEX prefix */ | ||
158 | static inline int insn_last_prefix_id(struct insn *insn) | ||
159 | { | ||
160 | if (insn_is_avx(insn)) | ||
161 | return insn_vex_p_bits(insn); /* VEX_p is a SIMD prefix id */ | ||
162 | |||
163 | if (insn->prefixes.bytes[3]) | ||
164 | return inat_get_last_prefix_id(insn->prefixes.bytes[3]); | ||
165 | |||
166 | return 0; | ||
167 | } | ||
168 | |||
163 | /* Offset of each field from kaddr */ | 169 | /* Offset of each field from kaddr */ |
164 | static inline int insn_offset_rex_prefix(struct insn *insn) | 170 | static inline int insn_offset_rex_prefix(struct insn *insn) |
165 | { | 171 | { |
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h index a32b18ce6ead..3a16c1483b45 100644 --- a/arch/x86/include/asm/jump_label.h +++ b/arch/x86/include/asm/jump_label.h | |||
@@ -9,12 +9,12 @@ | |||
9 | 9 | ||
10 | #define JUMP_LABEL_NOP_SIZE 5 | 10 | #define JUMP_LABEL_NOP_SIZE 5 |
11 | 11 | ||
12 | #define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t" | 12 | #define STATIC_KEY_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t" |
13 | 13 | ||
14 | static __always_inline bool arch_static_branch(struct jump_label_key *key) | 14 | static __always_inline bool arch_static_branch(struct static_key *key) |
15 | { | 15 | { |
16 | asm goto("1:" | 16 | asm goto("1:" |
17 | JUMP_LABEL_INITIAL_NOP | 17 | STATIC_KEY_INITIAL_NOP |
18 | ".pushsection __jump_table, \"aw\" \n\t" | 18 | ".pushsection __jump_table, \"aw\" \n\t" |
19 | _ASM_ALIGN "\n\t" | 19 | _ASM_ALIGN "\n\t" |
20 | _ASM_PTR "1b, %l[l_yes], %c0 \n\t" | 20 | _ASM_PTR "1b, %l[l_yes], %c0 \n\t" |
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index a6962d9161a0..ccb805966f68 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h | |||
@@ -56,6 +56,13 @@ | |||
56 | #define MSR_OFFCORE_RSP_0 0x000001a6 | 56 | #define MSR_OFFCORE_RSP_0 0x000001a6 |
57 | #define MSR_OFFCORE_RSP_1 0x000001a7 | 57 | #define MSR_OFFCORE_RSP_1 0x000001a7 |
58 | 58 | ||
59 | #define MSR_LBR_SELECT 0x000001c8 | ||
60 | #define MSR_LBR_TOS 0x000001c9 | ||
61 | #define MSR_LBR_NHM_FROM 0x00000680 | ||
62 | #define MSR_LBR_NHM_TO 0x000006c0 | ||
63 | #define MSR_LBR_CORE_FROM 0x00000040 | ||
64 | #define MSR_LBR_CORE_TO 0x00000060 | ||
65 | |||
59 | #define MSR_IA32_PEBS_ENABLE 0x000003f1 | 66 | #define MSR_IA32_PEBS_ENABLE 0x000003f1 |
60 | #define MSR_IA32_DS_AREA 0x00000600 | 67 | #define MSR_IA32_DS_AREA 0x00000600 |
61 | #define MSR_IA32_PERF_CAPABILITIES 0x00000345 | 68 | #define MSR_IA32_PERF_CAPABILITIES 0x00000345 |
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index a7d2db9a74fb..c0180fd372d2 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h | |||
@@ -230,9 +230,9 @@ static inline unsigned long long paravirt_sched_clock(void) | |||
230 | return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock); | 230 | return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock); |
231 | } | 231 | } |
232 | 232 | ||
233 | struct jump_label_key; | 233 | struct static_key; |
234 | extern struct jump_label_key paravirt_steal_enabled; | 234 | extern struct static_key paravirt_steal_enabled; |
235 | extern struct jump_label_key paravirt_steal_rq_enabled; | 235 | extern struct static_key paravirt_steal_rq_enabled; |
236 | 236 | ||
237 | static inline u64 paravirt_steal_clock(int cpu) | 237 | static inline u64 paravirt_steal_clock(int cpu) |
238 | { | 238 | { |
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 461ce432b1c2..e8fb2c7a5f4f 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h | |||
@@ -188,8 +188,6 @@ extern u32 get_ibs_caps(void); | |||
188 | #ifdef CONFIG_PERF_EVENTS | 188 | #ifdef CONFIG_PERF_EVENTS |
189 | extern void perf_events_lapic_init(void); | 189 | extern void perf_events_lapic_init(void); |
190 | 190 | ||
191 | #define PERF_EVENT_INDEX_OFFSET 0 | ||
192 | |||
193 | /* | 191 | /* |
194 | * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups. | 192 | * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups. |
195 | * This flag is otherwise unused and ABI specified to be 0, so nobody should | 193 | * This flag is otherwise unused and ABI specified to be 0, so nobody should |
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h index 431793e5d484..34baa0eb5d0c 100644 --- a/arch/x86/include/asm/timer.h +++ b/arch/x86/include/asm/timer.h | |||
@@ -57,14 +57,10 @@ DECLARE_PER_CPU(unsigned long long, cyc2ns_offset); | |||
57 | 57 | ||
58 | static inline unsigned long long __cycles_2_ns(unsigned long long cyc) | 58 | static inline unsigned long long __cycles_2_ns(unsigned long long cyc) |
59 | { | 59 | { |
60 | unsigned long long quot; | ||
61 | unsigned long long rem; | ||
62 | int cpu = smp_processor_id(); | 60 | int cpu = smp_processor_id(); |
63 | unsigned long long ns = per_cpu(cyc2ns_offset, cpu); | 61 | unsigned long long ns = per_cpu(cyc2ns_offset, cpu); |
64 | quot = (cyc >> CYC2NS_SCALE_FACTOR); | 62 | ns += mult_frac(cyc, per_cpu(cyc2ns, cpu), |
65 | rem = cyc & ((1ULL << CYC2NS_SCALE_FACTOR) - 1); | 63 | (1UL << CYC2NS_SCALE_FACTOR)); |
66 | ns += quot * per_cpu(cyc2ns, cpu) + | ||
67 | ((rem * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR); | ||
68 | return ns; | 64 | return ns; |
69 | } | 65 | } |
70 | 66 | ||
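mult_frac() comes from linux/kernel.h; it computes x * numer / denom while splitting x into quotient and remainder first, so the intermediate product is far less likely to overflow. Roughly:

#define mult_frac(x, numer, denom)				\
({								\
	typeof(x) quot = (x) / (denom);				\
	typeof(x) rem  = (x) % (denom);				\
	(quot * (numer)) + ((rem * (numer)) / (denom));		\
})

The cyc2ns conversion above therefore keeps the same scaled-math result as the open-coded quotient/remainder version it replaces.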
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 5369059c07a9..532d2e090e6f 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile | |||
@@ -69,6 +69,7 @@ obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o | |||
69 | obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o | 69 | obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o |
70 | obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o | 70 | obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o |
71 | obj-$(CONFIG_KPROBES) += kprobes.o | 71 | obj-$(CONFIG_KPROBES) += kprobes.o |
72 | obj-$(CONFIG_OPTPROBES) += kprobes-opt.o | ||
72 | obj-$(CONFIG_MODULES) += module.o | 73 | obj-$(CONFIG_MODULES) += module.o |
73 | obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o | 74 | obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o |
74 | obj-$(CONFIG_KGDB) += kgdb.o | 75 | obj-$(CONFIG_KGDB) += kgdb.o |
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 25f24dccdcfa..6ab6aa2fdfdd 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile | |||
@@ -16,6 +16,7 @@ obj-y := intel_cacheinfo.o scattered.o topology.o | |||
16 | obj-y += proc.o capflags.o powerflags.o common.o | 16 | obj-y += proc.o capflags.o powerflags.o common.o |
17 | obj-y += vmware.o hypervisor.o sched.o mshyperv.o | 17 | obj-y += vmware.o hypervisor.o sched.o mshyperv.o |
18 | obj-y += rdrand.o | 18 | obj-y += rdrand.o |
19 | obj-y += match.o | ||
19 | 20 | ||
20 | obj-$(CONFIG_X86_32) += bugs.o | 21 | obj-$(CONFIG_X86_32) += bugs.o |
21 | obj-$(CONFIG_X86_64) += bugs_64.o | 22 | obj-$(CONFIG_X86_64) += bugs_64.o |
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index f4773f4aae35..0a44b90602b0 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -5,6 +5,7 @@ | |||
5 | #include <linux/mm.h> | 5 | #include <linux/mm.h> |
6 | 6 | ||
7 | #include <linux/io.h> | 7 | #include <linux/io.h> |
8 | #include <linux/sched.h> | ||
8 | #include <asm/processor.h> | 9 | #include <asm/processor.h> |
9 | #include <asm/apic.h> | 10 | #include <asm/apic.h> |
10 | #include <asm/cpu.h> | 11 | #include <asm/cpu.h> |
@@ -456,6 +457,8 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c) | |||
456 | if (c->x86_power & (1 << 8)) { | 457 | if (c->x86_power & (1 << 8)) { |
457 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | 458 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); |
458 | set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); | 459 | set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); |
460 | if (!check_tsc_unstable()) | ||
461 | sched_clock_stable = 1; | ||
459 | } | 462 | } |
460 | 463 | ||
461 | #ifdef CONFIG_X86_64 | 464 | #ifdef CONFIG_X86_64 |
diff --git a/arch/x86/kernel/cpu/match.c b/arch/x86/kernel/cpu/match.c new file mode 100644 index 000000000000..5502b289341b --- /dev/null +++ b/arch/x86/kernel/cpu/match.c | |||
@@ -0,0 +1,91 @@ | |||
1 | #include <asm/cpu_device_id.h> | ||
2 | #include <asm/processor.h> | ||
3 | #include <linux/cpu.h> | ||
4 | #include <linux/module.h> | ||
5 | #include <linux/slab.h> | ||
6 | |||
7 | /** | ||
8 | * x86_match_cpu - match current CPU against an array of x86_cpu_ids | ||

9 | * @match: Pointer to array of x86_cpu_ids. Last entry terminated with | ||
10 | * {}. | ||
11 | * | ||
12 | * Return the entry if the current CPU matches the entries in the | ||
13 | * passed x86_cpu_id match table. Otherwise NULL. The match table | ||
14 | * contains vendor (X86_VENDOR_*), family, model and feature bits or | ||
15 | * respective wildcard entries. | ||
16 | * | ||
17 | * A typical table entry would be to match a specific CPU | ||
18 | * { X86_VENDOR_INTEL, 6, 0x12 } | ||
19 | * or to match a specific CPU feature | ||
20 | * { X86_FEATURE_MATCH(X86_FEATURE_FOOBAR) } | ||
21 | * | ||
22 | * Fields can be wildcarded with %X86_VENDOR_ANY, %X86_FAMILY_ANY, | ||
23 | * %X86_MODEL_ANY, %X86_FEATURE_ANY or 0 (except for vendor) | ||
24 | * | ||
25 | * Arrays used to match for this should also be declared using | ||
26 | * MODULE_DEVICE_TABLE(x86cpu, ...) | ||
27 | * | ||
28 | * This always matches against the boot cpu, assuming models and features are | ||
29 | * consistent over all CPUs. | ||
30 | */ | ||
31 | const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match) | ||
32 | { | ||
33 | const struct x86_cpu_id *m; | ||
34 | struct cpuinfo_x86 *c = &boot_cpu_data; | ||
35 | |||
36 | for (m = match; m->vendor | m->family | m->model | m->feature; m++) { | ||
37 | if (m->vendor != X86_VENDOR_ANY && c->x86_vendor != m->vendor) | ||
38 | continue; | ||
39 | if (m->family != X86_FAMILY_ANY && c->x86 != m->family) | ||
40 | continue; | ||
41 | if (m->model != X86_MODEL_ANY && c->x86_model != m->model) | ||
42 | continue; | ||
43 | if (m->feature != X86_FEATURE_ANY && !cpu_has(c, m->feature)) | ||
44 | continue; | ||
45 | return m; | ||
46 | } | ||
47 | return NULL; | ||
48 | } | ||
49 | EXPORT_SYMBOL(x86_match_cpu); | ||
50 | |||
51 | ssize_t arch_print_cpu_modalias(struct device *dev, | ||
52 | struct device_attribute *attr, | ||
53 | char *bufptr) | ||
54 | { | ||
55 | int size = PAGE_SIZE; | ||
56 | int i, n; | ||
57 | char *buf = bufptr; | ||
58 | |||
59 | n = snprintf(buf, size, "x86cpu:vendor:%04X:family:%04X:" | ||
60 | "model:%04X:feature:", | ||
61 | boot_cpu_data.x86_vendor, | ||
62 | boot_cpu_data.x86, | ||
63 | boot_cpu_data.x86_model); | ||
64 | size -= n; | ||
65 | buf += n; | ||
66 | size -= 1; | ||
67 | for (i = 0; i < NCAPINTS*32; i++) { | ||
68 | if (boot_cpu_has(i)) { | ||
69 | n = snprintf(buf, size, ",%04X", i); | ||
70 | if (n >= size) { | ||
71 | WARN(1, "x86 features overflow page\n"); | ||
72 | break; | ||
73 | } | ||
74 | size -= n; | ||
75 | buf += n; | ||
76 | } | ||
77 | } | ||
78 | *buf++ = '\n'; | ||
79 | return buf - bufptr; | ||
80 | } | ||
81 | |||
82 | int arch_cpu_uevent(struct device *dev, struct kobj_uevent_env *env) | ||
83 | { | ||
84 | char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL); | ||
85 | if (buf) { | ||
86 | arch_print_cpu_modalias(NULL, NULL, buf); | ||
87 | add_uevent_var(env, "MODALIAS=%s", buf); | ||
88 | kfree(buf); | ||
89 | } | ||
90 | return 0; | ||
91 | } | ||
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 5adce1040b11..0a18d16cb58d 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | #include <linux/cpu.h> | 25 | #include <linux/cpu.h> |
26 | #include <linux/bitops.h> | 26 | #include <linux/bitops.h> |
27 | #include <linux/device.h> | ||
27 | 28 | ||
28 | #include <asm/apic.h> | 29 | #include <asm/apic.h> |
29 | #include <asm/stacktrace.h> | 30 | #include <asm/stacktrace.h> |
@@ -31,6 +32,7 @@ | |||
31 | #include <asm/compat.h> | 32 | #include <asm/compat.h> |
32 | #include <asm/smp.h> | 33 | #include <asm/smp.h> |
33 | #include <asm/alternative.h> | 34 | #include <asm/alternative.h> |
35 | #include <asm/timer.h> | ||
34 | 36 | ||
35 | #include "perf_event.h" | 37 | #include "perf_event.h" |
36 | 38 | ||
@@ -351,6 +353,36 @@ int x86_setup_perfctr(struct perf_event *event) | |||
351 | return 0; | 353 | return 0; |
352 | } | 354 | } |
353 | 355 | ||
356 | /* | ||
357 | * check that branch_sample_type is compatible with | ||
358 | * settings needed for precise_ip > 1 which implies | ||
359 | * using the LBR to capture ALL taken branches at the | ||
360 | * priv levels of the measurement | ||
361 | */ | ||
362 | static inline int precise_br_compat(struct perf_event *event) | ||
363 | { | ||
364 | u64 m = event->attr.branch_sample_type; | ||
365 | u64 b = 0; | ||
366 | |||
367 | /* must capture all branches */ | ||
368 | if (!(m & PERF_SAMPLE_BRANCH_ANY)) | ||
369 | return 0; | ||
370 | |||
371 | m &= PERF_SAMPLE_BRANCH_KERNEL | PERF_SAMPLE_BRANCH_USER; | ||
372 | |||
373 | if (!event->attr.exclude_user) | ||
374 | b |= PERF_SAMPLE_BRANCH_USER; | ||
375 | |||
376 | if (!event->attr.exclude_kernel) | ||
377 | b |= PERF_SAMPLE_BRANCH_KERNEL; | ||
378 | |||
379 | /* | ||
380 | * ignore PERF_SAMPLE_BRANCH_HV, not supported on x86 | ||
381 | */ | ||
382 | |||
383 | return m == b; | ||
384 | } | ||
385 | |||
354 | int x86_pmu_hw_config(struct perf_event *event) | 386 | int x86_pmu_hw_config(struct perf_event *event) |
355 | { | 387 | { |
356 | if (event->attr.precise_ip) { | 388 | if (event->attr.precise_ip) { |
@@ -367,6 +399,36 @@ int x86_pmu_hw_config(struct perf_event *event) | |||
367 | 399 | ||
368 | if (event->attr.precise_ip > precise) | 400 | if (event->attr.precise_ip > precise) |
369 | return -EOPNOTSUPP; | 401 | return -EOPNOTSUPP; |
402 | /* | ||
403 | * check that PEBS LBR correction does not conflict with | ||
404 | * whatever the user is asking with attr->branch_sample_type | ||
405 | */ | ||
406 | if (event->attr.precise_ip > 1) { | ||
407 | u64 *br_type = &event->attr.branch_sample_type; | ||
408 | |||
409 | if (has_branch_stack(event)) { | ||
410 | if (!precise_br_compat(event)) | ||
411 | return -EOPNOTSUPP; | ||
412 | |||
413 | /* branch_sample_type is compatible */ | ||
414 | |||
415 | } else { | ||
416 | /* | ||
417 | * user did not specify branch_sample_type | ||
418 | * | ||
419 | * For PEBS fixups, we capture all | ||
420 | * the branches at the priv level of the | ||
421 | * event. | ||
422 | */ | ||
423 | *br_type = PERF_SAMPLE_BRANCH_ANY; | ||
424 | |||
425 | if (!event->attr.exclude_user) | ||
426 | *br_type |= PERF_SAMPLE_BRANCH_USER; | ||
427 | |||
428 | if (!event->attr.exclude_kernel) | ||
429 | *br_type |= PERF_SAMPLE_BRANCH_KERNEL; | ||
430 | } | ||
431 | } | ||
370 | } | 432 | } |
371 | 433 | ||
372 | /* | 434 | /* |
@@ -424,6 +486,10 @@ static int __x86_pmu_event_init(struct perf_event *event) | |||
424 | /* mark unused */ | 486 | /* mark unused */ |
425 | event->hw.extra_reg.idx = EXTRA_REG_NONE; | 487 | event->hw.extra_reg.idx = EXTRA_REG_NONE; |
426 | 488 | ||
489 | /* mark not used */ | ||
490 | event->hw.extra_reg.idx = EXTRA_REG_NONE; | ||
491 | event->hw.branch_reg.idx = EXTRA_REG_NONE; | ||
492 | |||
427 | return x86_pmu.hw_config(event); | 493 | return x86_pmu.hw_config(event); |
428 | } | 494 | } |
429 | 495 | ||
@@ -1210,6 +1276,8 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) | |||
1210 | break; | 1276 | break; |
1211 | 1277 | ||
1212 | case CPU_STARTING: | 1278 | case CPU_STARTING: |
1279 | if (x86_pmu.attr_rdpmc) | ||
1280 | set_in_cr4(X86_CR4_PCE); | ||
1213 | if (x86_pmu.cpu_starting) | 1281 | if (x86_pmu.cpu_starting) |
1214 | x86_pmu.cpu_starting(cpu); | 1282 | x86_pmu.cpu_starting(cpu); |
1215 | break; | 1283 | break; |
@@ -1319,6 +1387,8 @@ static int __init init_hw_perf_events(void) | |||
1319 | } | 1387 | } |
1320 | } | 1388 | } |
1321 | 1389 | ||
1390 | x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */ | ||
1391 | |||
1322 | pr_info("... version: %d\n", x86_pmu.version); | 1392 | pr_info("... version: %d\n", x86_pmu.version); |
1323 | pr_info("... bit width: %d\n", x86_pmu.cntval_bits); | 1393 | pr_info("... bit width: %d\n", x86_pmu.cntval_bits); |
1324 | pr_info("... generic registers: %d\n", x86_pmu.num_counters); | 1394 | pr_info("... generic registers: %d\n", x86_pmu.num_counters); |
@@ -1542,23 +1612,106 @@ static int x86_pmu_event_init(struct perf_event *event) | |||
1542 | return err; | 1612 | return err; |
1543 | } | 1613 | } |
1544 | 1614 | ||
1615 | static int x86_pmu_event_idx(struct perf_event *event) | ||
1616 | { | ||
1617 | int idx = event->hw.idx; | ||
1618 | |||
1619 | if (x86_pmu.num_counters_fixed && idx >= X86_PMC_IDX_FIXED) { | ||
1620 | idx -= X86_PMC_IDX_FIXED; | ||
1621 | idx |= 1 << 30; | ||
1622 | } | ||
1623 | |||
1624 | return idx + 1; | ||
1625 | } | ||
1626 | |||
1627 | static ssize_t get_attr_rdpmc(struct device *cdev, | ||
1628 | struct device_attribute *attr, | ||
1629 | char *buf) | ||
1630 | { | ||
1631 | return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc); | ||
1632 | } | ||
1633 | |||
1634 | static void change_rdpmc(void *info) | ||
1635 | { | ||
1636 | bool enable = !!(unsigned long)info; | ||
1637 | |||
1638 | if (enable) | ||
1639 | set_in_cr4(X86_CR4_PCE); | ||
1640 | else | ||
1641 | clear_in_cr4(X86_CR4_PCE); | ||
1642 | } | ||
1643 | |||
1644 | static ssize_t set_attr_rdpmc(struct device *cdev, | ||
1645 | struct device_attribute *attr, | ||
1646 | const char *buf, size_t count) | ||
1647 | { | ||
1648 | unsigned long val = simple_strtoul(buf, NULL, 0); | ||
1649 | |||
1650 | if (!!val != !!x86_pmu.attr_rdpmc) { | ||
1651 | x86_pmu.attr_rdpmc = !!val; | ||
1652 | smp_call_function(change_rdpmc, (void *)val, 1); | ||
1653 | } | ||
1654 | |||
1655 | return count; | ||
1656 | } | ||
1657 | |||
1658 | static DEVICE_ATTR(rdpmc, S_IRUSR | S_IWUSR, get_attr_rdpmc, set_attr_rdpmc); | ||
1659 | |||
1660 | static struct attribute *x86_pmu_attrs[] = { | ||
1661 | &dev_attr_rdpmc.attr, | ||
1662 | NULL, | ||
1663 | }; | ||
1664 | |||
1665 | static struct attribute_group x86_pmu_attr_group = { | ||
1666 | .attrs = x86_pmu_attrs, | ||
1667 | }; | ||
1668 | |||
1669 | static const struct attribute_group *x86_pmu_attr_groups[] = { | ||
1670 | &x86_pmu_attr_group, | ||
1671 | NULL, | ||
1672 | }; | ||
1673 | |||
1674 | static void x86_pmu_flush_branch_stack(void) | ||
1675 | { | ||
1676 | if (x86_pmu.flush_branch_stack) | ||
1677 | x86_pmu.flush_branch_stack(); | ||
1678 | } | ||
1679 | |||
1545 | static struct pmu pmu = { | 1680 | static struct pmu pmu = { |
1546 | .pmu_enable = x86_pmu_enable, | 1681 | .pmu_enable = x86_pmu_enable, |
1547 | .pmu_disable = x86_pmu_disable, | 1682 | .pmu_disable = x86_pmu_disable, |
1683 | |||
1684 | .attr_groups = x86_pmu_attr_groups, | ||
1548 | 1685 | ||
1549 | .event_init = x86_pmu_event_init, | 1686 | .event_init = x86_pmu_event_init, |
1550 | 1687 | ||
1551 | .add = x86_pmu_add, | 1688 | .add = x86_pmu_add, |
1552 | .del = x86_pmu_del, | 1689 | .del = x86_pmu_del, |
1553 | .start = x86_pmu_start, | 1690 | .start = x86_pmu_start, |
1554 | .stop = x86_pmu_stop, | 1691 | .stop = x86_pmu_stop, |
1555 | .read = x86_pmu_read, | 1692 | .read = x86_pmu_read, |
1556 | 1693 | ||
1557 | .start_txn = x86_pmu_start_txn, | 1694 | .start_txn = x86_pmu_start_txn, |
1558 | .cancel_txn = x86_pmu_cancel_txn, | 1695 | .cancel_txn = x86_pmu_cancel_txn, |
1559 | .commit_txn = x86_pmu_commit_txn, | 1696 | .commit_txn = x86_pmu_commit_txn, |
1697 | |||
1698 | .event_idx = x86_pmu_event_idx, | ||
1699 | .flush_branch_stack = x86_pmu_flush_branch_stack, | ||
1560 | }; | 1700 | }; |
1561 | 1701 | ||
1702 | void perf_update_user_clock(struct perf_event_mmap_page *userpg, u64 now) | ||
1703 | { | ||
1704 | if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) | ||
1705 | return; | ||
1706 | |||
1707 | if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) | ||
1708 | return; | ||
1709 | |||
1710 | userpg->time_mult = this_cpu_read(cyc2ns); | ||
1711 | userpg->time_shift = CYC2NS_SCALE_FACTOR; | ||
1712 | userpg->time_offset = this_cpu_read(cyc2ns_offset) - now; | ||
1713 | } | ||
1714 | |||
1562 | /* | 1715 | /* |
1563 | * callchain support | 1716 | * callchain support |
1564 | */ | 1717 | */ |
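An illustrative sketch, not part of the patch itself: the rdpmc attribute and x86_pmu_event_idx() above exist so that user space can read counters directly with RDPMC. Assuming the generic perf mmap code publishes pmu->event_idx() in perf_event_mmap_page->index (0 meaning RDPMC is not usable), a minimal self-monitoring read looks like this; the helper name here is made up:

	#include <stdint.h>

	static uint64_t read_pmc(uint32_t index)	/* value from perf_event_mmap_page->index */
	{
		uint32_t lo, hi;

		/* event_idx() returns counter + 1; bit 30 selects the fixed-counter space */
		asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (index - 1));
		return (uint64_t)lo | ((uint64_t)hi << 32);
	}

CR4.PCE is set at CPU_STARTING time while attr_rdpmc is enabled; clearing the sysfs attribute runs change_rdpmc() on the other CPUs, after which RDPMC from user mode faults.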
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index c30c807ddc72..8484e77c211e 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h | |||
@@ -33,6 +33,7 @@ enum extra_reg_type { | |||
33 | 33 | ||
34 | EXTRA_REG_RSP_0 = 0, /* offcore_response_0 */ | 34 | EXTRA_REG_RSP_0 = 0, /* offcore_response_0 */ |
35 | EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */ | 35 | EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */ |
36 | EXTRA_REG_LBR = 2, /* lbr_select */ | ||
36 | 37 | ||
37 | EXTRA_REG_MAX /* number of entries needed */ | 38 | EXTRA_REG_MAX /* number of entries needed */ |
38 | }; | 39 | }; |
@@ -130,6 +131,8 @@ struct cpu_hw_events { | |||
130 | void *lbr_context; | 131 | void *lbr_context; |
131 | struct perf_branch_stack lbr_stack; | 132 | struct perf_branch_stack lbr_stack; |
132 | struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES]; | 133 | struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES]; |
134 | struct er_account *lbr_sel; | ||
135 | u64 br_sel; | ||
133 | 136 | ||
134 | /* | 137 | /* |
135 | * Intel host/guest exclude bits | 138 | * Intel host/guest exclude bits |
@@ -268,6 +271,29 @@ struct x86_pmu_quirk { | |||
268 | void (*func)(void); | 271 | void (*func)(void); |
269 | }; | 272 | }; |
270 | 273 | ||
274 | union x86_pmu_config { | ||
275 | struct { | ||
276 | u64 event:8, | ||
277 | umask:8, | ||
278 | usr:1, | ||
279 | os:1, | ||
280 | edge:1, | ||
281 | pc:1, | ||
282 | interrupt:1, | ||
283 | __reserved1:1, | ||
284 | en:1, | ||
285 | inv:1, | ||
286 | cmask:8, | ||
287 | event2:4, | ||
288 | __reserved2:4, | ||
289 | go:1, | ||
290 | ho:1; | ||
291 | } bits; | ||
292 | u64 value; | ||
293 | }; | ||
294 | |||
295 | #define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value | ||
296 | |||
271 | /* | 297 | /* |
272 | * struct x86_pmu - generic x86 pmu | 298 | * struct x86_pmu - generic x86 pmu |
273 | */ | 299 | */ |
@@ -309,10 +335,19 @@ struct x86_pmu { | |||
309 | struct x86_pmu_quirk *quirks; | 335 | struct x86_pmu_quirk *quirks; |
310 | int perfctr_second_write; | 336 | int perfctr_second_write; |
311 | 337 | ||
338 | /* | ||
339 | * sysfs attrs | ||
340 | */ | ||
341 | int attr_rdpmc; | ||
342 | |||
343 | /* | ||
344 | * CPU Hotplug hooks | ||
345 | */ | ||
312 | int (*cpu_prepare)(int cpu); | 346 | int (*cpu_prepare)(int cpu); |
313 | void (*cpu_starting)(int cpu); | 347 | void (*cpu_starting)(int cpu); |
314 | void (*cpu_dying)(int cpu); | 348 | void (*cpu_dying)(int cpu); |
315 | void (*cpu_dead)(int cpu); | 349 | void (*cpu_dead)(int cpu); |
350 | void (*flush_branch_stack)(void); | ||
316 | 351 | ||
317 | /* | 352 | /* |
318 | * Intel Arch Perfmon v2+ | 353 | * Intel Arch Perfmon v2+ |
@@ -334,6 +369,8 @@ struct x86_pmu { | |||
334 | */ | 369 | */ |
335 | unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */ | 370 | unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */ |
336 | int lbr_nr; /* hardware stack size */ | 371 | int lbr_nr; /* hardware stack size */ |
372 | u64 lbr_sel_mask; /* LBR_SELECT valid bits */ | ||
373 | const int *lbr_sel_map; /* lbr_select mappings */ | ||
337 | 374 | ||
338 | /* | 375 | /* |
339 | * Extra registers for events | 376 | * Extra registers for events |
@@ -447,6 +484,15 @@ extern struct event_constraint emptyconstraint; | |||
447 | 484 | ||
448 | extern struct event_constraint unconstrained; | 485 | extern struct event_constraint unconstrained; |
449 | 486 | ||
487 | static inline bool kernel_ip(unsigned long ip) | ||
488 | { | ||
489 | #ifdef CONFIG_X86_32 | ||
490 | return ip > PAGE_OFFSET; | ||
491 | #else | ||
492 | return (long)ip < 0; | ||
493 | #endif | ||
494 | } | ||
495 | |||
450 | #ifdef CONFIG_CPU_SUP_AMD | 496 | #ifdef CONFIG_CPU_SUP_AMD |
451 | 497 | ||
452 | int amd_pmu_init(void); | 498 | int amd_pmu_init(void); |
@@ -527,6 +573,10 @@ void intel_pmu_lbr_init_nhm(void); | |||
527 | 573 | ||
528 | void intel_pmu_lbr_init_atom(void); | 574 | void intel_pmu_lbr_init_atom(void); |
529 | 575 | ||
576 | void intel_pmu_lbr_init_snb(void); | ||
577 | |||
578 | int intel_pmu_setup_lbr_filter(struct perf_event *event); | ||
579 | |||
530 | int p4_pmu_init(void); | 580 | int p4_pmu_init(void); |
531 | 581 | ||
532 | int p6_pmu_init(void); | 582 | int p6_pmu_init(void); |
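For reference, a worked expansion of the X86_CONFIG() helper introduced above, using the bit layout of union x86_pmu_config (event in bits 0-7, umask in 8-15, inv at bit 23, cmask in 24-31); the results match the raw literals this series replaces in perf_event_intel.c:

	/*
	 * X86_CONFIG(.event=0xc0, .inv=1, .cmask=16)
	 *	= 0xc0 | (1ULL << 23) | (16ULL << 24)              = 0x108000c0
	 * X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1)
	 *	= 0x0e | (0x01 << 8) | (1ULL << 23) | (1ULL << 24) = 0x0180010e
	 * X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1)
	 *	= 0xb1 | (0x3f << 8) | (1ULL << 23) | (1ULL << 24) = 0x01803fb1
	 */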
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index 67250a52430b..dd002faff7a6 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c | |||
@@ -139,6 +139,9 @@ static int amd_pmu_hw_config(struct perf_event *event) | |||
139 | if (ret) | 139 | if (ret) |
140 | return ret; | 140 | return ret; |
141 | 141 | ||
142 | if (has_branch_stack(event)) | ||
143 | return -EOPNOTSUPP; | ||
144 | |||
142 | if (event->attr.exclude_host && event->attr.exclude_guest) | 145 | if (event->attr.exclude_host && event->attr.exclude_guest) |
143 | /* | 146 | /* |
144 | * When HO == GO == 1 the hardware treats that as GO == HO == 0 | 147 | * When HO == GO == 1 the hardware treats that as GO == HO == 0 |
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 61d4f79a550e..6a84e7f28f05 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -728,6 +728,19 @@ static __initconst const u64 atom_hw_cache_event_ids | |||
728 | }, | 728 | }, |
729 | }; | 729 | }; |
730 | 730 | ||
731 | static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event) | ||
732 | { | ||
733 | /* user explicitly requested branch sampling */ | ||
734 | if (has_branch_stack(event)) | ||
735 | return true; | ||
736 | |||
737 | /* implicit branch sampling to correct PEBS skid */ | ||
738 | if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1) | ||
739 | return true; | ||
740 | |||
741 | return false; | ||
742 | } | ||
743 | |||
731 | static void intel_pmu_disable_all(void) | 744 | static void intel_pmu_disable_all(void) |
732 | { | 745 | { |
733 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 746 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
@@ -882,6 +895,13 @@ static void intel_pmu_disable_event(struct perf_event *event) | |||
882 | cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx); | 895 | cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx); |
883 | cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx); | 896 | cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx); |
884 | 897 | ||
898 | /* | ||
899 | * LBR must be disabled before the actual event, | ||
900 | * because any event may be combined with LBR | ||
901 | */ | ||
902 | if (intel_pmu_needs_lbr_smpl(event)) | ||
903 | intel_pmu_lbr_disable(event); | ||
904 | |||
885 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { | 905 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { |
886 | intel_pmu_disable_fixed(hwc); | 906 | intel_pmu_disable_fixed(hwc); |
887 | return; | 907 | return; |
@@ -936,6 +956,12 @@ static void intel_pmu_enable_event(struct perf_event *event) | |||
936 | intel_pmu_enable_bts(hwc->config); | 956 | intel_pmu_enable_bts(hwc->config); |
937 | return; | 957 | return; |
938 | } | 958 | } |
959 | /* | ||
960 | * LBR must be enabled before the actual event, | ||
961 | * because any event may be combined with LBR | ||
962 | */ | ||
963 | if (intel_pmu_needs_lbr_smpl(event)) | ||
964 | intel_pmu_lbr_enable(event); | ||
939 | 965 | ||
940 | if (event->attr.exclude_host) | 966 | if (event->attr.exclude_host) |
941 | cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx); | 967 | cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx); |
@@ -1058,6 +1084,9 @@ again: | |||
1058 | 1084 | ||
1059 | data.period = event->hw.last_period; | 1085 | data.period = event->hw.last_period; |
1060 | 1086 | ||
1087 | if (has_branch_stack(event)) | ||
1088 | data.br_stack = &cpuc->lbr_stack; | ||
1089 | |||
1061 | if (perf_event_overflow(event, &data, regs)) | 1090 | if (perf_event_overflow(event, &data, regs)) |
1062 | x86_pmu_stop(event, 0); | 1091 | x86_pmu_stop(event, 0); |
1063 | } | 1092 | } |
@@ -1124,17 +1153,17 @@ static bool intel_try_alt_er(struct perf_event *event, int orig_idx) | |||
1124 | */ | 1153 | */ |
1125 | static struct event_constraint * | 1154 | static struct event_constraint * |
1126 | __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc, | 1155 | __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc, |
1127 | struct perf_event *event) | 1156 | struct perf_event *event, |
1157 | struct hw_perf_event_extra *reg) | ||
1128 | { | 1158 | { |
1129 | struct event_constraint *c = &emptyconstraint; | 1159 | struct event_constraint *c = &emptyconstraint; |
1130 | struct hw_perf_event_extra *reg = &event->hw.extra_reg; | ||
1131 | struct er_account *era; | 1160 | struct er_account *era; |
1132 | unsigned long flags; | 1161 | unsigned long flags; |
1133 | int orig_idx = reg->idx; | 1162 | int orig_idx = reg->idx; |
1134 | 1163 | ||
1135 | /* already allocated shared msr */ | 1164 | /* already allocated shared msr */ |
1136 | if (reg->alloc) | 1165 | if (reg->alloc) |
1137 | return &unconstrained; | 1166 | return NULL; /* call x86_get_event_constraints() */ |
1138 | 1167 | ||
1139 | again: | 1168 | again: |
1140 | era = &cpuc->shared_regs->regs[reg->idx]; | 1169 | era = &cpuc->shared_regs->regs[reg->idx]; |
@@ -1157,14 +1186,10 @@ again: | |||
1157 | reg->alloc = 1; | 1186 | reg->alloc = 1; |
1158 | 1187 | ||
1159 | /* | 1188 | /* |
1160 | * All events using extra_reg are unconstrained. | 1189 | * need to call x86_get_event_constraints() |
1161 | * Avoids calling x86_get_event_constraints() | 1190 | * to check if the associated event has constraints |
1162 | * | ||
1163 | * Must revisit if extra_reg controlling events | ||
1164 | * ever have constraints. Worst case we go through | ||
1165 | * the regular event constraint table. | ||
1166 | */ | 1191 | */ |
1167 | c = &unconstrained; | 1192 | c = NULL; |
1168 | } else if (intel_try_alt_er(event, orig_idx)) { | 1193 | } else if (intel_try_alt_er(event, orig_idx)) { |
1169 | raw_spin_unlock_irqrestore(&era->lock, flags); | 1194 | raw_spin_unlock_irqrestore(&era->lock, flags); |
1170 | goto again; | 1195 | goto again; |
@@ -1201,11 +1226,23 @@ static struct event_constraint * | |||
1201 | intel_shared_regs_constraints(struct cpu_hw_events *cpuc, | 1226 | intel_shared_regs_constraints(struct cpu_hw_events *cpuc, |
1202 | struct perf_event *event) | 1227 | struct perf_event *event) |
1203 | { | 1228 | { |
1204 | struct event_constraint *c = NULL; | 1229 | struct event_constraint *c = NULL, *d; |
1205 | 1230 | struct hw_perf_event_extra *xreg, *breg; | |
1206 | if (event->hw.extra_reg.idx != EXTRA_REG_NONE) | 1231 | |
1207 | c = __intel_shared_reg_get_constraints(cpuc, event); | 1232 | xreg = &event->hw.extra_reg; |
1208 | 1233 | if (xreg->idx != EXTRA_REG_NONE) { | |
1234 | c = __intel_shared_reg_get_constraints(cpuc, event, xreg); | ||
1235 | if (c == &emptyconstraint) | ||
1236 | return c; | ||
1237 | } | ||
1238 | breg = &event->hw.branch_reg; | ||
1239 | if (breg->idx != EXTRA_REG_NONE) { | ||
1240 | d = __intel_shared_reg_get_constraints(cpuc, event, breg); | ||
1241 | if (d == &emptyconstraint) { | ||
1242 | __intel_shared_reg_put_constraints(cpuc, xreg); | ||
1243 | c = d; | ||
1244 | } | ||
1245 | } | ||
1209 | return c; | 1246 | return c; |
1210 | } | 1247 | } |
1211 | 1248 | ||
@@ -1253,6 +1290,10 @@ intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc, | |||
1253 | reg = &event->hw.extra_reg; | 1290 | reg = &event->hw.extra_reg; |
1254 | if (reg->idx != EXTRA_REG_NONE) | 1291 | if (reg->idx != EXTRA_REG_NONE) |
1255 | __intel_shared_reg_put_constraints(cpuc, reg); | 1292 | __intel_shared_reg_put_constraints(cpuc, reg); |
1293 | |||
1294 | reg = &event->hw.branch_reg; | ||
1295 | if (reg->idx != EXTRA_REG_NONE) | ||
1296 | __intel_shared_reg_put_constraints(cpuc, reg); | ||
1256 | } | 1297 | } |
1257 | 1298 | ||
1258 | static void intel_put_event_constraints(struct cpu_hw_events *cpuc, | 1299 | static void intel_put_event_constraints(struct cpu_hw_events *cpuc, |
@@ -1288,12 +1329,19 @@ static int intel_pmu_hw_config(struct perf_event *event) | |||
1288 | * | 1329 | * |
1289 | * Thereby we gain a PEBS capable cycle counter. | 1330 | * Thereby we gain a PEBS capable cycle counter. |
1290 | */ | 1331 | */ |
1291 | u64 alt_config = 0x108000c0; /* INST_RETIRED.TOTAL_CYCLES */ | 1332 | u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16); |
1333 | |||
1292 | 1334 | ||
1293 | alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); | 1335 | alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); |
1294 | event->hw.config = alt_config; | 1336 | event->hw.config = alt_config; |
1295 | } | 1337 | } |
1296 | 1338 | ||
1339 | if (intel_pmu_needs_lbr_smpl(event)) { | ||
1340 | ret = intel_pmu_setup_lbr_filter(event); | ||
1341 | if (ret) | ||
1342 | return ret; | ||
1343 | } | ||
1344 | |||
1297 | if (event->attr.type != PERF_TYPE_RAW) | 1345 | if (event->attr.type != PERF_TYPE_RAW) |
1298 | return 0; | 1346 | return 0; |
1299 | 1347 | ||
@@ -1432,7 +1480,7 @@ static int intel_pmu_cpu_prepare(int cpu) | |||
1432 | { | 1480 | { |
1433 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); | 1481 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); |
1434 | 1482 | ||
1435 | if (!x86_pmu.extra_regs) | 1483 | if (!(x86_pmu.extra_regs || x86_pmu.lbr_sel_map)) |
1436 | return NOTIFY_OK; | 1484 | return NOTIFY_OK; |
1437 | 1485 | ||
1438 | cpuc->shared_regs = allocate_shared_regs(cpu); | 1486 | cpuc->shared_regs = allocate_shared_regs(cpu); |
@@ -1454,22 +1502,28 @@ static void intel_pmu_cpu_starting(int cpu) | |||
1454 | */ | 1502 | */ |
1455 | intel_pmu_lbr_reset(); | 1503 | intel_pmu_lbr_reset(); |
1456 | 1504 | ||
1457 | if (!cpuc->shared_regs || (x86_pmu.er_flags & ERF_NO_HT_SHARING)) | 1505 | cpuc->lbr_sel = NULL; |
1506 | |||
1507 | if (!cpuc->shared_regs) | ||
1458 | return; | 1508 | return; |
1459 | 1509 | ||
1460 | for_each_cpu(i, topology_thread_cpumask(cpu)) { | 1510 | if (!(x86_pmu.er_flags & ERF_NO_HT_SHARING)) { |
1461 | struct intel_shared_regs *pc; | 1511 | for_each_cpu(i, topology_thread_cpumask(cpu)) { |
1512 | struct intel_shared_regs *pc; | ||
1462 | 1513 | ||
1463 | pc = per_cpu(cpu_hw_events, i).shared_regs; | 1514 | pc = per_cpu(cpu_hw_events, i).shared_regs; |
1464 | if (pc && pc->core_id == core_id) { | 1515 | if (pc && pc->core_id == core_id) { |
1465 | cpuc->kfree_on_online = cpuc->shared_regs; | 1516 | cpuc->kfree_on_online = cpuc->shared_regs; |
1466 | cpuc->shared_regs = pc; | 1517 | cpuc->shared_regs = pc; |
1467 | break; | 1518 | break; |
1519 | } | ||
1468 | } | 1520 | } |
1521 | cpuc->shared_regs->core_id = core_id; | ||
1522 | cpuc->shared_regs->refcnt++; | ||
1469 | } | 1523 | } |
1470 | 1524 | ||
1471 | cpuc->shared_regs->core_id = core_id; | 1525 | if (x86_pmu.lbr_sel_map) |
1472 | cpuc->shared_regs->refcnt++; | 1526 | cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR]; |
1473 | } | 1527 | } |
1474 | 1528 | ||
1475 | static void intel_pmu_cpu_dying(int cpu) | 1529 | static void intel_pmu_cpu_dying(int cpu) |
@@ -1487,6 +1541,18 @@ static void intel_pmu_cpu_dying(int cpu) | |||
1487 | fini_debug_store_on_cpu(cpu); | 1541 | fini_debug_store_on_cpu(cpu); |
1488 | } | 1542 | } |
1489 | 1543 | ||
1544 | static void intel_pmu_flush_branch_stack(void) | ||
1545 | { | ||
1546 | /* | ||
1547 | * Intel LBR does not tag entries with the | ||
1548 | * PID of the current task, so we need to | ||
1549 | * flush it on context switch. | ||
1550 | * For now, we simply reset it. | ||
1551 | */ | ||
1552 | if (x86_pmu.lbr_nr) | ||
1553 | intel_pmu_lbr_reset(); | ||
1554 | } | ||
1555 | |||
1490 | static __initconst const struct x86_pmu intel_pmu = { | 1556 | static __initconst const struct x86_pmu intel_pmu = { |
1491 | .name = "Intel", | 1557 | .name = "Intel", |
1492 | .handle_irq = intel_pmu_handle_irq, | 1558 | .handle_irq = intel_pmu_handle_irq, |
@@ -1514,6 +1580,7 @@ static __initconst const struct x86_pmu intel_pmu = { | |||
1514 | .cpu_starting = intel_pmu_cpu_starting, | 1580 | .cpu_starting = intel_pmu_cpu_starting, |
1515 | .cpu_dying = intel_pmu_cpu_dying, | 1581 | .cpu_dying = intel_pmu_cpu_dying, |
1516 | .guest_get_msrs = intel_guest_get_msrs, | 1582 | .guest_get_msrs = intel_guest_get_msrs, |
1583 | .flush_branch_stack = intel_pmu_flush_branch_stack, | ||
1517 | }; | 1584 | }; |
1518 | 1585 | ||
1519 | static __init void intel_clovertown_quirk(void) | 1586 | static __init void intel_clovertown_quirk(void) |
@@ -1690,9 +1757,11 @@ __init int intel_pmu_init(void) | |||
1690 | x86_pmu.extra_regs = intel_nehalem_extra_regs; | 1757 | x86_pmu.extra_regs = intel_nehalem_extra_regs; |
1691 | 1758 | ||
1692 | /* UOPS_ISSUED.STALLED_CYCLES */ | 1759 | /* UOPS_ISSUED.STALLED_CYCLES */ |
1693 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; | 1760 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = |
1761 | X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); | ||
1694 | /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ | 1762 | /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ |
1695 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1; | 1763 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = |
1764 | X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1); | ||
1696 | 1765 | ||
1697 | x86_add_quirk(intel_nehalem_quirk); | 1766 | x86_add_quirk(intel_nehalem_quirk); |
1698 | 1767 | ||
@@ -1727,9 +1796,11 @@ __init int intel_pmu_init(void) | |||
1727 | x86_pmu.er_flags |= ERF_HAS_RSP_1; | 1796 | x86_pmu.er_flags |= ERF_HAS_RSP_1; |
1728 | 1797 | ||
1729 | /* UOPS_ISSUED.STALLED_CYCLES */ | 1798 | /* UOPS_ISSUED.STALLED_CYCLES */ |
1730 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; | 1799 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = |
1800 | X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); | ||
1731 | /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ | 1801 | /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ |
1732 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1; | 1802 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = |
1803 | X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1); | ||
1733 | 1804 | ||
1734 | pr_cont("Westmere events, "); | 1805 | pr_cont("Westmere events, "); |
1735 | break; | 1806 | break; |
@@ -1740,7 +1811,7 @@ __init int intel_pmu_init(void) | |||
1740 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, | 1811 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, |
1741 | sizeof(hw_cache_event_ids)); | 1812 | sizeof(hw_cache_event_ids)); |
1742 | 1813 | ||
1743 | intel_pmu_lbr_init_nhm(); | 1814 | intel_pmu_lbr_init_snb(); |
1744 | 1815 | ||
1745 | x86_pmu.event_constraints = intel_snb_event_constraints; | 1816 | x86_pmu.event_constraints = intel_snb_event_constraints; |
1746 | x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints; | 1817 | x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints; |
@@ -1750,9 +1821,11 @@ __init int intel_pmu_init(void) | |||
1750 | x86_pmu.er_flags |= ERF_NO_HT_SHARING; | 1821 | x86_pmu.er_flags |= ERF_NO_HT_SHARING; |
1751 | 1822 | ||
1752 | /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ | 1823 | /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ |
1753 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; | 1824 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = |
1825 | X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); | ||
1754 | /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/ | 1826 | /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/ |
1755 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x18001b1; | 1827 | intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = |
1828 | X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1); | ||
1756 | 1829 | ||
1757 | pr_cont("SandyBridge events, "); | 1830 | pr_cont("SandyBridge events, "); |
1758 | break; | 1831 | break; |
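An illustrative user-space sketch of how the new branch sampling path is exercised, assuming the matching generic perf changes (branch_sample_type, the PERF_SAMPLE_BRANCH_* flags and PERF_SAMPLE_BRANCH_STACK) are present in the installed headers; the helper name and the sample period are arbitrary:

	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>

	/* sample kernel-level call branches on a cycles event for this thread */
	static int open_branch_event(void)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size		= sizeof(attr);
		attr.type		= PERF_TYPE_HARDWARE;
		attr.config		= PERF_COUNT_HW_CPU_CYCLES;
		attr.sample_period	= 100000;
		attr.sample_type	= PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK;
		attr.branch_sample_type	= PERF_SAMPLE_BRANCH_KERNEL |
					  PERF_SAMPLE_BRANCH_ANY_CALL;

		return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	}

intel_pmu_hw_config() routes such an event through intel_pmu_setup_lbr_filter(), and each overflow sample then carries cpuc->lbr_stack via data.br_stack, as set up in the PMI handler above and in the PEBS path in perf_event_intel_ds.c below.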
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index d6bd49faa40c..7f64df19e7dd 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c | |||
@@ -3,6 +3,7 @@ | |||
3 | #include <linux/slab.h> | 3 | #include <linux/slab.h> |
4 | 4 | ||
5 | #include <asm/perf_event.h> | 5 | #include <asm/perf_event.h> |
6 | #include <asm/insn.h> | ||
6 | 7 | ||
7 | #include "perf_event.h" | 8 | #include "perf_event.h" |
8 | 9 | ||
@@ -439,9 +440,6 @@ void intel_pmu_pebs_enable(struct perf_event *event) | |||
439 | hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT; | 440 | hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT; |
440 | 441 | ||
441 | cpuc->pebs_enabled |= 1ULL << hwc->idx; | 442 | cpuc->pebs_enabled |= 1ULL << hwc->idx; |
442 | |||
443 | if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1) | ||
444 | intel_pmu_lbr_enable(event); | ||
445 | } | 443 | } |
446 | 444 | ||
447 | void intel_pmu_pebs_disable(struct perf_event *event) | 445 | void intel_pmu_pebs_disable(struct perf_event *event) |
@@ -454,9 +452,6 @@ void intel_pmu_pebs_disable(struct perf_event *event) | |||
454 | wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); | 452 | wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); |
455 | 453 | ||
456 | hwc->config |= ARCH_PERFMON_EVENTSEL_INT; | 454 | hwc->config |= ARCH_PERFMON_EVENTSEL_INT; |
457 | |||
458 | if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1) | ||
459 | intel_pmu_lbr_disable(event); | ||
460 | } | 455 | } |
461 | 456 | ||
462 | void intel_pmu_pebs_enable_all(void) | 457 | void intel_pmu_pebs_enable_all(void) |
@@ -475,17 +470,6 @@ void intel_pmu_pebs_disable_all(void) | |||
475 | wrmsrl(MSR_IA32_PEBS_ENABLE, 0); | 470 | wrmsrl(MSR_IA32_PEBS_ENABLE, 0); |
476 | } | 471 | } |
477 | 472 | ||
478 | #include <asm/insn.h> | ||
479 | |||
480 | static inline bool kernel_ip(unsigned long ip) | ||
481 | { | ||
482 | #ifdef CONFIG_X86_32 | ||
483 | return ip > PAGE_OFFSET; | ||
484 | #else | ||
485 | return (long)ip < 0; | ||
486 | #endif | ||
487 | } | ||
488 | |||
489 | static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) | 473 | static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) |
490 | { | 474 | { |
491 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 475 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
@@ -572,6 +556,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event, | |||
572 | * both formats and we don't use the other fields in this | 556 | * both formats and we don't use the other fields in this |
573 | * routine. | 557 | * routine. |
574 | */ | 558 | */ |
559 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
575 | struct pebs_record_core *pebs = __pebs; | 560 | struct pebs_record_core *pebs = __pebs; |
576 | struct perf_sample_data data; | 561 | struct perf_sample_data data; |
577 | struct pt_regs regs; | 562 | struct pt_regs regs; |
@@ -602,6 +587,9 @@ static void __intel_pmu_pebs_event(struct perf_event *event, | |||
602 | else | 587 | else |
603 | regs.flags &= ~PERF_EFLAGS_EXACT; | 588 | regs.flags &= ~PERF_EFLAGS_EXACT; |
604 | 589 | ||
590 | if (has_branch_stack(event)) | ||
591 | data.br_stack = &cpuc->lbr_stack; | ||
592 | |||
605 | if (perf_event_overflow(event, &data, ®s)) | 593 | if (perf_event_overflow(event, &data, ®s)) |
606 | x86_pmu_stop(event, 0); | 594 | x86_pmu_stop(event, 0); |
607 | } | 595 | } |
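For reference, kernel_ip(), now shared via perf_event.h so the LBR code can use it as well as the PEBS fixup, classifies an address purely by its value; the example addresses below are only illustrative:

	/*
	 * 64-bit: kernel addresses have the sign bit set, so
	 *	kernel_ip(0xffffffff81000000)  -> true   ((long)ip < 0)
	 *	kernel_ip(0x00007f12d4001000)  -> false
	 * 32-bit: anything above PAGE_OFFSET (typically 0xc0000000) is kernel.
	 */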
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c index 47a7e63bfe54..520b4265fcd2 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c +++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c | |||
@@ -3,6 +3,7 @@ | |||
3 | 3 | ||
4 | #include <asm/perf_event.h> | 4 | #include <asm/perf_event.h> |
5 | #include <asm/msr.h> | 5 | #include <asm/msr.h> |
6 | #include <asm/insn.h> | ||
6 | 7 | ||
7 | #include "perf_event.h" | 8 | #include "perf_event.h" |
8 | 9 | ||
@@ -14,6 +15,100 @@ enum { | |||
14 | }; | 15 | }; |
15 | 16 | ||
16 | /* | 17 | /* |
18 | * Intel LBR_SELECT bits | ||
19 | * Intel Vol3a, April 2011, Section 16.7 Table 16-10 | ||
20 | * | ||
21 | * Hardware branch filter (not available on all CPUs) | ||
22 | */ | ||
23 | #define LBR_KERNEL_BIT 0 /* do not capture at ring0 */ | ||
24 | #define LBR_USER_BIT 1 /* do not capture at ring > 0 */ | ||
25 | #define LBR_JCC_BIT 2 /* do not capture conditional branches */ | ||
26 | #define LBR_REL_CALL_BIT 3 /* do not capture relative calls */ | ||
27 | #define LBR_IND_CALL_BIT 4 /* do not capture indirect calls */ | ||
28 | #define LBR_RETURN_BIT 5 /* do not capture near returns */ | ||
29 | #define LBR_IND_JMP_BIT 6 /* do not capture indirect jumps */ | ||
30 | #define LBR_REL_JMP_BIT 7 /* do not capture relative jumps */ | ||
31 | #define LBR_FAR_BIT 8 /* do not capture far branches */ | ||
32 | |||
33 | #define LBR_KERNEL (1 << LBR_KERNEL_BIT) | ||
34 | #define LBR_USER (1 << LBR_USER_BIT) | ||
35 | #define LBR_JCC (1 << LBR_JCC_BIT) | ||
36 | #define LBR_REL_CALL (1 << LBR_REL_CALL_BIT) | ||
37 | #define LBR_IND_CALL (1 << LBR_IND_CALL_BIT) | ||
38 | #define LBR_RETURN (1 << LBR_RETURN_BIT) | ||
39 | #define LBR_REL_JMP (1 << LBR_REL_JMP_BIT) | ||
40 | #define LBR_IND_JMP (1 << LBR_IND_JMP_BIT) | ||
41 | #define LBR_FAR (1 << LBR_FAR_BIT) | ||
42 | |||
43 | #define LBR_PLM (LBR_KERNEL | LBR_USER) | ||
44 | |||
45 | #define LBR_SEL_MASK 0x1ff /* valid bits in LBR_SELECT */ | ||
46 | #define LBR_NOT_SUPP -1 /* LBR filter not supported */ | ||
47 | #define LBR_IGN 0 /* ignored */ | ||
48 | |||
49 | #define LBR_ANY \ | ||
50 | (LBR_JCC |\ | ||
51 | LBR_REL_CALL |\ | ||
52 | LBR_IND_CALL |\ | ||
53 | LBR_RETURN |\ | ||
54 | LBR_REL_JMP |\ | ||
55 | LBR_IND_JMP |\ | ||
56 | LBR_FAR) | ||
57 | |||
58 | #define LBR_FROM_FLAG_MISPRED (1ULL << 63) | ||
59 | |||
60 | #define for_each_branch_sample_type(x) \ | ||
61 | for ((x) = PERF_SAMPLE_BRANCH_USER; \ | ||
62 | (x) < PERF_SAMPLE_BRANCH_MAX; (x) <<= 1) | ||
63 | |||
64 | /* | ||
65 | * x86 control flow change classification | ||
66 | * x86 control flow changes include branches, interrupts, traps, faults | ||
67 | */ | ||
68 | enum { | ||
69 | X86_BR_NONE = 0, /* unknown */ | ||
70 | |||
71 | X86_BR_USER = 1 << 0, /* branch target is user */ | ||
72 | X86_BR_KERNEL = 1 << 1, /* branch target is kernel */ | ||
73 | |||
74 | X86_BR_CALL = 1 << 2, /* call */ | ||
75 | X86_BR_RET = 1 << 3, /* return */ | ||
76 | X86_BR_SYSCALL = 1 << 4, /* syscall */ | ||
77 | X86_BR_SYSRET = 1 << 5, /* syscall return */ | ||
78 | X86_BR_INT = 1 << 6, /* sw interrupt */ | ||
79 | X86_BR_IRET = 1 << 7, /* return from interrupt */ | ||
80 | X86_BR_JCC = 1 << 8, /* conditional */ | ||
81 | X86_BR_JMP = 1 << 9, /* jump */ | ||
82 | X86_BR_IRQ = 1 << 10,/* hw interrupt or trap or fault */ | ||
83 | X86_BR_IND_CALL = 1 << 11,/* indirect calls */ | ||
84 | }; | ||
85 | |||
86 | #define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL) | ||
87 | |||
88 | #define X86_BR_ANY \ | ||
89 | (X86_BR_CALL |\ | ||
90 | X86_BR_RET |\ | ||
91 | X86_BR_SYSCALL |\ | ||
92 | X86_BR_SYSRET |\ | ||
93 | X86_BR_INT |\ | ||
94 | X86_BR_IRET |\ | ||
95 | X86_BR_JCC |\ | ||
96 | X86_BR_JMP |\ | ||
97 | X86_BR_IRQ |\ | ||
98 | X86_BR_IND_CALL) | ||
99 | |||
100 | #define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY) | ||
101 | |||
102 | #define X86_BR_ANY_CALL \ | ||
103 | (X86_BR_CALL |\ | ||
104 | X86_BR_IND_CALL |\ | ||
105 | X86_BR_SYSCALL |\ | ||
106 | X86_BR_IRQ |\ | ||
107 | X86_BR_INT) | ||
108 | |||
109 | static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc); | ||
110 | |||
111 | /* | ||
17 | * We only support LBR implementations that have FREEZE_LBRS_ON_PMI | 112 | * We only support LBR implementations that have FREEZE_LBRS_ON_PMI |
18 | * otherwise it becomes near impossible to get a reliable stack. | 113 | * otherwise it becomes near impossible to get a reliable stack. |
19 | */ | 114 | */ |
@@ -21,6 +116,10 @@ enum { | |||
21 | static void __intel_pmu_lbr_enable(void) | 116 | static void __intel_pmu_lbr_enable(void) |
22 | { | 117 | { |
23 | u64 debugctl; | 118 | u64 debugctl; |
119 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
120 | |||
121 | if (cpuc->lbr_sel) | ||
122 | wrmsrl(MSR_LBR_SELECT, cpuc->lbr_sel->config); | ||
24 | 123 | ||
25 | rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); | 124 | rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); |
26 | debugctl |= (DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI); | 125 | debugctl |= (DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI); |
@@ -76,11 +175,11 @@ void intel_pmu_lbr_enable(struct perf_event *event) | |||
76 | * Reset the LBR stack if we changed task context to | 175 | * Reset the LBR stack if we changed task context to |
77 | * avoid data leaks. | 176 | * avoid data leaks. |
78 | */ | 177 | */ |
79 | |||
80 | if (event->ctx->task && cpuc->lbr_context != event->ctx) { | 178 | if (event->ctx->task && cpuc->lbr_context != event->ctx) { |
81 | intel_pmu_lbr_reset(); | 179 | intel_pmu_lbr_reset(); |
82 | cpuc->lbr_context = event->ctx; | 180 | cpuc->lbr_context = event->ctx; |
83 | } | 181 | } |
182 | cpuc->br_sel = event->hw.branch_reg.reg; | ||
84 | 183 | ||
85 | cpuc->lbr_users++; | 184 | cpuc->lbr_users++; |
86 | } | 185 | } |
@@ -95,8 +194,11 @@ void intel_pmu_lbr_disable(struct perf_event *event) | |||
95 | cpuc->lbr_users--; | 194 | cpuc->lbr_users--; |
96 | WARN_ON_ONCE(cpuc->lbr_users < 0); | 195 | WARN_ON_ONCE(cpuc->lbr_users < 0); |
97 | 196 | ||
98 | if (cpuc->enabled && !cpuc->lbr_users) | 197 | if (cpuc->enabled && !cpuc->lbr_users) { |
99 | __intel_pmu_lbr_disable(); | 198 | __intel_pmu_lbr_disable(); |
199 | /* avoid stale pointer */ | ||
200 | cpuc->lbr_context = NULL; | ||
201 | } | ||
100 | } | 202 | } |
101 | 203 | ||
102 | void intel_pmu_lbr_enable_all(void) | 204 | void intel_pmu_lbr_enable_all(void) |
@@ -115,6 +217,9 @@ void intel_pmu_lbr_disable_all(void) | |||
115 | __intel_pmu_lbr_disable(); | 217 | __intel_pmu_lbr_disable(); |
116 | } | 218 | } |
117 | 219 | ||
220 | /* | ||
221 | * TOS = most recently recorded branch | ||
222 | */ | ||
118 | static inline u64 intel_pmu_lbr_tos(void) | 223 | static inline u64 intel_pmu_lbr_tos(void) |
119 | { | 224 | { |
120 | u64 tos; | 225 | u64 tos; |
@@ -142,15 +247,15 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc) | |||
142 | 247 | ||
143 | rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr); | 248 | rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr); |
144 | 249 | ||
145 | cpuc->lbr_entries[i].from = msr_lastbranch.from; | 250 | cpuc->lbr_entries[i].from = msr_lastbranch.from; |
146 | cpuc->lbr_entries[i].to = msr_lastbranch.to; | 251 | cpuc->lbr_entries[i].to = msr_lastbranch.to; |
147 | cpuc->lbr_entries[i].flags = 0; | 252 | cpuc->lbr_entries[i].mispred = 0; |
253 | cpuc->lbr_entries[i].predicted = 0; | ||
254 | cpuc->lbr_entries[i].reserved = 0; | ||
148 | } | 255 | } |
149 | cpuc->lbr_stack.nr = i; | 256 | cpuc->lbr_stack.nr = i; |
150 | } | 257 | } |
151 | 258 | ||
152 | #define LBR_FROM_FLAG_MISPRED (1ULL << 63) | ||
153 | |||
154 | /* | 259 | /* |
155 | * Due to lack of segmentation in Linux the effective address (offset) | 260 | * Due to lack of segmentation in Linux the effective address (offset) |
156 | * is the same as the linear address, allowing us to merge the LIP and EIP | 261 | * is the same as the linear address, allowing us to merge the LIP and EIP |
@@ -165,19 +270,22 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc) | |||
165 | 270 | ||
166 | for (i = 0; i < x86_pmu.lbr_nr; i++) { | 271 | for (i = 0; i < x86_pmu.lbr_nr; i++) { |
167 | unsigned long lbr_idx = (tos - i) & mask; | 272 | unsigned long lbr_idx = (tos - i) & mask; |
168 | u64 from, to, flags = 0; | 273 | u64 from, to, mis = 0, pred = 0; |
169 | 274 | ||
170 | rdmsrl(x86_pmu.lbr_from + lbr_idx, from); | 275 | rdmsrl(x86_pmu.lbr_from + lbr_idx, from); |
171 | rdmsrl(x86_pmu.lbr_to + lbr_idx, to); | 276 | rdmsrl(x86_pmu.lbr_to + lbr_idx, to); |
172 | 277 | ||
173 | if (lbr_format == LBR_FORMAT_EIP_FLAGS) { | 278 | if (lbr_format == LBR_FORMAT_EIP_FLAGS) { |
174 | flags = !!(from & LBR_FROM_FLAG_MISPRED); | 279 | mis = !!(from & LBR_FROM_FLAG_MISPRED); |
280 | pred = !mis; | ||
175 | from = (u64)((((s64)from) << 1) >> 1); | 281 | from = (u64)((((s64)from) << 1) >> 1); |
176 | } | 282 | } |
177 | 283 | ||
178 | cpuc->lbr_entries[i].from = from; | 284 | cpuc->lbr_entries[i].from = from; |
179 | cpuc->lbr_entries[i].to = to; | 285 | cpuc->lbr_entries[i].to = to; |
180 | cpuc->lbr_entries[i].flags = flags; | 286 | cpuc->lbr_entries[i].mispred = mis; |
287 | cpuc->lbr_entries[i].predicted = pred; | ||
288 | cpuc->lbr_entries[i].reserved = 0; | ||
181 | } | 289 | } |
182 | cpuc->lbr_stack.nr = i; | 290 | cpuc->lbr_stack.nr = i; |
183 | } | 291 | } |
@@ -193,28 +301,404 @@ void intel_pmu_lbr_read(void) | |||
193 | intel_pmu_lbr_read_32(cpuc); | 301 | intel_pmu_lbr_read_32(cpuc); |
194 | else | 302 | else |
195 | intel_pmu_lbr_read_64(cpuc); | 303 | intel_pmu_lbr_read_64(cpuc); |
304 | |||
305 | intel_pmu_lbr_filter(cpuc); | ||
306 | } | ||
307 | |||
308 | /* | ||
309 | * SW filter is used: | ||
310 | * - in case there is no HW filter | ||
311 | * - in case the HW filter has errata or limitations | ||
312 | */ | ||
313 | static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event) | ||
314 | { | ||
315 | u64 br_type = event->attr.branch_sample_type; | ||
316 | int mask = 0; | ||
317 | |||
318 | if (br_type & PERF_SAMPLE_BRANCH_USER) | ||
319 | mask |= X86_BR_USER; | ||
320 | |||
321 | if (br_type & PERF_SAMPLE_BRANCH_KERNEL) | ||
322 | mask |= X86_BR_KERNEL; | ||
323 | |||
324 | /* we ignore BRANCH_HV here */ | ||
325 | |||
326 | if (br_type & PERF_SAMPLE_BRANCH_ANY) | ||
327 | mask |= X86_BR_ANY; | ||
328 | |||
329 | if (br_type & PERF_SAMPLE_BRANCH_ANY_CALL) | ||
330 | mask |= X86_BR_ANY_CALL; | ||
331 | |||
332 | if (br_type & PERF_SAMPLE_BRANCH_ANY_RETURN) | ||
333 | mask |= X86_BR_RET | X86_BR_IRET | X86_BR_SYSRET; | ||
334 | |||
335 | if (br_type & PERF_SAMPLE_BRANCH_IND_CALL) | ||
336 | mask |= X86_BR_IND_CALL; | ||
337 | /* | ||
338 | * stash the actual user request into reg; it may | ||
339 | * be used by the fixup code on some CPUs | ||
340 | */ | ||
341 | event->hw.branch_reg.reg = mask; | ||
342 | } | ||
343 | |||
344 | /* | ||
345 | * setup the HW LBR filter | ||
346 | * Used only when available; it may not be enough to disambiguate | ||
347 | * all branches and may need the help of the SW filter | ||
348 | */ | ||
349 | static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event) | ||
350 | { | ||
351 | struct hw_perf_event_extra *reg; | ||
352 | u64 br_type = event->attr.branch_sample_type; | ||
353 | u64 mask = 0, m; | ||
354 | u64 v; | ||
355 | |||
356 | for_each_branch_sample_type(m) { | ||
357 | if (!(br_type & m)) | ||
358 | continue; | ||
359 | |||
360 | v = x86_pmu.lbr_sel_map[m]; | ||
361 | if (v == LBR_NOT_SUPP) | ||
362 | return -EOPNOTSUPP; | ||
363 | |||
364 | if (v != LBR_IGN) | ||
365 | mask |= v; | ||
366 | } | ||
367 | reg = &event->hw.branch_reg; | ||
368 | reg->idx = EXTRA_REG_LBR; | ||
369 | |||
370 | /* LBR_SELECT operates in suppress mode so invert mask */ | ||
371 | reg->config = ~mask & x86_pmu.lbr_sel_mask; | ||
372 | |||
373 | return 0; | ||
374 | } | ||
375 | |||
376 | int intel_pmu_setup_lbr_filter(struct perf_event *event) | ||
377 | { | ||
378 | int ret = 0; | ||
379 | |||
380 | /* | ||
381 | * no LBR on this PMU | ||
382 | */ | ||
383 | if (!x86_pmu.lbr_nr) | ||
384 | return -EOPNOTSUPP; | ||
385 | |||
386 | /* | ||
387 | * setup SW LBR filter | ||
388 | */ | ||
389 | intel_pmu_setup_sw_lbr_filter(event); | ||
390 | |||
391 | /* | ||
392 | * setup HW LBR filter, if any | ||
393 | */ | ||
394 | if (x86_pmu.lbr_sel_map) | ||
395 | ret = intel_pmu_setup_hw_lbr_filter(event); | ||
396 | |||
397 | return ret; | ||
196 | } | 398 | } |
197 | 399 | ||
400 | /* | ||
401 | * return the type of control flow change at address "from"; | ||
402 | * the instruction there is not necessarily a branch (e.g., in case of an interrupt). | ||
403 | * | ||
404 | * The branch type returned also includes the priv level of the | ||
405 | * target of the control flow change (X86_BR_USER, X86_BR_KERNEL). | ||
406 | * | ||
407 | * If a branch type is unknown OR the instruction cannot be | ||
408 | * decoded (e.g., text page not present), then X86_BR_NONE is | ||
409 | * returned. | ||
410 | */ | ||
411 | static int branch_type(unsigned long from, unsigned long to) | ||
412 | { | ||
413 | struct insn insn; | ||
414 | void *addr; | ||
415 | int bytes, size = MAX_INSN_SIZE; | ||
416 | int ret = X86_BR_NONE; | ||
417 | int ext, to_plm, from_plm; | ||
418 | u8 buf[MAX_INSN_SIZE]; | ||
419 | int is64 = 0; | ||
420 | |||
421 | to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER; | ||
422 | from_plm = kernel_ip(from) ? X86_BR_KERNEL : X86_BR_USER; | ||
423 | |||
424 | /* | ||
425 | * may be zero if the LBR did not fill up after a reset by the time | ||
426 | * we get a PMU interrupt | ||
427 | */ | ||
428 | if (from == 0 || to == 0) | ||
429 | return X86_BR_NONE; | ||
430 | |||
431 | if (from_plm == X86_BR_USER) { | ||
432 | /* | ||
433 | * can happen if measuring at the user level only | ||
434 | * and we interrupt in a kernel thread, e.g., idle. | ||
435 | */ | ||
436 | if (!current->mm) | ||
437 | return X86_BR_NONE; | ||
438 | |||
439 | /* may fail if text not present */ | ||
440 | bytes = copy_from_user_nmi(buf, (void __user *)from, size); | ||
441 | if (bytes != size) | ||
442 | return X86_BR_NONE; | ||
443 | |||
444 | addr = buf; | ||
445 | } else | ||
446 | addr = (void *)from; | ||
447 | |||
448 | /* | ||
449 | * decoder needs to know the ABI especially | ||
450 | * on 64-bit systems running 32-bit apps | ||
451 | */ | ||
452 | #ifdef CONFIG_X86_64 | ||
453 | is64 = kernel_ip((unsigned long)addr) || !test_thread_flag(TIF_IA32); | ||
454 | #endif | ||
455 | insn_init(&insn, addr, is64); | ||
456 | insn_get_opcode(&insn); | ||
457 | |||
458 | switch (insn.opcode.bytes[0]) { | ||
459 | case 0xf: | ||
460 | switch (insn.opcode.bytes[1]) { | ||
461 | case 0x05: /* syscall */ | ||
462 | case 0x34: /* sysenter */ | ||
463 | ret = X86_BR_SYSCALL; | ||
464 | break; | ||
465 | case 0x07: /* sysret */ | ||
466 | case 0x35: /* sysexit */ | ||
467 | ret = X86_BR_SYSRET; | ||
468 | break; | ||
469 | case 0x80 ... 0x8f: /* conditional */ | ||
470 | ret = X86_BR_JCC; | ||
471 | break; | ||
472 | default: | ||
473 | ret = X86_BR_NONE; | ||
474 | } | ||
475 | break; | ||
476 | case 0x70 ... 0x7f: /* conditional */ | ||
477 | ret = X86_BR_JCC; | ||
478 | break; | ||
479 | case 0xc2: /* near ret */ | ||
480 | case 0xc3: /* near ret */ | ||
481 | case 0xca: /* far ret */ | ||
482 | case 0xcb: /* far ret */ | ||
483 | ret = X86_BR_RET; | ||
484 | break; | ||
485 | case 0xcf: /* iret */ | ||
486 | ret = X86_BR_IRET; | ||
487 | break; | ||
488 | case 0xcc ... 0xce: /* int */ | ||
489 | ret = X86_BR_INT; | ||
490 | break; | ||
491 | case 0xe8: /* call near rel */ | ||
492 | case 0x9a: /* call far absolute */ | ||
493 | ret = X86_BR_CALL; | ||
494 | break; | ||
495 | case 0xe0 ... 0xe3: /* loop jmp */ | ||
496 | ret = X86_BR_JCC; | ||
497 | break; | ||
498 | case 0xe9 ... 0xeb: /* jmp */ | ||
499 | ret = X86_BR_JMP; | ||
500 | break; | ||
501 | case 0xff: /* call near absolute, call far absolute ind */ | ||
502 | insn_get_modrm(&insn); | ||
503 | ext = (insn.modrm.bytes[0] >> 3) & 0x7; | ||
504 | switch (ext) { | ||
505 | case 2: /* near ind call */ | ||
506 | case 3: /* far ind call */ | ||
507 | ret = X86_BR_IND_CALL; | ||
508 | break; | ||
509 | case 4: | ||
510 | case 5: | ||
511 | ret = X86_BR_JMP; | ||
512 | break; | ||
513 | } | ||
514 | break; | ||
515 | default: | ||
516 | ret = X86_BR_NONE; | ||
517 | } | ||
518 | /* | ||
519 | * interrupts, traps, faults (and thus ring transitions) may | ||
520 | * occur on any instruction. Thus, to classify them correctly, | ||
521 | * we need to first look at the from and to priv levels. If they | ||
522 | * are different and to is in the kernel, then it indicates | ||
523 | * a ring transition. If the from instruction is not a ring | ||
524 | * transition instr (syscall, sysenter, int), then it means | ||
525 | * it was an irq, trap or fault. | ||
526 | * | ||
527 | * we have no way of detecting kernel to kernel faults. | ||
528 | */ | ||
529 | if (from_plm == X86_BR_USER && to_plm == X86_BR_KERNEL | ||
530 | && ret != X86_BR_SYSCALL && ret != X86_BR_INT) | ||
531 | ret = X86_BR_IRQ; | ||
532 | |||
533 | /* | ||
534 | * branch priv level determined by target as | ||
535 | * is done by HW when LBR_SELECT is implemented | ||
536 | */ | ||
537 | if (ret != X86_BR_NONE) | ||
538 | ret |= to_plm; | ||
539 | |||
540 | return ret; | ||
541 | } | ||
542 | |||
543 | /* | ||
544 | * implement the actual branch filter based on user demand. | ||
545 | * Hardware may not exactly satisfy that request, thus | ||
546 | * we need to inspect opcodes. Mismatched branches are | ||
547 | * discarded. Therefore, the number of branches returned | ||
548 | * in a PERF_SAMPLE_BRANCH_STACK sample may vary. | ||
549 | */ | ||
550 | static void | ||
551 | intel_pmu_lbr_filter(struct cpu_hw_events *cpuc) | ||
552 | { | ||
553 | u64 from, to; | ||
554 | int br_sel = cpuc->br_sel; | ||
555 | int i, j, type; | ||
556 | bool compress = false; | ||
557 | |||
558 | /* if sampling all branches, then nothing to filter */ | ||
559 | if ((br_sel & X86_BR_ALL) == X86_BR_ALL) | ||
560 | return; | ||
561 | |||
562 | for (i = 0; i < cpuc->lbr_stack.nr; i++) { | ||
563 | |||
564 | from = cpuc->lbr_entries[i].from; | ||
565 | to = cpuc->lbr_entries[i].to; | ||
566 | |||
567 | type = branch_type(from, to); | ||
568 | |||
569 | /* if type does not correspond, then discard */ | ||
570 | if (type == X86_BR_NONE || (br_sel & type) != type) { | ||
571 | cpuc->lbr_entries[i].from = 0; | ||
572 | compress = true; | ||
573 | } | ||
574 | } | ||
575 | |||
576 | if (!compress) | ||
577 | return; | ||
578 | |||
579 | /* remove all entries with from=0 */ | ||
580 | for (i = 0; i < cpuc->lbr_stack.nr; ) { | ||
581 | if (!cpuc->lbr_entries[i].from) { | ||
582 | j = i; | ||
583 | while (++j < cpuc->lbr_stack.nr) | ||
584 | cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j]; | ||
585 | cpuc->lbr_stack.nr--; | ||
586 | if (!cpuc->lbr_entries[i].from) | ||
587 | continue; | ||
588 | } | ||
589 | i++; | ||
590 | } | ||
591 | } | ||
592 | |||
593 | /* | ||
594 | * Map interface branch filters onto LBR filters | ||
595 | */ | ||
596 | static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = { | ||
597 | [PERF_SAMPLE_BRANCH_ANY] = LBR_ANY, | ||
598 | [PERF_SAMPLE_BRANCH_USER] = LBR_USER, | ||
599 | [PERF_SAMPLE_BRANCH_KERNEL] = LBR_KERNEL, | ||
600 | [PERF_SAMPLE_BRANCH_HV] = LBR_IGN, | ||
601 | [PERF_SAMPLE_BRANCH_ANY_RETURN] = LBR_RETURN | LBR_REL_JMP | ||
602 | | LBR_IND_JMP | LBR_FAR, | ||
603 | /* | ||
604 | * NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches | ||
605 | */ | ||
606 | [PERF_SAMPLE_BRANCH_ANY_CALL] = | ||
607 | LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR, | ||
608 | /* | ||
609 | * NHM/WSM erratum: must include IND_JMP to capture IND_CALL | ||
610 | */ | ||
611 | [PERF_SAMPLE_BRANCH_IND_CALL] = LBR_IND_CALL | LBR_IND_JMP, | ||
612 | }; | ||
613 | |||
614 | static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = { | ||
615 | [PERF_SAMPLE_BRANCH_ANY] = LBR_ANY, | ||
616 | [PERF_SAMPLE_BRANCH_USER] = LBR_USER, | ||
617 | [PERF_SAMPLE_BRANCH_KERNEL] = LBR_KERNEL, | ||
618 | [PERF_SAMPLE_BRANCH_HV] = LBR_IGN, | ||
619 | [PERF_SAMPLE_BRANCH_ANY_RETURN] = LBR_RETURN | LBR_FAR, | ||
620 | [PERF_SAMPLE_BRANCH_ANY_CALL] = LBR_REL_CALL | LBR_IND_CALL | ||
621 | | LBR_FAR, | ||
622 | [PERF_SAMPLE_BRANCH_IND_CALL] = LBR_IND_CALL, | ||
623 | }; | ||
624 | |||
625 | /* core */ | ||
198 | void intel_pmu_lbr_init_core(void) | 626 | void intel_pmu_lbr_init_core(void) |
199 | { | 627 | { |
200 | x86_pmu.lbr_nr = 4; | 628 | x86_pmu.lbr_nr = 4; |
201 | x86_pmu.lbr_tos = 0x01c9; | 629 | x86_pmu.lbr_tos = MSR_LBR_TOS; |
202 | x86_pmu.lbr_from = 0x40; | 630 | x86_pmu.lbr_from = MSR_LBR_CORE_FROM; |
203 | x86_pmu.lbr_to = 0x60; | 631 | x86_pmu.lbr_to = MSR_LBR_CORE_TO; |
632 | |||
633 | /* | ||
634 | * SW branch filter usage: | ||
635 | * - compensate for lack of HW filter | ||
636 | */ | ||
637 | pr_cont("4-deep LBR, "); | ||
204 | } | 638 | } |
205 | 639 | ||
640 | /* nehalem/westmere */ | ||
206 | void intel_pmu_lbr_init_nhm(void) | 641 | void intel_pmu_lbr_init_nhm(void) |
207 | { | 642 | { |
208 | x86_pmu.lbr_nr = 16; | 643 | x86_pmu.lbr_nr = 16; |
209 | x86_pmu.lbr_tos = 0x01c9; | 644 | x86_pmu.lbr_tos = MSR_LBR_TOS; |
210 | x86_pmu.lbr_from = 0x680; | 645 | x86_pmu.lbr_from = MSR_LBR_NHM_FROM; |
211 | x86_pmu.lbr_to = 0x6c0; | 646 | x86_pmu.lbr_to = MSR_LBR_NHM_TO; |
647 | |||
648 | x86_pmu.lbr_sel_mask = LBR_SEL_MASK; | ||
649 | x86_pmu.lbr_sel_map = nhm_lbr_sel_map; | ||
650 | |||
651 | /* | ||
652 | * SW branch filter usage: | ||
653 | * - work around LBR_SEL errata (see above) | ||
654 | * - support syscall, sysret capture. | ||
655 | * That requires LBR_FAR, but that means far | ||
656 | * jmps need to be filtered out | ||
657 | */ | ||
658 | pr_cont("16-deep LBR, "); | ||
659 | } | ||
660 | |||
661 | /* sandy bridge */ | ||
662 | void intel_pmu_lbr_init_snb(void) | ||
663 | { | ||
664 | x86_pmu.lbr_nr = 16; | ||
665 | x86_pmu.lbr_tos = MSR_LBR_TOS; | ||
666 | x86_pmu.lbr_from = MSR_LBR_NHM_FROM; | ||
667 | x86_pmu.lbr_to = MSR_LBR_NHM_TO; | ||
668 | |||
669 | x86_pmu.lbr_sel_mask = LBR_SEL_MASK; | ||
670 | x86_pmu.lbr_sel_map = snb_lbr_sel_map; | ||
671 | |||
672 | /* | ||
673 | * SW branch filter usage: | ||
674 | * - support syscall, sysret capture. | ||
675 | * That requires LBR_FAR, but that means far | ||
676 | * jmps need to be filtered out | ||
677 | */ | ||
678 | pr_cont("16-deep LBR, "); | ||
212 | } | 679 | } |
213 | 680 | ||
681 | /* atom */ | ||
214 | void intel_pmu_lbr_init_atom(void) | 682 | void intel_pmu_lbr_init_atom(void) |
215 | { | 683 | { |
684 | /* | ||
685 | * only models starting at stepping 10 seem | ||
686 | * to have an operational LBR which can freeze | ||
687 | * on PMU interrupt | ||
688 | */ | ||
689 | if (boot_cpu_data.x86_mask < 10) { | ||
690 | pr_cont("LBR disabled due to erratum"); | ||
691 | return; | ||
692 | } | ||
693 | |||
216 | x86_pmu.lbr_nr = 8; | 694 | x86_pmu.lbr_nr = 8; |
217 | x86_pmu.lbr_tos = 0x01c9; | 695 | x86_pmu.lbr_tos = MSR_LBR_TOS; |
218 | x86_pmu.lbr_from = 0x40; | 696 | x86_pmu.lbr_from = MSR_LBR_CORE_FROM; |
219 | x86_pmu.lbr_to = 0x60; | 697 | x86_pmu.lbr_to = MSR_LBR_CORE_TO; |
698 | |||
699 | /* | ||
700 | * SW branch filter usage: | ||
701 | * - compensate for lack of HW filter | ||
702 | */ | ||
703 | pr_cont("8-deep LBR, "); | ||
220 | } | 704 | } |
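A worked example of the suppress-mode programming done by intel_pmu_setup_hw_lbr_filter(), using the Sandy Bridge map above; the numbers follow directly from the LBR_* definitions at the top of this file:

	/*
	 * PERF_SAMPLE_BRANCH_KERNEL | PERF_SAMPLE_BRANCH_IND_CALL
	 *	maps to LBR_KERNEL | LBR_IND_CALL = 0x001 | 0x010 = 0x011
	 * LBR_SELECT operates in suppress mode, so the value written is
	 *	reg->config = ~0x011 & LBR_SEL_MASK = ~0x011 & 0x1ff = 0x1ee
	 * i.e. ring-3 captures and every branch class other than indirect
	 * calls are suppressed by hardware.
	 */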
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index c7f64e6f537a..addf9e82a7f2 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c | |||
@@ -40,6 +40,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c) | |||
40 | { X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 }, | 40 | { X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 }, |
41 | { X86_FEATURE_XSAVEOPT, CR_EAX, 0, 0x0000000d, 1 }, | 41 | { X86_FEATURE_XSAVEOPT, CR_EAX, 0, 0x0000000d, 1 }, |
42 | { X86_FEATURE_CPB, CR_EDX, 9, 0x80000007, 0 }, | 42 | { X86_FEATURE_CPB, CR_EDX, 9, 0x80000007, 0 }, |
43 | { X86_FEATURE_HW_PSTATE, CR_EDX, 7, 0x80000007, 0 }, | ||
43 | { X86_FEATURE_NPT, CR_EDX, 0, 0x8000000a, 0 }, | 44 | { X86_FEATURE_NPT, CR_EDX, 0, 0x8000000a, 0 }, |
44 | { X86_FEATURE_LBRV, CR_EDX, 1, 0x8000000a, 0 }, | 45 | { X86_FEATURE_LBRV, CR_EDX, 1, 0x8000000a, 0 }, |
45 | { X86_FEATURE_SVML, CR_EDX, 2, 0x8000000a, 0 }, | 46 | { X86_FEATURE_SVML, CR_EDX, 2, 0x8000000a, 0 }, |
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 40fc86161d92..58b7f27cb3e9 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c | |||
@@ -100,13 +100,8 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) | |||
100 | irqctx->tinfo.task = curctx->tinfo.task; | 100 | irqctx->tinfo.task = curctx->tinfo.task; |
101 | irqctx->tinfo.previous_esp = current_stack_pointer; | 101 | irqctx->tinfo.previous_esp = current_stack_pointer; |
102 | 102 | ||
103 | /* | 103 | /* Copy the preempt_count so that the [soft]irq checks work. */ |
104 | * Copy the softirq bits in preempt_count so that the | 104 | irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count; |
105 | * softirq checks work in the hardirq context. | ||
106 | */ | ||
107 | irqctx->tinfo.preempt_count = | ||
108 | (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) | | ||
109 | (curctx->tinfo.preempt_count & SOFTIRQ_MASK); | ||
110 | 105 | ||
111 | if (unlikely(overflow)) | 106 | if (unlikely(overflow)) |
112 | call_on_stack(print_stack_overflow, isp); | 107 | call_on_stack(print_stack_overflow, isp); |
@@ -196,7 +191,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs) | |||
196 | if (unlikely(!desc)) | 191 | if (unlikely(!desc)) |
197 | return false; | 192 | return false; |
198 | 193 | ||
199 | if (!execute_on_irq_stack(overflow, desc, irq)) { | 194 | if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) { |
200 | if (unlikely(overflow)) | 195 | if (unlikely(overflow)) |
201 | print_stack_overflow(); | 196 | print_stack_overflow(); |
202 | desc->handle_irq(irq, desc); | 197 | desc->handle_irq(irq, desc); |
diff --git a/arch/x86/kernel/kprobes-common.h b/arch/x86/kernel/kprobes-common.h new file mode 100644 index 000000000000..3230b68ef29a --- /dev/null +++ b/arch/x86/kernel/kprobes-common.h | |||
@@ -0,0 +1,102 @@ | |||
1 | #ifndef __X86_KERNEL_KPROBES_COMMON_H | ||
2 | #define __X86_KERNEL_KPROBES_COMMON_H | ||
3 | |||
4 | /* Kprobes and Optprobes common header */ | ||
5 | |||
6 | #ifdef CONFIG_X86_64 | ||
7 | #define SAVE_REGS_STRING \ | ||
8 | /* Skip cs, ip, orig_ax. */ \ | ||
9 | " subq $24, %rsp\n" \ | ||
10 | " pushq %rdi\n" \ | ||
11 | " pushq %rsi\n" \ | ||
12 | " pushq %rdx\n" \ | ||
13 | " pushq %rcx\n" \ | ||
14 | " pushq %rax\n" \ | ||
15 | " pushq %r8\n" \ | ||
16 | " pushq %r9\n" \ | ||
17 | " pushq %r10\n" \ | ||
18 | " pushq %r11\n" \ | ||
19 | " pushq %rbx\n" \ | ||
20 | " pushq %rbp\n" \ | ||
21 | " pushq %r12\n" \ | ||
22 | " pushq %r13\n" \ | ||
23 | " pushq %r14\n" \ | ||
24 | " pushq %r15\n" | ||
25 | #define RESTORE_REGS_STRING \ | ||
26 | " popq %r15\n" \ | ||
27 | " popq %r14\n" \ | ||
28 | " popq %r13\n" \ | ||
29 | " popq %r12\n" \ | ||
30 | " popq %rbp\n" \ | ||
31 | " popq %rbx\n" \ | ||
32 | " popq %r11\n" \ | ||
33 | " popq %r10\n" \ | ||
34 | " popq %r9\n" \ | ||
35 | " popq %r8\n" \ | ||
36 | " popq %rax\n" \ | ||
37 | " popq %rcx\n" \ | ||
38 | " popq %rdx\n" \ | ||
39 | " popq %rsi\n" \ | ||
40 | " popq %rdi\n" \ | ||
41 | /* Skip orig_ax, ip, cs */ \ | ||
42 | " addq $24, %rsp\n" | ||
43 | #else | ||
44 | #define SAVE_REGS_STRING \ | ||
45 | /* Skip cs, ip, orig_ax and gs. */ \ | ||
46 | " subl $16, %esp\n" \ | ||
47 | " pushl %fs\n" \ | ||
48 | " pushl %es\n" \ | ||
49 | " pushl %ds\n" \ | ||
50 | " pushl %eax\n" \ | ||
51 | " pushl %ebp\n" \ | ||
52 | " pushl %edi\n" \ | ||
53 | " pushl %esi\n" \ | ||
54 | " pushl %edx\n" \ | ||
55 | " pushl %ecx\n" \ | ||
56 | " pushl %ebx\n" | ||
57 | #define RESTORE_REGS_STRING \ | ||
58 | " popl %ebx\n" \ | ||
59 | " popl %ecx\n" \ | ||
60 | " popl %edx\n" \ | ||
61 | " popl %esi\n" \ | ||
62 | " popl %edi\n" \ | ||
63 | " popl %ebp\n" \ | ||
64 | " popl %eax\n" \ | ||
65 | /* Skip ds, es, fs, gs, orig_ax, and ip. Note: don't pop cs here*/\ | ||
66 | " addl $24, %esp\n" | ||
67 | #endif | ||
68 | |||
69 | /* Check whether the instruction can be boosted */ | ||
70 | extern int can_boost(kprobe_opcode_t *instruction); | ||
71 | /* Recover instruction if given address is probed */ | ||
72 | extern unsigned long recover_probed_instruction(kprobe_opcode_t *buf, | ||
73 | unsigned long addr); | ||
74 | /* | ||
75 | * Copy an instruction and adjust the displacement if the instruction | ||
76 | * uses the %rip-relative addressing mode. | ||
77 | */ | ||
78 | extern int __copy_instruction(u8 *dest, u8 *src); | ||
79 | |||
80 | /* Generate a relative-jump/call instruction */ | ||
81 | extern void synthesize_reljump(void *from, void *to); | ||
82 | extern void synthesize_relcall(void *from, void *to); | ||
83 | |||
84 | #ifdef CONFIG_OPTPROBES | ||
85 | extern int arch_init_optprobes(void); | ||
86 | extern int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter); | ||
87 | extern unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr); | ||
88 | #else /* !CONFIG_OPTPROBES */ | ||
89 | static inline int arch_init_optprobes(void) | ||
90 | { | ||
91 | return 0; | ||
92 | } | ||
93 | static inline int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter) | ||
94 | { | ||
95 | return 0; | ||
96 | } | ||
97 | static inline unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr) | ||
98 | { | ||
99 | return addr; | ||
100 | } | ||
101 | #endif | ||
102 | #endif | ||
diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes-opt.c new file mode 100644 index 000000000000..c5e410eed403 --- /dev/null +++ b/arch/x86/kernel/kprobes-opt.c | |||
@@ -0,0 +1,512 @@ | |||
1 | /* | ||
2 | * Kernel Probes Jump Optimization (Optprobes) | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | * | ||
18 | * Copyright (C) IBM Corporation, 2002, 2004 | ||
19 | * Copyright (C) Hitachi Ltd., 2012 | ||
20 | */ | ||
21 | #include <linux/kprobes.h> | ||
22 | #include <linux/ptrace.h> | ||
23 | #include <linux/string.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/hardirq.h> | ||
26 | #include <linux/preempt.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/kdebug.h> | ||
29 | #include <linux/kallsyms.h> | ||
30 | #include <linux/ftrace.h> | ||
31 | |||
32 | #include <asm/cacheflush.h> | ||
33 | #include <asm/desc.h> | ||
34 | #include <asm/pgtable.h> | ||
35 | #include <asm/uaccess.h> | ||
36 | #include <asm/alternative.h> | ||
37 | #include <asm/insn.h> | ||
38 | #include <asm/debugreg.h> | ||
39 | |||
40 | #include "kprobes-common.h" | ||
41 | |||
42 | unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr) | ||
43 | { | ||
44 | struct optimized_kprobe *op; | ||
45 | struct kprobe *kp; | ||
46 | long offs; | ||
47 | int i; | ||
48 | |||
49 | for (i = 0; i < RELATIVEJUMP_SIZE; i++) { | ||
50 | kp = get_kprobe((void *)addr - i); | ||
51 | /* This function only handles jump-optimized kprobes */ | ||
52 | if (kp && kprobe_optimized(kp)) { | ||
53 | op = container_of(kp, struct optimized_kprobe, kp); | ||
54 | /* If op->list is not empty, op is still being optimized */ | ||
55 | if (list_empty(&op->list)) | ||
56 | goto found; | ||
57 | } | ||
58 | } | ||
59 | |||
60 | return addr; | ||
61 | found: | ||
62 | /* | ||
63 | * If the kprobe is optimized, the original bytes may have been | ||
64 | * overwritten by the jump destination address. In that case, the | ||
65 | * original bytes must be recovered from the op->optinsn.copied_insn buffer. | ||
66 | */ | ||
67 | memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); | ||
68 | if (addr == (unsigned long)kp->addr) { | ||
69 | buf[0] = kp->opcode; | ||
70 | memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); | ||
71 | } else { | ||
72 | offs = addr - (unsigned long)kp->addr - 1; | ||
73 | memcpy(buf, op->optinsn.copied_insn + offs, RELATIVE_ADDR_SIZE - offs); | ||
74 | } | ||
75 | |||
76 | return (unsigned long)buf; | ||
77 | } | ||
78 | |||
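As a rough illustration of the recovery logic in __recover_optprobed_insn(), here is a hedged userspace sketch assuming a 5-byte relative jump (one opcode byte plus a 4-byte displacement). The names copied_insn and probe_addr and the example byte values are made up; only the copy/offset arithmetic mirrors the function above.

#include <stdio.h>
#include <string.h>

#define RELATIVE_ADDR_SIZE 4	/* displacement bytes saved at optimize time */

static void recover(unsigned char *buf, const unsigned char *text,
		    long addr, long probe_addr, unsigned char saved_opcode,
		    const unsigned char *copied_insn)
{
	/* Start from the live (jump-patched) text... */
	memcpy(buf, text + addr, 8);
	if (addr == probe_addr) {
		/* ...then restore the saved first byte and displacement. */
		buf[0] = saved_opcode;
		memcpy(buf + 1, copied_insn, RELATIVE_ADDR_SIZE);
	} else {
		/* addr points into the middle of the 5-byte jump. */
		long offs = addr - probe_addr - 1;
		memcpy(buf, copied_insn + offs, RELATIVE_ADDR_SIZE - offs);
	}
}

int main(void)
{
	unsigned char text[16] = { 0xe9, 0x10, 0x20, 0x30, 0x40, 0x90 };
	unsigned char saved[RELATIVE_ADDR_SIZE] = { 0x89, 0xe5, 0x53, 0x48 };
	unsigned char buf[8];

	recover(buf, text, 2, 0, 0x55, saved);		/* addr inside the jump */
	printf("recovered byte at addr: 0x%02x\n", buf[0]);	/* 0xe5 */
	return 0;
}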
79 | /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */ | ||
80 | static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val) | ||
81 | { | ||
82 | #ifdef CONFIG_X86_64 | ||
83 | *addr++ = 0x48; | ||
84 | *addr++ = 0xbf; | ||
85 | #else | ||
86 | *addr++ = 0xb8; | ||
87 | #endif | ||
88 | *(unsigned long *)addr = val; | ||
89 | } | ||
90 | |||
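For reference, the bytes emitted by synthesize_set_arg1() decode as "movabs $imm64, %rdi" (REX.W 0x48 followed by 0xbf) on x86-64 and "mov $imm32, %eax" (0xb8) on 32-bit. The tiny sketch below is not from the patch; it only dumps the 64-bit byte pattern and assumes a little-endian host:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char insn[10];
	unsigned long val = 0xdeadbeefUL;	/* stands in for the op pointer */

	insn[0] = 0x48;				/* REX.W prefix */
	insn[1] = 0xbf;				/* movabs ..., %rdi */
	memcpy(&insn[2], &val, sizeof(val));	/* little-endian immediate */

	for (unsigned i = 0; i < 2 + sizeof(val); i++)
		printf("%02x ", insn[i]);
	printf("\n");
	return 0;
}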
91 | static void __used __kprobes kprobes_optinsn_template_holder(void) | ||
92 | { | ||
93 | asm volatile ( | ||
94 | ".global optprobe_template_entry\n" | ||
95 | "optprobe_template_entry:\n" | ||
96 | #ifdef CONFIG_X86_64 | ||
97 | /* We don't bother saving the ss register */ | ||
98 | " pushq %rsp\n" | ||
99 | " pushfq\n" | ||
100 | SAVE_REGS_STRING | ||
101 | " movq %rsp, %rsi\n" | ||
102 | ".global optprobe_template_val\n" | ||
103 | "optprobe_template_val:\n" | ||
104 | ASM_NOP5 | ||
105 | ASM_NOP5 | ||
106 | ".global optprobe_template_call\n" | ||
107 | "optprobe_template_call:\n" | ||
108 | ASM_NOP5 | ||
109 | /* Move flags to rsp */ | ||
110 | " movq 144(%rsp), %rdx\n" | ||
111 | " movq %rdx, 152(%rsp)\n" | ||
112 | RESTORE_REGS_STRING | ||
113 | /* Skip flags entry */ | ||
114 | " addq $8, %rsp\n" | ||
115 | " popfq\n" | ||
116 | #else /* CONFIG_X86_32 */ | ||
117 | " pushf\n" | ||
118 | SAVE_REGS_STRING | ||
119 | " movl %esp, %edx\n" | ||
120 | ".global optprobe_template_val\n" | ||
121 | "optprobe_template_val:\n" | ||
122 | ASM_NOP5 | ||
123 | ".global optprobe_template_call\n" | ||
124 | "optprobe_template_call:\n" | ||
125 | ASM_NOP5 | ||
126 | RESTORE_REGS_STRING | ||
127 | " addl $4, %esp\n" /* skip cs */ | ||
128 | " popf\n" | ||
129 | #endif | ||
130 | ".global optprobe_template_end\n" | ||
131 | "optprobe_template_end:\n"); | ||
132 | } | ||
133 | |||
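A quick sanity check (editorial sketch, not part of the patch) of the 144/152 offsets used in the 64-bit template above: after "pushq %rsp; pushfq" and SAVE_REGS_STRING, the stack holds 15 pushed general-purpose registers plus the 24 skipped bytes, so the earlier pushfq value sits 144 bytes above %rsp and the pushed %rsp 152 bytes above it.

#include <assert.h>

int main(void)
{
	int skipped = 24;	/* cs, ip, orig_ax slots from SAVE_REGS_STRING */
	int gprs = 15 * 8;	/* rdi..r15 pushed by SAVE_REGS_STRING */

	assert(skipped + gprs == 144);		/* movq 144(%rsp), %rdx */
	assert(skipped + gprs + 8 == 152);	/* movq %rdx, 152(%rsp) */
	return 0;
}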
134 | #define TMPL_MOVE_IDX \ | ||
135 | ((long)&optprobe_template_val - (long)&optprobe_template_entry) | ||
136 | #define TMPL_CALL_IDX \ | ||
137 | ((long)&optprobe_template_call - (long)&optprobe_template_entry) | ||
138 | #define TMPL_END_IDX \ | ||
139 | ((long)&optprobe_template_end - (long)&optprobe_template_entry) | ||
140 | |||
141 | #define INT3_SIZE sizeof(kprobe_opcode_t) | ||
142 | |||
143 | /* Optimized kprobe callback function: called from optinsn */ | ||
144 | static void __kprobes optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs) | ||
145 | { | ||
146 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | ||
147 | unsigned long flags; | ||
148 | |||
149 | /* This is possible if op is under delayed unoptimization */ | ||
150 | if (kprobe_disabled(&op->kp)) | ||
151 | return; | ||
152 | |||
153 | local_irq_save(flags); | ||
154 | if (kprobe_running()) { | ||
155 | kprobes_inc_nmissed_count(&op->kp); | ||
156 | } else { | ||
157 | /* Save skipped registers */ | ||
158 | #ifdef CONFIG_X86_64 | ||
159 | regs->cs = __KERNEL_CS; | ||
160 | #else | ||
161 | regs->cs = __KERNEL_CS | get_kernel_rpl(); | ||
162 | regs->gs = 0; | ||
163 | #endif | ||
164 | regs->ip = (unsigned long)op->kp.addr + INT3_SIZE; | ||
165 | regs->orig_ax = ~0UL; | ||
166 | |||
167 | __this_cpu_write(current_kprobe, &op->kp); | ||
168 | kcb->kprobe_status = KPROBE_HIT_ACTIVE; | ||
169 | opt_pre_handler(&op->kp, regs); | ||
170 | __this_cpu_write(current_kprobe, NULL); | ||
171 | } | ||
172 | local_irq_restore(flags); | ||
173 | } | ||
174 | |||
175 | static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src) | ||
176 | { | ||
177 | int len = 0, ret; | ||
178 | |||
179 | while (len < RELATIVEJUMP_SIZE) { | ||
180 | ret = __copy_instruction(dest + len, src + len); | ||
181 | if (!ret || !can_boost(dest + len)) | ||
182 | return -EINVAL; | ||
183 | len += ret; | ||
184 | } | ||
185 | /* Check whether the address range is reserved */ | ||
186 | if (ftrace_text_reserved(src, src + len - 1) || | ||
187 | alternatives_text_reserved(src, src + len - 1) || | ||
188 | jump_label_text_reserved(src, src + len - 1)) | ||
189 | return -EBUSY; | ||
190 | |||
191 | return len; | ||
192 | } | ||
193 | |||
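The loop in copy_optimized_instructions() keeps copying whole instructions until at least RELATIVEJUMP_SIZE (5) bytes are covered, so the relative jump never splits an instruction. Below is a hedged userspace sketch of that idea; insn_len() is a toy stand-in for the kernel's instruction decoder and the byte sequence is invented.

#include <stdio.h>
#include <string.h>

#define RELATIVEJUMP_SIZE 5

static int insn_len(const unsigned char *p)
{
	/* Toy decoder: 0x55 push, 0x5d pop, 0x89 mov r/m32 with one modrm byte. */
	switch (p[0]) {
	case 0x55: case 0x5d: return 1;
	case 0x89: return 2;
	default:   return 0;	/* unknown opcode: give up */
	}
}

int main(void)
{
	unsigned char src[] = { 0x55, 0x89, 0xe5, 0x55, 0x5d, 0x5d };
	unsigned char dest[16];
	int len = 0, ret;

	while (len < RELATIVEJUMP_SIZE) {
		ret = insn_len(src + len);
		if (!ret)
			return 1;
		memcpy(dest + len, src + len, ret);
		len += ret;
	}
	printf("copied %d bytes to cover the jump\n", len);	/* prints 5 */
	return 0;
}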
194 | /* Check whether insn is an indirect jump */ | ||
195 | static int __kprobes insn_is_indirect_jump(struct insn *insn) | ||
196 | { | ||
197 | return ((insn->opcode.bytes[0] == 0xff && | ||
198 | (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */ | ||
199 | insn->opcode.bytes[0] == 0xea); /* Segment based jump */ | ||
200 | } | ||
201 | |||
202 | /* Check whether insn jumps into specified address range */ | ||
203 | static int insn_jump_into_range(struct insn *insn, unsigned long start, int len) | ||
204 | { | ||
205 | unsigned long target = 0; | ||
206 | |||
207 | switch (insn->opcode.bytes[0]) { | ||
208 | case 0xe0: /* loopne */ | ||
209 | case 0xe1: /* loope */ | ||
210 | case 0xe2: /* loop */ | ||
211 | case 0xe3: /* jcxz */ | ||
212 | case 0xe9: /* near relative jump */ | ||
213 | case 0xeb: /* short relative jump */ | ||
214 | break; | ||
215 | case 0x0f: | ||
216 | if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */ | ||
217 | break; | ||
218 | return 0; | ||
219 | default: | ||
220 | if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */ | ||
221 | break; | ||
222 | return 0; | ||
223 | } | ||
224 | target = (unsigned long)insn->next_byte + insn->immediate.value; | ||
225 | |||
226 | return (start <= target && target <= start + len); | ||
227 | } | ||
228 | |||
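The target computation above follows the usual x86 rule that a relative branch is taken from the byte after the instruction. A standalone illustration with fabricated values (not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned long insn_addr = 0x1000;	/* start of a 2-byte "jmp rel8" */
	int length = 2;
	long imm = -4;				/* signed displacement */
	unsigned long next_byte = insn_addr + length;
	unsigned long target = next_byte + imm;

	printf("target = 0x%lx\n", target);	/* 0xffe */
	return 0;
}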
229 | /* Decode the whole function to ensure no instruction jumps into the target */ | ||
230 | static int __kprobes can_optimize(unsigned long paddr) | ||
231 | { | ||
232 | unsigned long addr, size = 0, offset = 0; | ||
233 | struct insn insn; | ||
234 | kprobe_opcode_t buf[MAX_INSN_SIZE]; | ||
235 | |||
236 | /* Lookup symbol including addr */ | ||
237 | if (!kallsyms_lookup_size_offset(paddr, &size, &offset)) | ||
238 | return 0; | ||
239 | |||
240 | /* | ||
241 | * Do not optimize in the entry code due to the unstable | ||
242 | * stack handling. | ||
243 | */ | ||
244 | if ((paddr >= (unsigned long)__entry_text_start) && | ||
245 | (paddr < (unsigned long)__entry_text_end)) | ||
246 | return 0; | ||
247 | |||
248 | /* Check that there is enough space for a relative jump. */ | ||
249 | if (size - offset < RELATIVEJUMP_SIZE) | ||
250 | return 0; | ||
251 | |||
252 | /* Decode instructions */ | ||
253 | addr = paddr - offset; | ||
254 | while (addr < paddr - offset + size) { /* Decode until function end */ | ||
255 | if (search_exception_tables(addr)) | ||
256 | /* | ||
257 | * Since some fixup code may jump into this function, | ||
258 | * we can't optimize a kprobe in this function. | ||
259 | */ | ||
260 | return 0; | ||
261 | kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, addr)); | ||
262 | insn_get_length(&insn); | ||
263 | /* Another subsystem has put a breakpoint */ | ||
264 | if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) | ||
265 | return 0; | ||
266 | /* Recover address */ | ||
267 | insn.kaddr = (void *)addr; | ||
268 | insn.next_byte = (void *)(addr + insn.length); | ||
269 | /* Check that no instruction jumps into the target */ | ||
270 | if (insn_is_indirect_jump(&insn) || | ||
271 | insn_jump_into_range(&insn, paddr + INT3_SIZE, | ||
272 | RELATIVE_ADDR_SIZE)) | ||
273 | return 0; | ||
274 | addr += insn.length; | ||
275 | } | ||
276 | |||
277 | return 1; | ||
278 | } | ||
279 | |||
280 | /* Check whether the optimized_kprobe can actually be optimized. */ | ||
281 | int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op) | ||
282 | { | ||
283 | int i; | ||
284 | struct kprobe *p; | ||
285 | |||
286 | for (i = 1; i < op->optinsn.size; i++) { | ||
287 | p = get_kprobe(op->kp.addr + i); | ||
288 | if (p && !kprobe_disabled(p)) | ||
289 | return -EEXIST; | ||
290 | } | ||
291 | |||
292 | return 0; | ||
293 | } | ||
294 | |||
295 | /* Check whether addr is within the optimized instructions. */ | ||
296 | int __kprobes | ||
297 | arch_within_optimized_kprobe(struct optimized_kprobe *op, unsigned long addr) | ||
298 | { | ||
299 | return ((unsigned long)op->kp.addr <= addr && | ||
300 | (unsigned long)op->kp.addr + op->optinsn.size > addr); | ||
301 | } | ||
302 | |||
303 | /* Free optimized instruction slot */ | ||
304 | static __kprobes | ||
305 | void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty) | ||
306 | { | ||
307 | if (op->optinsn.insn) { | ||
308 | free_optinsn_slot(op->optinsn.insn, dirty); | ||
309 | op->optinsn.insn = NULL; | ||
310 | op->optinsn.size = 0; | ||
311 | } | ||
312 | } | ||
313 | |||
314 | void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op) | ||
315 | { | ||
316 | __arch_remove_optimized_kprobe(op, 1); | ||
317 | } | ||
318 | |||
319 | /* | ||
320 | * Copy the instructions that will be replaced by the jump. | ||
321 | * Target instructions MUST be relocatable (checked inside). | ||
322 | * This is called when a new aggregated (optimized) probe is allocated or reused. | ||
323 | */ | ||
324 | int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op) | ||
325 | { | ||
326 | u8 *buf; | ||
327 | int ret; | ||
328 | long rel; | ||
329 | |||
330 | if (!can_optimize((unsigned long)op->kp.addr)) | ||
331 | return -EILSEQ; | ||
332 | |||
333 | op->optinsn.insn = get_optinsn_slot(); | ||
334 | if (!op->optinsn.insn) | ||
335 | return -ENOMEM; | ||
336 | |||
337 | /* | ||
338 | * Verify that the address gap is within the +/-2GB range reachable | ||
339 | * by a relative jump. | ||
340 | */ | ||
341 | rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE; | ||
342 | if (abs(rel) > 0x7fffffff) | ||
343 | return -ERANGE; | ||
344 | |||
345 | buf = (u8 *)op->optinsn.insn; | ||
346 | |||
347 | /* Copy instructions into the out-of-line buffer */ | ||
348 | ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr); | ||
349 | if (ret < 0) { | ||
350 | __arch_remove_optimized_kprobe(op, 0); | ||
351 | return ret; | ||
352 | } | ||
353 | op->optinsn.size = ret; | ||
354 | |||
355 | /* Copy the arch-dependent instance from the template */ | ||
356 | memcpy(buf, &optprobe_template_entry, TMPL_END_IDX); | ||
357 | |||
358 | /* Set probe information */ | ||
359 | synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op); | ||
360 | |||
361 | /* Set probe function call */ | ||
362 | synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback); | ||
363 | |||
364 | /* Set a returning jmp instruction at the tail of the out-of-line buffer */ | ||
365 | synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size, | ||
366 | (u8 *)op->kp.addr + op->optinsn.size); | ||
367 | |||
368 | flush_icache_range((unsigned long) buf, | ||
369 | (unsigned long) buf + TMPL_END_IDX + | ||
370 | op->optinsn.size + RELATIVEJUMP_SIZE); | ||
371 | return 0; | ||
372 | } | ||
373 | |||
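Because the detour buffer is reached with a rel32 jump, the slot must lie within a signed 32-bit displacement of the instruction following the 5-byte jump. A standalone sketch of that check with fabricated addresses (the displacement formula here matches setup_optimize_kprobe() below, not the kernel's exact expression above):

#include <stdio.h>
#include <stdlib.h>

#define RELATIVEJUMP_SIZE 5

int main(void)
{
	long probe  = 0x08048000L;	/* kp->addr stand-in */
	long detour = 0x08148000L;	/* optinsn.insn stand-in */
	long rel = detour - (probe + RELATIVEJUMP_SIZE);

	if (labs(rel) > 0x7fffffffL)
		printf("out of rel32 range, cannot optimize\n");
	else
		printf("rel32 displacement: %ld\n", rel);	/* 1048571 */
	return 0;
}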
374 | #define MAX_OPTIMIZE_PROBES 256 | ||
375 | static struct text_poke_param *jump_poke_params; | ||
376 | static struct jump_poke_buffer { | ||
377 | u8 buf[RELATIVEJUMP_SIZE]; | ||
378 | } *jump_poke_bufs; | ||
379 | |||
380 | static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm, | ||
381 | u8 *insn_buf, | ||
382 | struct optimized_kprobe *op) | ||
383 | { | ||
384 | s32 rel = (s32)((long)op->optinsn.insn - | ||
385 | ((long)op->kp.addr + RELATIVEJUMP_SIZE)); | ||
386 | |||
387 | /* Back up the instructions that will be replaced by the jump address */ | ||
388 | memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE, | ||
389 | RELATIVE_ADDR_SIZE); | ||
390 | |||
391 | insn_buf[0] = RELATIVEJUMP_OPCODE; | ||
392 | *(s32 *)(&insn_buf[1]) = rel; | ||
393 | |||
394 | tprm->addr = op->kp.addr; | ||
395 | tprm->opcode = insn_buf; | ||
396 | tprm->len = RELATIVEJUMP_SIZE; | ||
397 | } | ||
398 | |||
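setup_optimize_kprobe() assembles a 5-byte near jump: the 0xe9 opcode followed by the little-endian rel32 displacement. A minimal, hypothetical userspace dump of that encoding (little-endian host assumed, values invented):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	unsigned char jmp[5];
	int32_t rel = 0x1234;		/* displacement to the detour buffer */

	jmp[0] = 0xe9;			/* RELATIVEJUMP_OPCODE */
	memcpy(&jmp[1], &rel, sizeof(rel));

	for (int i = 0; i < 5; i++)
		printf("%02x ", jmp[i]);
	printf("\n");			/* e9 34 12 00 00 */
	return 0;
}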
399 | /* | ||
400 | * Replace breakpoints (int3) with relative jumps. | ||
401 | * Caller must hold kprobe_mutex and text_mutex. | ||
402 | */ | ||
403 | void __kprobes arch_optimize_kprobes(struct list_head *oplist) | ||
404 | { | ||
405 | struct optimized_kprobe *op, *tmp; | ||
406 | int c = 0; | ||
407 | |||
408 | list_for_each_entry_safe(op, tmp, oplist, list) { | ||
409 | WARN_ON(kprobe_disabled(&op->kp)); | ||
410 | /* Setup param */ | ||
411 | setup_optimize_kprobe(&jump_poke_params[c], | ||
412 | jump_poke_bufs[c].buf, op); | ||
413 | list_del_init(&op->list); | ||
414 | if (++c >= MAX_OPTIMIZE_PROBES) | ||
415 | break; | ||
416 | } | ||
417 | |||
418 | /* | ||
419 | * text_poke_smp doesn't support modifying code that may run in | ||
420 | * NMI/MCE context. However, since kprobes itself doesn't support | ||
421 | * probing such code either, this is not a problem. | ||
422 | */ | ||
423 | text_poke_smp_batch(jump_poke_params, c); | ||
424 | } | ||
425 | |||
426 | static void __kprobes setup_unoptimize_kprobe(struct text_poke_param *tprm, | ||
427 | u8 *insn_buf, | ||
428 | struct optimized_kprobe *op) | ||
429 | { | ||
430 | /* Set int3 to first byte for kprobes */ | ||
431 | insn_buf[0] = BREAKPOINT_INSTRUCTION; | ||
432 | memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); | ||
433 | |||
434 | tprm->addr = op->kp.addr; | ||
435 | tprm->opcode = insn_buf; | ||
436 | tprm->len = RELATIVEJUMP_SIZE; | ||
437 | } | ||
438 | |||
439 | /* | ||
440 | * Recover original instructions and breakpoints from relative jumps. | ||
441 | * Caller must hold kprobe_mutex. | ||
442 | */ | ||
443 | void arch_unoptimize_kprobes(struct list_head *oplist, | ||
444 | struct list_head *done_list) | ||
445 | { | ||
446 | struct optimized_kprobe *op, *tmp; | ||
447 | int c = 0; | ||
448 | |||
449 | list_for_each_entry_safe(op, tmp, oplist, list) { | ||
450 | /* Setup param */ | ||
451 | setup_unoptimize_kprobe(&jump_poke_params[c], | ||
452 | jump_poke_bufs[c].buf, op); | ||
453 | list_move(&op->list, done_list); | ||
454 | if (++c >= MAX_OPTIMIZE_PROBES) | ||
455 | break; | ||
456 | } | ||
457 | |||
458 | /* | ||
459 | * text_poke_smp doesn't support modifying code that may run in | ||
460 | * NMI/MCE context. However, since kprobes itself doesn't support | ||
461 | * probing such code either, this is not a problem. | ||
462 | */ | ||
463 | text_poke_smp_batch(jump_poke_params, c); | ||
464 | } | ||
465 | |||
466 | /* Replace a relative jump with a breakpoint (int3). */ | ||
467 | void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op) | ||
468 | { | ||
469 | u8 buf[RELATIVEJUMP_SIZE]; | ||
470 | |||
471 | /* Set int3 to first byte for kprobes */ | ||
472 | buf[0] = BREAKPOINT_INSTRUCTION; | ||
473 | memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); | ||
474 | text_poke_smp(op->kp.addr, buf, RELATIVEJUMP_SIZE); | ||
475 | } | ||
476 | |||
477 | int __kprobes | ||
478 | setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter) | ||
479 | { | ||
480 | struct optimized_kprobe *op; | ||
481 | |||
482 | if (p->flags & KPROBE_FLAG_OPTIMIZED) { | ||
483 | /* This kprobe is really able to run the optimized path. */ | ||
484 | op = container_of(p, struct optimized_kprobe, kp); | ||
485 | /* Detour through copied instructions */ | ||
486 | regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX; | ||
487 | if (!reenter) | ||
488 | reset_current_kprobe(); | ||
489 | preempt_enable_no_resched(); | ||
490 | return 1; | ||
491 | } | ||
492 | return 0; | ||
493 | } | ||
494 | |||
495 | int __kprobes arch_init_optprobes(void) | ||
496 | { | ||
497 | /* Allocate code buffer and parameter array */ | ||
498 | jump_poke_bufs = kmalloc(sizeof(struct jump_poke_buffer) * | ||
499 | MAX_OPTIMIZE_PROBES, GFP_KERNEL); | ||
500 | if (!jump_poke_bufs) | ||
501 | return -ENOMEM; | ||
502 | |||
503 | jump_poke_params = kmalloc(sizeof(struct text_poke_param) * | ||
504 | MAX_OPTIMIZE_PROBES, GFP_KERNEL); | ||
505 | if (!jump_poke_params) { | ||
506 | kfree(jump_poke_bufs); | ||
507 | jump_poke_bufs = NULL; | ||
508 | return -ENOMEM; | ||
509 | } | ||
510 | |||
511 | return 0; | ||
512 | } | ||
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 7da647d8b64c..e213fc8408d2 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c | |||
@@ -30,16 +30,15 @@ | |||
30 | * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi | 30 | * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi |
31 | * <prasanna@in.ibm.com> added function-return probes. | 31 | * <prasanna@in.ibm.com> added function-return probes. |
32 | * 2005-May Rusty Lynch <rusty.lynch@intel.com> | 32 | * 2005-May Rusty Lynch <rusty.lynch@intel.com> |
33 | * Added function return probes functionality | 33 | * Added function return probes functionality |
34 | * 2006-Feb Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added | 34 | * 2006-Feb Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added |
35 | * kprobe-booster and kretprobe-booster for i386. | 35 | * kprobe-booster and kretprobe-booster for i386. |
36 | * 2007-Dec Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster | 36 | * 2007-Dec Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster |
37 | * and kretprobe-booster for x86-64 | 37 | * and kretprobe-booster for x86-64 |
38 | * 2007-Dec Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven | 38 | * 2007-Dec Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven |
39 | * <arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com> | 39 | * <arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com> |
40 | * unified x86 kprobes code. | 40 | * unified x86 kprobes code. |
41 | */ | 41 | */ |
42 | |||
43 | #include <linux/kprobes.h> | 42 | #include <linux/kprobes.h> |
44 | #include <linux/ptrace.h> | 43 | #include <linux/ptrace.h> |
45 | #include <linux/string.h> | 44 | #include <linux/string.h> |
@@ -59,6 +58,8 @@ | |||
59 | #include <asm/insn.h> | 58 | #include <asm/insn.h> |
60 | #include <asm/debugreg.h> | 59 | #include <asm/debugreg.h> |
61 | 60 | ||
61 | #include "kprobes-common.h" | ||
62 | |||
62 | void jprobe_return_end(void); | 63 | void jprobe_return_end(void); |
63 | 64 | ||
64 | DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; | 65 | DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; |
@@ -108,6 +109,7 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = { | |||
108 | doesn't switch kernel stack.*/ | 109 | doesn't switch kernel stack.*/ |
109 | {NULL, NULL} /* Terminator */ | 110 | {NULL, NULL} /* Terminator */ |
110 | }; | 111 | }; |
112 | |||
111 | const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist); | 113 | const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist); |
112 | 114 | ||
113 | static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op) | 115 | static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op) |
@@ -123,11 +125,17 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op) | |||
123 | } | 125 | } |
124 | 126 | ||
125 | /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/ | 127 | /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/ |
126 | static void __kprobes synthesize_reljump(void *from, void *to) | 128 | void __kprobes synthesize_reljump(void *from, void *to) |
127 | { | 129 | { |
128 | __synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE); | 130 | __synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE); |
129 | } | 131 | } |
130 | 132 | ||
133 | /* Insert a call instruction at address 'from', which calls address 'to'.*/ | ||
134 | void __kprobes synthesize_relcall(void *from, void *to) | ||
135 | { | ||
136 | __synthesize_relative_insn(from, to, RELATIVECALL_OPCODE); | ||
137 | } | ||
138 | |||
131 | /* | 139 | /* |
132 | * Skip the prefixes of the instruction. | 140 | * Skip the prefixes of the instruction. |
133 | */ | 141 | */ |
@@ -151,7 +159,7 @@ static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn) | |||
151 | * Returns non-zero if opcode is boostable. | 159 | * Returns non-zero if opcode is boostable. |
152 | * RIP relative instructions are adjusted at copying time in 64 bits mode | 160 | * RIP relative instructions are adjusted at copying time in 64 bits mode |
153 | */ | 161 | */ |
154 | static int __kprobes can_boost(kprobe_opcode_t *opcodes) | 162 | int __kprobes can_boost(kprobe_opcode_t *opcodes) |
155 | { | 163 | { |
156 | kprobe_opcode_t opcode; | 164 | kprobe_opcode_t opcode; |
157 | kprobe_opcode_t *orig_opcodes = opcodes; | 165 | kprobe_opcode_t *orig_opcodes = opcodes; |
@@ -207,13 +215,15 @@ retry: | |||
207 | } | 215 | } |
208 | } | 216 | } |
209 | 217 | ||
210 | /* Recover the probed instruction at addr for further analysis. */ | 218 | static unsigned long |
211 | static int recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr) | 219 | __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr) |
212 | { | 220 | { |
213 | struct kprobe *kp; | 221 | struct kprobe *kp; |
222 | |||
214 | kp = get_kprobe((void *)addr); | 223 | kp = get_kprobe((void *)addr); |
224 | /* There is no probe, return original address */ | ||
215 | if (!kp) | 225 | if (!kp) |
216 | return -EINVAL; | 226 | return addr; |
217 | 227 | ||
218 | /* | 228 | /* |
219 | * Basically, kp->ainsn.insn has an original instruction. | 229 | * Basically, kp->ainsn.insn has an original instruction. |
@@ -230,14 +240,29 @@ static int recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr) | |||
230 | */ | 240 | */ |
231 | memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); | 241 | memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); |
232 | buf[0] = kp->opcode; | 242 | buf[0] = kp->opcode; |
233 | return 0; | 243 | return (unsigned long)buf; |
244 | } | ||
245 | |||
246 | /* | ||
247 | * Recover the probed instruction at addr for further analysis. | ||
248 | * Caller must hold kprobe_mutex or disable preemption to prevent | ||
249 | * the kprobes it references from being released. | ||
250 | */ | ||
251 | unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr) | ||
252 | { | ||
253 | unsigned long __addr; | ||
254 | |||
255 | __addr = __recover_optprobed_insn(buf, addr); | ||
256 | if (__addr != addr) | ||
257 | return __addr; | ||
258 | |||
259 | return __recover_probed_insn(buf, addr); | ||
234 | } | 260 | } |
235 | 261 | ||
236 | /* Check if paddr is at an instruction boundary */ | 262 | /* Check if paddr is at an instruction boundary */ |
237 | static int __kprobes can_probe(unsigned long paddr) | 263 | static int __kprobes can_probe(unsigned long paddr) |
238 | { | 264 | { |
239 | int ret; | 265 | unsigned long addr, __addr, offset = 0; |
240 | unsigned long addr, offset = 0; | ||
241 | struct insn insn; | 266 | struct insn insn; |
242 | kprobe_opcode_t buf[MAX_INSN_SIZE]; | 267 | kprobe_opcode_t buf[MAX_INSN_SIZE]; |
243 | 268 | ||
@@ -247,26 +272,24 @@ static int __kprobes can_probe(unsigned long paddr) | |||
247 | /* Decode instructions */ | 272 | /* Decode instructions */ |
248 | addr = paddr - offset; | 273 | addr = paddr - offset; |
249 | while (addr < paddr) { | 274 | while (addr < paddr) { |
250 | kernel_insn_init(&insn, (void *)addr); | ||
251 | insn_get_opcode(&insn); | ||
252 | |||
253 | /* | 275 | /* |
254 | * Check if the instruction has been modified by another | 276 | * Check if the instruction has been modified by another |
255 | * kprobe, in which case we replace the breakpoint by the | 277 | * kprobe, in which case we replace the breakpoint by the |
256 | * original instruction in our buffer. | 278 | * original instruction in our buffer. |
279 | * Also, jump optimization changes the breakpoint to a | ||
280 | * relative jump. Since relative jumps themselves are used | ||
281 | * in normal code, we simply continue if there is no kprobe. | ||
257 | */ | 282 | */ |
258 | if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) { | 283 | __addr = recover_probed_instruction(buf, addr); |
259 | ret = recover_probed_instruction(buf, addr); | 284 | kernel_insn_init(&insn, (void *)__addr); |
260 | if (ret) | ||
261 | /* | ||
262 | * Another debugging subsystem might insert | ||
263 | * this breakpoint. In that case, we can't | ||
264 | * recover it. | ||
265 | */ | ||
266 | return 0; | ||
267 | kernel_insn_init(&insn, buf); | ||
268 | } | ||
269 | insn_get_length(&insn); | 285 | insn_get_length(&insn); |
286 | |||
287 | /* | ||
288 | * Another debugging subsystem might insert this breakpoint. | ||
289 | * In that case, we can't recover it. | ||
290 | */ | ||
291 | if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) | ||
292 | return 0; | ||
270 | addr += insn.length; | 293 | addr += insn.length; |
271 | } | 294 | } |
272 | 295 | ||
@@ -299,24 +322,16 @@ static int __kprobes is_IF_modifier(kprobe_opcode_t *insn) | |||
299 | * If not, return null. | 322 | * If not, return null. |
300 | * Only applicable to 64-bit x86. | 323 | * Only applicable to 64-bit x86. |
301 | */ | 324 | */ |
302 | static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover) | 325 | int __kprobes __copy_instruction(u8 *dest, u8 *src) |
303 | { | 326 | { |
304 | struct insn insn; | 327 | struct insn insn; |
305 | int ret; | ||
306 | kprobe_opcode_t buf[MAX_INSN_SIZE]; | 328 | kprobe_opcode_t buf[MAX_INSN_SIZE]; |
307 | 329 | ||
308 | kernel_insn_init(&insn, src); | 330 | kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, (unsigned long)src)); |
309 | if (recover) { | ||
310 | insn_get_opcode(&insn); | ||
311 | if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) { | ||
312 | ret = recover_probed_instruction(buf, | ||
313 | (unsigned long)src); | ||
314 | if (ret) | ||
315 | return 0; | ||
316 | kernel_insn_init(&insn, buf); | ||
317 | } | ||
318 | } | ||
319 | insn_get_length(&insn); | 331 | insn_get_length(&insn); |
332 | /* Another subsystem put a breakpoint; we failed to recover it */ | ||
333 | if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) | ||
334 | return 0; | ||
320 | memcpy(dest, insn.kaddr, insn.length); | 335 | memcpy(dest, insn.kaddr, insn.length); |
321 | 336 | ||
322 | #ifdef CONFIG_X86_64 | 337 | #ifdef CONFIG_X86_64 |
@@ -337,8 +352,7 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover) | |||
337 | * extension of the original signed 32-bit displacement would | 352 | * extension of the original signed 32-bit displacement would |
338 | * have given. | 353 | * have given. |
339 | */ | 354 | */ |
340 | newdisp = (u8 *) src + (s64) insn.displacement.value - | 355 | newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest; |
341 | (u8 *) dest; | ||
342 | BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */ | 356 | BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */ |
343 | disp = (u8 *) dest + insn_offset_displacement(&insn); | 357 | disp = (u8 *) dest + insn_offset_displacement(&insn); |
344 | *(s32 *) disp = (s32) newdisp; | 358 | *(s32 *) disp = (s32) newdisp; |
@@ -349,18 +363,20 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover) | |||
349 | 363 | ||
350 | static void __kprobes arch_copy_kprobe(struct kprobe *p) | 364 | static void __kprobes arch_copy_kprobe(struct kprobe *p) |
351 | { | 365 | { |
366 | /* Copy the instruction, recovering it if another optprobe has modified it. */ | ||
367 | __copy_instruction(p->ainsn.insn, p->addr); | ||
368 | |||
352 | /* | 369 | /* |
353 | * Copy an instruction without recovering int3, because it will be | 370 | * __copy_instruction can modify the displacement of the instruction, |
354 | * put by another subsystem. | 371 | * but it doesn't affect boostable check. |
355 | */ | 372 | */ |
356 | __copy_instruction(p->ainsn.insn, p->addr, 0); | 373 | if (can_boost(p->ainsn.insn)) |
357 | |||
358 | if (can_boost(p->addr)) | ||
359 | p->ainsn.boostable = 0; | 374 | p->ainsn.boostable = 0; |
360 | else | 375 | else |
361 | p->ainsn.boostable = -1; | 376 | p->ainsn.boostable = -1; |
362 | 377 | ||
363 | p->opcode = *p->addr; | 378 | /* Also, displacement change doesn't affect the first byte */ |
379 | p->opcode = p->ainsn.insn[0]; | ||
364 | } | 380 | } |
365 | 381 | ||
366 | int __kprobes arch_prepare_kprobe(struct kprobe *p) | 382 | int __kprobes arch_prepare_kprobe(struct kprobe *p) |
@@ -442,8 +458,8 @@ static void __kprobes restore_btf(void) | |||
442 | } | 458 | } |
443 | } | 459 | } |
444 | 460 | ||
445 | void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, | 461 | void __kprobes |
446 | struct pt_regs *regs) | 462 | arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) |
447 | { | 463 | { |
448 | unsigned long *sara = stack_addr(regs); | 464 | unsigned long *sara = stack_addr(regs); |
449 | 465 | ||
@@ -453,16 +469,8 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, | |||
453 | *sara = (unsigned long) &kretprobe_trampoline; | 469 | *sara = (unsigned long) &kretprobe_trampoline; |
454 | } | 470 | } |
455 | 471 | ||
456 | #ifdef CONFIG_OPTPROBES | 472 | static void __kprobes |
457 | static int __kprobes setup_detour_execution(struct kprobe *p, | 473 | setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb, int reenter) |
458 | struct pt_regs *regs, | ||
459 | int reenter); | ||
460 | #else | ||
461 | #define setup_detour_execution(p, regs, reenter) (0) | ||
462 | #endif | ||
463 | |||
464 | static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs, | ||
465 | struct kprobe_ctlblk *kcb, int reenter) | ||
466 | { | 474 | { |
467 | if (setup_detour_execution(p, regs, reenter)) | 475 | if (setup_detour_execution(p, regs, reenter)) |
468 | return; | 476 | return; |
@@ -504,8 +512,8 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs, | |||
504 | * within the handler. We save the original kprobes variables and just single | 512 | * within the handler. We save the original kprobes variables and just single |
505 | * step on the instruction of the new probe without calling any user handlers. | 513 | * step on the instruction of the new probe without calling any user handlers. |
506 | */ | 514 | */ |
507 | static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs, | 515 | static int __kprobes |
508 | struct kprobe_ctlblk *kcb) | 516 | reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) |
509 | { | 517 | { |
510 | switch (kcb->kprobe_status) { | 518 | switch (kcb->kprobe_status) { |
511 | case KPROBE_HIT_SSDONE: | 519 | case KPROBE_HIT_SSDONE: |
@@ -600,69 +608,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) | |||
600 | return 0; | 608 | return 0; |
601 | } | 609 | } |
602 | 610 | ||
603 | #ifdef CONFIG_X86_64 | ||
604 | #define SAVE_REGS_STRING \ | ||
605 | /* Skip cs, ip, orig_ax. */ \ | ||
606 | " subq $24, %rsp\n" \ | ||
607 | " pushq %rdi\n" \ | ||
608 | " pushq %rsi\n" \ | ||
609 | " pushq %rdx\n" \ | ||
610 | " pushq %rcx\n" \ | ||
611 | " pushq %rax\n" \ | ||
612 | " pushq %r8\n" \ | ||
613 | " pushq %r9\n" \ | ||
614 | " pushq %r10\n" \ | ||
615 | " pushq %r11\n" \ | ||
616 | " pushq %rbx\n" \ | ||
617 | " pushq %rbp\n" \ | ||
618 | " pushq %r12\n" \ | ||
619 | " pushq %r13\n" \ | ||
620 | " pushq %r14\n" \ | ||
621 | " pushq %r15\n" | ||
622 | #define RESTORE_REGS_STRING \ | ||
623 | " popq %r15\n" \ | ||
624 | " popq %r14\n" \ | ||
625 | " popq %r13\n" \ | ||
626 | " popq %r12\n" \ | ||
627 | " popq %rbp\n" \ | ||
628 | " popq %rbx\n" \ | ||
629 | " popq %r11\n" \ | ||
630 | " popq %r10\n" \ | ||
631 | " popq %r9\n" \ | ||
632 | " popq %r8\n" \ | ||
633 | " popq %rax\n" \ | ||
634 | " popq %rcx\n" \ | ||
635 | " popq %rdx\n" \ | ||
636 | " popq %rsi\n" \ | ||
637 | " popq %rdi\n" \ | ||
638 | /* Skip orig_ax, ip, cs */ \ | ||
639 | " addq $24, %rsp\n" | ||
640 | #else | ||
641 | #define SAVE_REGS_STRING \ | ||
642 | /* Skip cs, ip, orig_ax and gs. */ \ | ||
643 | " subl $16, %esp\n" \ | ||
644 | " pushl %fs\n" \ | ||
645 | " pushl %es\n" \ | ||
646 | " pushl %ds\n" \ | ||
647 | " pushl %eax\n" \ | ||
648 | " pushl %ebp\n" \ | ||
649 | " pushl %edi\n" \ | ||
650 | " pushl %esi\n" \ | ||
651 | " pushl %edx\n" \ | ||
652 | " pushl %ecx\n" \ | ||
653 | " pushl %ebx\n" | ||
654 | #define RESTORE_REGS_STRING \ | ||
655 | " popl %ebx\n" \ | ||
656 | " popl %ecx\n" \ | ||
657 | " popl %edx\n" \ | ||
658 | " popl %esi\n" \ | ||
659 | " popl %edi\n" \ | ||
660 | " popl %ebp\n" \ | ||
661 | " popl %eax\n" \ | ||
662 | /* Skip ds, es, fs, gs, orig_ax, and ip. Note: don't pop cs here*/\ | ||
663 | " addl $24, %esp\n" | ||
664 | #endif | ||
665 | |||
666 | /* | 611 | /* |
667 | * When a retprobed function returns, this code saves registers and | 612 | * When a retprobed function returns, this code saves registers and |
668 | * calls trampoline_handler() runs, which calls the kretprobe's handler. | 613 | * calls trampoline_handler() runs, which calls the kretprobe's handler. |
@@ -816,8 +761,8 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) | |||
816 | * jump instruction after the copied instruction, that jumps to the next | 761 | * jump instruction after the copied instruction, that jumps to the next |
817 | * instruction after the probepoint. | 762 | * instruction after the probepoint. |
818 | */ | 763 | */ |
819 | static void __kprobes resume_execution(struct kprobe *p, | 764 | static void __kprobes |
820 | struct pt_regs *regs, struct kprobe_ctlblk *kcb) | 765 | resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) |
821 | { | 766 | { |
822 | unsigned long *tos = stack_addr(regs); | 767 | unsigned long *tos = stack_addr(regs); |
823 | unsigned long copy_ip = (unsigned long)p->ainsn.insn; | 768 | unsigned long copy_ip = (unsigned long)p->ainsn.insn; |
@@ -996,8 +941,8 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) | |||
996 | /* | 941 | /* |
997 | * Wrapper routine for handling exceptions. | 942 | * Wrapper routine for handling exceptions. |
998 | */ | 943 | */ |
999 | int __kprobes kprobe_exceptions_notify(struct notifier_block *self, | 944 | int __kprobes |
1000 | unsigned long val, void *data) | 945 | kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data) |
1001 | { | 946 | { |
1002 | struct die_args *args = data; | 947 | struct die_args *args = data; |
1003 | int ret = NOTIFY_DONE; | 948 | int ret = NOTIFY_DONE; |
@@ -1107,466 +1052,9 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) | |||
1107 | return 0; | 1052 | return 0; |
1108 | } | 1053 | } |
1109 | 1054 | ||
1110 | |||
1111 | #ifdef CONFIG_OPTPROBES | ||
1112 | |||
1113 | /* Insert a call instruction at address 'from', which calls address 'to'.*/ | ||
1114 | static void __kprobes synthesize_relcall(void *from, void *to) | ||
1115 | { | ||
1116 | __synthesize_relative_insn(from, to, RELATIVECALL_OPCODE); | ||
1117 | } | ||
1118 | |||
1119 | /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */ | ||
1120 | static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, | ||
1121 | unsigned long val) | ||
1122 | { | ||
1123 | #ifdef CONFIG_X86_64 | ||
1124 | *addr++ = 0x48; | ||
1125 | *addr++ = 0xbf; | ||
1126 | #else | ||
1127 | *addr++ = 0xb8; | ||
1128 | #endif | ||
1129 | *(unsigned long *)addr = val; | ||
1130 | } | ||
1131 | |||
1132 | static void __used __kprobes kprobes_optinsn_template_holder(void) | ||
1133 | { | ||
1134 | asm volatile ( | ||
1135 | ".global optprobe_template_entry\n" | ||
1136 | "optprobe_template_entry: \n" | ||
1137 | #ifdef CONFIG_X86_64 | ||
1138 | /* We don't bother saving the ss register */ | ||
1139 | " pushq %rsp\n" | ||
1140 | " pushfq\n" | ||
1141 | SAVE_REGS_STRING | ||
1142 | " movq %rsp, %rsi\n" | ||
1143 | ".global optprobe_template_val\n" | ||
1144 | "optprobe_template_val: \n" | ||
1145 | ASM_NOP5 | ||
1146 | ASM_NOP5 | ||
1147 | ".global optprobe_template_call\n" | ||
1148 | "optprobe_template_call: \n" | ||
1149 | ASM_NOP5 | ||
1150 | /* Move flags to rsp */ | ||
1151 | " movq 144(%rsp), %rdx\n" | ||
1152 | " movq %rdx, 152(%rsp)\n" | ||
1153 | RESTORE_REGS_STRING | ||
1154 | /* Skip flags entry */ | ||
1155 | " addq $8, %rsp\n" | ||
1156 | " popfq\n" | ||
1157 | #else /* CONFIG_X86_32 */ | ||
1158 | " pushf\n" | ||
1159 | SAVE_REGS_STRING | ||
1160 | " movl %esp, %edx\n" | ||
1161 | ".global optprobe_template_val\n" | ||
1162 | "optprobe_template_val: \n" | ||
1163 | ASM_NOP5 | ||
1164 | ".global optprobe_template_call\n" | ||
1165 | "optprobe_template_call: \n" | ||
1166 | ASM_NOP5 | ||
1167 | RESTORE_REGS_STRING | ||
1168 | " addl $4, %esp\n" /* skip cs */ | ||
1169 | " popf\n" | ||
1170 | #endif | ||
1171 | ".global optprobe_template_end\n" | ||
1172 | "optprobe_template_end: \n"); | ||
1173 | } | ||
1174 | |||
1175 | #define TMPL_MOVE_IDX \ | ||
1176 | ((long)&optprobe_template_val - (long)&optprobe_template_entry) | ||
1177 | #define TMPL_CALL_IDX \ | ||
1178 | ((long)&optprobe_template_call - (long)&optprobe_template_entry) | ||
1179 | #define TMPL_END_IDX \ | ||
1180 | ((long)&optprobe_template_end - (long)&optprobe_template_entry) | ||
1181 | |||
1182 | #define INT3_SIZE sizeof(kprobe_opcode_t) | ||
1183 | |||
1184 | /* Optimized kprobe call back function: called from optinsn */ | ||
1185 | static void __kprobes optimized_callback(struct optimized_kprobe *op, | ||
1186 | struct pt_regs *regs) | ||
1187 | { | ||
1188 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | ||
1189 | unsigned long flags; | ||
1190 | |||
1191 | /* This is possible if op is under delayed unoptimizing */ | ||
1192 | if (kprobe_disabled(&op->kp)) | ||
1193 | return; | ||
1194 | |||
1195 | local_irq_save(flags); | ||
1196 | if (kprobe_running()) { | ||
1197 | kprobes_inc_nmissed_count(&op->kp); | ||
1198 | } else { | ||
1199 | /* Save skipped registers */ | ||
1200 | #ifdef CONFIG_X86_64 | ||
1201 | regs->cs = __KERNEL_CS; | ||
1202 | #else | ||
1203 | regs->cs = __KERNEL_CS | get_kernel_rpl(); | ||
1204 | regs->gs = 0; | ||
1205 | #endif | ||
1206 | regs->ip = (unsigned long)op->kp.addr + INT3_SIZE; | ||
1207 | regs->orig_ax = ~0UL; | ||
1208 | |||
1209 | __this_cpu_write(current_kprobe, &op->kp); | ||
1210 | kcb->kprobe_status = KPROBE_HIT_ACTIVE; | ||
1211 | opt_pre_handler(&op->kp, regs); | ||
1212 | __this_cpu_write(current_kprobe, NULL); | ||
1213 | } | ||
1214 | local_irq_restore(flags); | ||
1215 | } | ||
1216 | |||
1217 | static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src) | ||
1218 | { | ||
1219 | int len = 0, ret; | ||
1220 | |||
1221 | while (len < RELATIVEJUMP_SIZE) { | ||
1222 | ret = __copy_instruction(dest + len, src + len, 1); | ||
1223 | if (!ret || !can_boost(dest + len)) | ||
1224 | return -EINVAL; | ||
1225 | len += ret; | ||
1226 | } | ||
1227 | /* Check whether the address range is reserved */ | ||
1228 | if (ftrace_text_reserved(src, src + len - 1) || | ||
1229 | alternatives_text_reserved(src, src + len - 1) || | ||
1230 | jump_label_text_reserved(src, src + len - 1)) | ||
1231 | return -EBUSY; | ||
1232 | |||
1233 | return len; | ||
1234 | } | ||
1235 | |||
1236 | /* Check whether insn is indirect jump */ | ||
1237 | static int __kprobes insn_is_indirect_jump(struct insn *insn) | ||
1238 | { | ||
1239 | return ((insn->opcode.bytes[0] == 0xff && | ||
1240 | (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */ | ||
1241 | insn->opcode.bytes[0] == 0xea); /* Segment based jump */ | ||
1242 | } | ||
1243 | |||
1244 | /* Check whether insn jumps into specified address range */ | ||
1245 | static int insn_jump_into_range(struct insn *insn, unsigned long start, int len) | ||
1246 | { | ||
1247 | unsigned long target = 0; | ||
1248 | |||
1249 | switch (insn->opcode.bytes[0]) { | ||
1250 | case 0xe0: /* loopne */ | ||
1251 | case 0xe1: /* loope */ | ||
1252 | case 0xe2: /* loop */ | ||
1253 | case 0xe3: /* jcxz */ | ||
1254 | case 0xe9: /* near relative jump */ | ||
1255 | case 0xeb: /* short relative jump */ | ||
1256 | break; | ||
1257 | case 0x0f: | ||
1258 | if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */ | ||
1259 | break; | ||
1260 | return 0; | ||
1261 | default: | ||
1262 | if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */ | ||
1263 | break; | ||
1264 | return 0; | ||
1265 | } | ||
1266 | target = (unsigned long)insn->next_byte + insn->immediate.value; | ||
1267 | |||
1268 | return (start <= target && target <= start + len); | ||
1269 | } | ||
1270 | |||
1271 | /* Decode whole function to ensure any instructions don't jump into target */ | ||
1272 | static int __kprobes can_optimize(unsigned long paddr) | ||
1273 | { | ||
1274 | int ret; | ||
1275 | unsigned long addr, size = 0, offset = 0; | ||
1276 | struct insn insn; | ||
1277 | kprobe_opcode_t buf[MAX_INSN_SIZE]; | ||
1278 | |||
1279 | /* Lookup symbol including addr */ | ||
1280 | if (!kallsyms_lookup_size_offset(paddr, &size, &offset)) | ||
1281 | return 0; | ||
1282 | |||
1283 | /* | ||
1284 | * Do not optimize in the entry code due to the unstable | ||
1285 | * stack handling. | ||
1286 | */ | ||
1287 | if ((paddr >= (unsigned long )__entry_text_start) && | ||
1288 | (paddr < (unsigned long )__entry_text_end)) | ||
1289 | return 0; | ||
1290 | |||
1291 | /* Check there is enough space for a relative jump. */ | ||
1292 | if (size - offset < RELATIVEJUMP_SIZE) | ||
1293 | return 0; | ||
1294 | |||
1295 | /* Decode instructions */ | ||
1296 | addr = paddr - offset; | ||
1297 | while (addr < paddr - offset + size) { /* Decode until function end */ | ||
1298 | if (search_exception_tables(addr)) | ||
1299 | /* | ||
1300 | * Since some fixup code will jumps into this function, | ||
1301 | * we can't optimize kprobe in this function. | ||
1302 | */ | ||
1303 | return 0; | ||
1304 | kernel_insn_init(&insn, (void *)addr); | ||
1305 | insn_get_opcode(&insn); | ||
1306 | if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) { | ||
1307 | ret = recover_probed_instruction(buf, addr); | ||
1308 | if (ret) | ||
1309 | return 0; | ||
1310 | kernel_insn_init(&insn, buf); | ||
1311 | } | ||
1312 | insn_get_length(&insn); | ||
1313 | /* Recover address */ | ||
1314 | insn.kaddr = (void *)addr; | ||
1315 | insn.next_byte = (void *)(addr + insn.length); | ||
1316 | /* Check any instructions don't jump into target */ | ||
1317 | if (insn_is_indirect_jump(&insn) || | ||
1318 | insn_jump_into_range(&insn, paddr + INT3_SIZE, | ||
1319 | RELATIVE_ADDR_SIZE)) | ||
1320 | return 0; | ||
1321 | addr += insn.length; | ||
1322 | } | ||
1323 | |||
1324 | return 1; | ||
1325 | } | ||
1326 | |||
1327 | /* Check optimized_kprobe can actually be optimized. */ | ||
1328 | int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op) | ||
1329 | { | ||
1330 | int i; | ||
1331 | struct kprobe *p; | ||
1332 | |||
1333 | for (i = 1; i < op->optinsn.size; i++) { | ||
1334 | p = get_kprobe(op->kp.addr + i); | ||
1335 | if (p && !kprobe_disabled(p)) | ||
1336 | return -EEXIST; | ||
1337 | } | ||
1338 | |||
1339 | return 0; | ||
1340 | } | ||
1341 | |||
1342 | /* Check the addr is within the optimized instructions. */ | ||
1343 | int __kprobes arch_within_optimized_kprobe(struct optimized_kprobe *op, | ||
1344 | unsigned long addr) | ||
1345 | { | ||
1346 | return ((unsigned long)op->kp.addr <= addr && | ||
1347 | (unsigned long)op->kp.addr + op->optinsn.size > addr); | ||
1348 | } | ||
1349 | |||
1350 | /* Free optimized instruction slot */ | ||
1351 | static __kprobes | ||
1352 | void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty) | ||
1353 | { | ||
1354 | if (op->optinsn.insn) { | ||
1355 | free_optinsn_slot(op->optinsn.insn, dirty); | ||
1356 | op->optinsn.insn = NULL; | ||
1357 | op->optinsn.size = 0; | ||
1358 | } | ||
1359 | } | ||
1360 | |||
1361 | void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op) | ||
1362 | { | ||
1363 | __arch_remove_optimized_kprobe(op, 1); | ||
1364 | } | ||
1365 | |||
1366 | /* | ||
1367 | * Copy replacing target instructions | ||
1368 | * Target instructions MUST be relocatable (checked inside) | ||
1369 | */ | ||
1370 | int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op) | ||
1371 | { | ||
1372 | u8 *buf; | ||
1373 | int ret; | ||
1374 | long rel; | ||
1375 | |||
1376 | if (!can_optimize((unsigned long)op->kp.addr)) | ||
1377 | return -EILSEQ; | ||
1378 | |||
1379 | op->optinsn.insn = get_optinsn_slot(); | ||
1380 | if (!op->optinsn.insn) | ||
1381 | return -ENOMEM; | ||
1382 | |||
1383 | /* | ||
1384 | * Verify if the address gap is in 2GB range, because this uses | ||
1385 | * a relative jump. | ||
1386 | */ | ||
1387 | rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE; | ||
1388 | if (abs(rel) > 0x7fffffff) | ||
1389 | return -ERANGE; | ||
1390 | |||
1391 | buf = (u8 *)op->optinsn.insn; | ||
1392 | |||
1393 | /* Copy instructions into the out-of-line buffer */ | ||
1394 | ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr); | ||
1395 | if (ret < 0) { | ||
1396 | __arch_remove_optimized_kprobe(op, 0); | ||
1397 | return ret; | ||
1398 | } | ||
1399 | op->optinsn.size = ret; | ||
1400 | |||
1401 | /* Copy arch-dep-instance from template */ | ||
1402 | memcpy(buf, &optprobe_template_entry, TMPL_END_IDX); | ||
1403 | |||
1404 | /* Set probe information */ | ||
1405 | synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op); | ||
1406 | |||
1407 | /* Set probe function call */ | ||
1408 | synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback); | ||
1409 | |||
1410 | /* Set returning jmp instruction at the tail of out-of-line buffer */ | ||
1411 | synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size, | ||
1412 | (u8 *)op->kp.addr + op->optinsn.size); | ||
1413 | |||
1414 | flush_icache_range((unsigned long) buf, | ||
1415 | (unsigned long) buf + TMPL_END_IDX + | ||
1416 | op->optinsn.size + RELATIVEJUMP_SIZE); | ||
1417 | return 0; | ||
1418 | } | ||
1419 | |||
1420 | #define MAX_OPTIMIZE_PROBES 256 | ||
1421 | static struct text_poke_param *jump_poke_params; | ||
1422 | static struct jump_poke_buffer { | ||
1423 | u8 buf[RELATIVEJUMP_SIZE]; | ||
1424 | } *jump_poke_bufs; | ||
1425 | |||
1426 | static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm, | ||
1427 | u8 *insn_buf, | ||
1428 | struct optimized_kprobe *op) | ||
1429 | { | ||
1430 | s32 rel = (s32)((long)op->optinsn.insn - | ||
1431 | ((long)op->kp.addr + RELATIVEJUMP_SIZE)); | ||
1432 | |||
1433 | /* Backup instructions which will be replaced by jump address */ | ||
1434 | memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE, | ||
1435 | RELATIVE_ADDR_SIZE); | ||
1436 | |||
1437 | insn_buf[0] = RELATIVEJUMP_OPCODE; | ||
1438 | *(s32 *)(&insn_buf[1]) = rel; | ||
1439 | |||
1440 | tprm->addr = op->kp.addr; | ||
1441 | tprm->opcode = insn_buf; | ||
1442 | tprm->len = RELATIVEJUMP_SIZE; | ||
1443 | } | ||
1444 | |||
1445 | /* | ||
1446 | * Replace breakpoints (int3) with relative jumps. | ||
1447 | * Caller must call with locking kprobe_mutex and text_mutex. | ||
1448 | */ | ||
1449 | void __kprobes arch_optimize_kprobes(struct list_head *oplist) | ||
1450 | { | ||
1451 | struct optimized_kprobe *op, *tmp; | ||
1452 | int c = 0; | ||
1453 | |||
1454 | list_for_each_entry_safe(op, tmp, oplist, list) { | ||
1455 | WARN_ON(kprobe_disabled(&op->kp)); | ||
1456 | /* Setup param */ | ||
1457 | setup_optimize_kprobe(&jump_poke_params[c], | ||
1458 | jump_poke_bufs[c].buf, op); | ||
1459 | list_del_init(&op->list); | ||
1460 | if (++c >= MAX_OPTIMIZE_PROBES) | ||
1461 | break; | ||
1462 | } | ||
1463 | |||
1464 | /* | ||
1465 | * text_poke_smp doesn't support NMI/MCE code modifying. | ||
1466 | * However, since kprobes itself also doesn't support NMI/MCE | ||
1467 | * code probing, it's not a problem. | ||
1468 | */ | ||
1469 | text_poke_smp_batch(jump_poke_params, c); | ||
1470 | } | ||
1471 | |||
1472 | static void __kprobes setup_unoptimize_kprobe(struct text_poke_param *tprm, | ||
1473 | u8 *insn_buf, | ||
1474 | struct optimized_kprobe *op) | ||
1475 | { | ||
1476 | /* Set int3 to first byte for kprobes */ | ||
1477 | insn_buf[0] = BREAKPOINT_INSTRUCTION; | ||
1478 | memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); | ||
1479 | |||
1480 | tprm->addr = op->kp.addr; | ||
1481 | tprm->opcode = insn_buf; | ||
1482 | tprm->len = RELATIVEJUMP_SIZE; | ||
1483 | } | ||
1484 | |||
1485 | /* | ||
1486 | * Recover original instructions and breakpoints from relative jumps. | ||
1487 | * Caller must call with locking kprobe_mutex. | ||
1488 | */ | ||
1489 | extern void arch_unoptimize_kprobes(struct list_head *oplist, | ||
1490 | struct list_head *done_list) | ||
1491 | { | ||
1492 | struct optimized_kprobe *op, *tmp; | ||
1493 | int c = 0; | ||
1494 | |||
1495 | list_for_each_entry_safe(op, tmp, oplist, list) { | ||
1496 | /* Setup param */ | ||
1497 | setup_unoptimize_kprobe(&jump_poke_params[c], | ||
1498 | jump_poke_bufs[c].buf, op); | ||
1499 | list_move(&op->list, done_list); | ||
1500 | if (++c >= MAX_OPTIMIZE_PROBES) | ||
1501 | break; | ||
1502 | } | ||
1503 | |||
1504 | /* | ||
1505 | * text_poke_smp doesn't support NMI/MCE code modifying. | ||
1506 | * However, since kprobes itself also doesn't support NMI/MCE | ||
1507 | * code probing, it's not a problem. | ||
1508 | */ | ||
1509 | text_poke_smp_batch(jump_poke_params, c); | ||
1510 | } | ||
1511 | |||
1512 | /* Replace a relative jump with a breakpoint (int3). */ | ||
1513 | void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op) | ||
1514 | { | ||
1515 | u8 buf[RELATIVEJUMP_SIZE]; | ||
1516 | |||
1517 | /* Set int3 to first byte for kprobes */ | ||
1518 | buf[0] = BREAKPOINT_INSTRUCTION; | ||
1519 | memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); | ||
1520 | text_poke_smp(op->kp.addr, buf, RELATIVEJUMP_SIZE); | ||
1521 | } | ||
1522 | |||
1523 | static int __kprobes setup_detour_execution(struct kprobe *p, | ||
1524 | struct pt_regs *regs, | ||
1525 | int reenter) | ||
1526 | { | ||
1527 | struct optimized_kprobe *op; | ||
1528 | |||
1529 | if (p->flags & KPROBE_FLAG_OPTIMIZED) { | ||
1530 | /* This kprobe is really able to run optimized path. */ | ||
1531 | op = container_of(p, struct optimized_kprobe, kp); | ||
1532 | /* Detour through copied instructions */ | ||
1533 | regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX; | ||
1534 | if (!reenter) | ||
1535 | reset_current_kprobe(); | ||
1536 | preempt_enable_no_resched(); | ||
1537 | return 1; | ||
1538 | } | ||
1539 | return 0; | ||
1540 | } | ||
1541 | |||
1542 | static int __kprobes init_poke_params(void) | ||
1543 | { | ||
1544 | /* Allocate code buffer and parameter array */ | ||
1545 | jump_poke_bufs = kmalloc(sizeof(struct jump_poke_buffer) * | ||
1546 | MAX_OPTIMIZE_PROBES, GFP_KERNEL); | ||
1547 | if (!jump_poke_bufs) | ||
1548 | return -ENOMEM; | ||
1549 | |||
1550 | jump_poke_params = kmalloc(sizeof(struct text_poke_param) * | ||
1551 | MAX_OPTIMIZE_PROBES, GFP_KERNEL); | ||
1552 | if (!jump_poke_params) { | ||
1553 | kfree(jump_poke_bufs); | ||
1554 | jump_poke_bufs = NULL; | ||
1555 | return -ENOMEM; | ||
1556 | } | ||
1557 | |||
1558 | return 0; | ||
1559 | } | ||
1560 | #else /* !CONFIG_OPTPROBES */ | ||
1561 | static int __kprobes init_poke_params(void) | ||
1562 | { | ||
1563 | return 0; | ||
1564 | } | ||
1565 | #endif | ||
1566 | |||
1567 | int __init arch_init_kprobes(void) | 1055 | int __init arch_init_kprobes(void) |
1568 | { | 1056 | { |
1569 | return init_poke_params(); | 1057 | return arch_init_optprobes(); |
1570 | } | 1058 | } |
1571 | 1059 | ||
1572 | int __kprobes arch_trampoline_kprobe(struct kprobe *p) | 1060 | int __kprobes arch_trampoline_kprobe(struct kprobe *p) |
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index f0c6fd6f176b..694d801bf606 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c | |||
@@ -438,9 +438,9 @@ void __init kvm_guest_init(void) | |||
438 | static __init int activate_jump_labels(void) | 438 | static __init int activate_jump_labels(void) |
439 | { | 439 | { |
440 | if (has_steal_clock) { | 440 | if (has_steal_clock) { |
441 | jump_label_inc(¶virt_steal_enabled); | 441 | static_key_slow_inc(¶virt_steal_enabled); |
442 | if (steal_acc) | 442 | if (steal_acc) |
443 | jump_label_inc(¶virt_steal_rq_enabled); | 443 | static_key_slow_inc(¶virt_steal_rq_enabled); |
444 | } | 444 | } |
445 | 445 | ||
446 | return 0; | 446 | return 0; |
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c index fda91c307104..87a0f8688301 100644 --- a/arch/x86/kernel/microcode_core.c +++ b/arch/x86/kernel/microcode_core.c | |||
@@ -86,6 +86,7 @@ | |||
86 | 86 | ||
87 | #include <asm/microcode.h> | 87 | #include <asm/microcode.h> |
88 | #include <asm/processor.h> | 88 | #include <asm/processor.h> |
89 | #include <asm/cpu_device_id.h> | ||
89 | 90 | ||
90 | MODULE_DESCRIPTION("Microcode Update Driver"); | 91 | MODULE_DESCRIPTION("Microcode Update Driver"); |
91 | MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>"); | 92 | MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>"); |
@@ -504,6 +505,20 @@ static struct notifier_block __refdata mc_cpu_notifier = { | |||
504 | .notifier_call = mc_cpu_callback, | 505 | .notifier_call = mc_cpu_callback, |
505 | }; | 506 | }; |
506 | 507 | ||
508 | #ifdef MODULE | ||
509 | /* Autoload on Intel and AMD systems */ | ||
510 | static const struct x86_cpu_id microcode_id[] = { | ||
511 | #ifdef CONFIG_MICROCODE_INTEL | ||
512 | { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, }, | ||
513 | #endif | ||
514 | #ifdef CONFIG_MICROCODE_AMD | ||
515 | { X86_VENDOR_AMD, X86_FAMILY_ANY, X86_MODEL_ANY, }, | ||
516 | #endif | ||
517 | {} | ||
518 | }; | ||
519 | MODULE_DEVICE_TABLE(x86cpu, microcode_id); | ||
520 | #endif | ||
521 | |||
507 | static int __init microcode_init(void) | 522 | static int __init microcode_init(void) |
508 | { | 523 | { |
509 | struct cpuinfo_x86 *c = &cpu_data(0); | 524 | struct cpuinfo_x86 *c = &cpu_data(0); |
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index d90272e6bc40..ada2f99388dd 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c | |||
@@ -202,8 +202,8 @@ static void native_flush_tlb_single(unsigned long addr) | |||
202 | __native_flush_tlb_single(addr); | 202 | __native_flush_tlb_single(addr); |
203 | } | 203 | } |
204 | 204 | ||
205 | struct jump_label_key paravirt_steal_enabled; | 205 | struct static_key paravirt_steal_enabled; |
206 | struct jump_label_key paravirt_steal_rq_enabled; | 206 | struct static_key paravirt_steal_rq_enabled; |
207 | 207 | ||
208 | static u64 native_steal_clock(int cpu) | 208 | static u64 native_steal_clock(int cpu) |
209 | { | 209 | { |
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 15763af7bfe3..44eefde92109 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -377,8 +377,8 @@ static inline int hlt_use_halt(void) | |||
377 | void default_idle(void) | 377 | void default_idle(void) |
378 | { | 378 | { |
379 | if (hlt_use_halt()) { | 379 | if (hlt_use_halt()) { |
380 | trace_power_start(POWER_CSTATE, 1, smp_processor_id()); | 380 | trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id()); |
381 | trace_cpu_idle(1, smp_processor_id()); | 381 | trace_cpu_idle_rcuidle(1, smp_processor_id()); |
382 | current_thread_info()->status &= ~TS_POLLING; | 382 | current_thread_info()->status &= ~TS_POLLING; |
383 | /* | 383 | /* |
384 | * TS_POLLING-cleared state must be visible before we | 384 | * TS_POLLING-cleared state must be visible before we |
@@ -391,8 +391,8 @@ void default_idle(void) | |||
391 | else | 391 | else |
392 | local_irq_enable(); | 392 | local_irq_enable(); |
393 | current_thread_info()->status |= TS_POLLING; | 393 | current_thread_info()->status |= TS_POLLING; |
394 | trace_power_end(smp_processor_id()); | 394 | trace_power_end_rcuidle(smp_processor_id()); |
395 | trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id()); | 395 | trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); |
396 | } else { | 396 | } else { |
397 | local_irq_enable(); | 397 | local_irq_enable(); |
398 | /* loop is done by the caller */ | 398 | /* loop is done by the caller */ |
@@ -450,8 +450,8 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait); | |||
450 | static void mwait_idle(void) | 450 | static void mwait_idle(void) |
451 | { | 451 | { |
452 | if (!need_resched()) { | 452 | if (!need_resched()) { |
453 | trace_power_start(POWER_CSTATE, 1, smp_processor_id()); | 453 | trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id()); |
454 | trace_cpu_idle(1, smp_processor_id()); | 454 | trace_cpu_idle_rcuidle(1, smp_processor_id()); |
455 | if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) | 455 | if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) |
456 | clflush((void *)¤t_thread_info()->flags); | 456 | clflush((void *)¤t_thread_info()->flags); |
457 | 457 | ||
@@ -461,8 +461,8 @@ static void mwait_idle(void) | |||
461 | __sti_mwait(0, 0); | 461 | __sti_mwait(0, 0); |
462 | else | 462 | else |
463 | local_irq_enable(); | 463 | local_irq_enable(); |
464 | trace_power_end(smp_processor_id()); | 464 | trace_power_end_rcuidle(smp_processor_id()); |
465 | trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id()); | 465 | trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); |
466 | } else | 466 | } else |
467 | local_irq_enable(); | 467 | local_irq_enable(); |
468 | } | 468 | } |
@@ -474,13 +474,13 @@ static void mwait_idle(void) | |||
474 | */ | 474 | */ |
475 | static void poll_idle(void) | 475 | static void poll_idle(void) |
476 | { | 476 | { |
477 | trace_power_start(POWER_CSTATE, 0, smp_processor_id()); | 477 | trace_power_start_rcuidle(POWER_CSTATE, 0, smp_processor_id()); |
478 | trace_cpu_idle(0, smp_processor_id()); | 478 | trace_cpu_idle_rcuidle(0, smp_processor_id()); |
479 | local_irq_enable(); | 479 | local_irq_enable(); |
480 | while (!need_resched()) | 480 | while (!need_resched()) |
481 | cpu_relax(); | 481 | cpu_relax(); |
482 | trace_power_end(smp_processor_id()); | 482 | trace_power_end_rcuidle(smp_processor_id()); |
483 | trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id()); | 483 | trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); |
484 | } | 484 | } |
485 | 485 | ||
486 | /* | 486 | /* |
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index c08d1ff12b7c..49888fefe794 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c | |||
@@ -119,9 +119,7 @@ void cpu_idle(void) | |||
119 | } | 119 | } |
120 | rcu_idle_exit(); | 120 | rcu_idle_exit(); |
121 | tick_nohz_idle_exit(); | 121 | tick_nohz_idle_exit(); |
122 | preempt_enable_no_resched(); | 122 | schedule_preempt_disabled(); |
123 | schedule(); | ||
124 | preempt_disable(); | ||
125 | } | 123 | } |
126 | } | 124 | } |
127 | 125 | ||
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index cfa5c90c01db..e34257c70c28 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
@@ -156,9 +156,7 @@ void cpu_idle(void) | |||
156 | } | 156 | } |
157 | 157 | ||
158 | tick_nohz_idle_exit(); | 158 | tick_nohz_idle_exit(); |
159 | preempt_enable_no_resched(); | 159 | schedule_preempt_disabled(); |
160 | schedule(); | ||
161 | preempt_disable(); | ||
162 | } | 160 | } |
163 | } | 161 | } |
164 | 162 | ||
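
The idle loops in the two hunks above (and the xtensa one further down) replace the open-coded enable/schedule/disable sequence with schedule_preempt_disabled(). Going only by the lines it replaces, the helper behaves roughly like this sketch (the real implementation lives in the scheduler core; the name here is deliberately different):

#include <linux/preempt.h>
#include <linux/sched.h>

/* Approximation inferred from the three lines removed above. */
static void schedule_preempt_disabled_sketch(void)
{
	preempt_enable_no_resched();	/* drop the preempt count without rescheduling */
	schedule();			/* run something else */
	preempt_disable();		/* return with preemption disabled again */
}
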
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 66d250c00d11..58f78165d308 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -291,19 +291,6 @@ notrace static void __cpuinit start_secondary(void *unused) | |||
291 | per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; | 291 | per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; |
292 | x86_platform.nmi_init(); | 292 | x86_platform.nmi_init(); |
293 | 293 | ||
294 | /* | ||
295 | * Wait until the cpu which brought this one up marked it | ||
296 | * online before enabling interrupts. If we don't do that then | ||
297 | * we can end up waking up the softirq thread before this cpu | ||
298 | * reached the active state, which makes the scheduler unhappy | ||
299 | * and schedule the softirq thread on the wrong cpu. This is | ||
300 | * only observable with forced threaded interrupts, but in | ||
301 | * theory it could also happen w/o them. It's just way harder | ||
302 | * to achieve. | ||
303 | */ | ||
304 | while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask)) | ||
305 | cpu_relax(); | ||
306 | |||
307 | /* enable local interrupts */ | 294 | /* enable local interrupts */ |
308 | local_irq_enable(); | 295 | local_irq_enable(); |
309 | 296 | ||
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c index dd5fbf4101fc..c6eba2b42673 100644 --- a/arch/x86/kernel/time.c +++ b/arch/x86/kernel/time.c | |||
@@ -57,9 +57,6 @@ EXPORT_SYMBOL(profile_pc); | |||
57 | */ | 57 | */ |
58 | static irqreturn_t timer_interrupt(int irq, void *dev_id) | 58 | static irqreturn_t timer_interrupt(int irq, void *dev_id) |
59 | { | 59 | { |
60 | /* Keep nmi watchdog up to date */ | ||
61 | inc_irq_stat(irq0_irqs); | ||
62 | |||
63 | global_clock_event->event_handler(global_clock_event); | 60 | global_clock_event->event_handler(global_clock_event); |
64 | 61 | ||
65 | /* MCA bus quirk: Acknowledge irq0 by setting bit 7 in port 0x61 */ | 62 | /* MCA bus quirk: Acknowledge irq0 by setting bit 7 in port 0x61 */ |
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index a62c201c97ec..183c5925a9fe 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c | |||
@@ -620,7 +620,8 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) | |||
620 | 620 | ||
621 | if (cpu_khz) { | 621 | if (cpu_khz) { |
622 | *scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz; | 622 | *scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz; |
623 | *offset = ns_now - (tsc_now * *scale >> CYC2NS_SCALE_FACTOR); | 623 | *offset = ns_now - mult_frac(tsc_now, *scale, |
624 | (1UL << CYC2NS_SCALE_FACTOR)); | ||
624 | } | 625 | } |
625 | 626 | ||
626 | sched_clock_idle_wakeup_event(0); | 627 | sched_clock_idle_wakeup_event(0); |
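
mult_frac(x, numer, denom) computes x * numer / denom while keeping the intermediate product small, which matters here because tsc_now * *scale can outgrow 64 bits once the TSC has been counting for long enough. A standalone sketch of the usual quotient/remainder formulation (the kernel's version is a type-generic macro):

#include <stdint.h>

static uint64_t mult_frac_u64(uint64_t x, uint64_t numer, uint64_t denom)
{
	uint64_t quot = x / denom;	/* whole multiples of denom in x */
	uint64_t rem  = x % denom;	/* rem < denom, so rem * numer stays small */

	return quot * numer + (rem * numer) / denom;
}

With denom = 1UL << CYC2NS_SCALE_FACTOR this gives the same result as the old shift expression whenever that expression did not overflow.
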
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index 9eba29b46cb7..fc25e60a5884 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c | |||
@@ -42,7 +42,7 @@ static __cpuinitdata int nr_warps; | |||
42 | /* | 42 | /* |
43 | * TSC-warp measurement loop running on both CPUs: | 43 | * TSC-warp measurement loop running on both CPUs: |
44 | */ | 44 | */ |
45 | static __cpuinit void check_tsc_warp(void) | 45 | static __cpuinit void check_tsc_warp(unsigned int timeout) |
46 | { | 46 | { |
47 | cycles_t start, now, prev, end; | 47 | cycles_t start, now, prev, end; |
48 | int i; | 48 | int i; |
@@ -51,9 +51,9 @@ static __cpuinit void check_tsc_warp(void) | |||
51 | start = get_cycles(); | 51 | start = get_cycles(); |
52 | rdtsc_barrier(); | 52 | rdtsc_barrier(); |
53 | /* | 53 | /* |
54 | * The measurement runs for 20 msecs: | 54 | * The measurement runs for 'timeout' msecs: |
55 | */ | 55 | */ |
56 | end = start + tsc_khz * 20ULL; | 56 | end = start + (cycles_t) tsc_khz * timeout; |
57 | now = start; | 57 | now = start; |
58 | 58 | ||
59 | for (i = 0; ; i++) { | 59 | for (i = 0; ; i++) { |
@@ -99,6 +99,25 @@ static __cpuinit void check_tsc_warp(void) | |||
99 | } | 99 | } |
100 | 100 | ||
101 | /* | 101 | /* |
102 | * If the target CPU coming online doesn't have any of its core-siblings | ||
103 | * online, a timeout of 20msec will be used for the TSC-warp measurement | ||
104 | * loop. Otherwise a smaller timeout of 2msec will be used, as we have some | ||
105 | * information about this socket already (and this information grows as we | ||
106 | * have more and more logical-siblings in that socket). | ||
107 | * | ||
108 | * Ideally we should be able to skip the TSC sync check on the other | ||
109 | * core-siblings, if the first logical CPU in a socket passed the sync test. | ||
110 | * But as the TSC is per-logical CPU and can potentially be modified wrongly | ||
111 | * by the bios, TSC sync test for smaller duration should be able | ||
112 | * to catch such errors. Also this will catch the condition where all the | ||
113 | * cores in the socket doesn't get reset at the same time. | ||
114 | */ | ||
115 | static inline unsigned int loop_timeout(int cpu) | ||
116 | { | ||
117 | return (cpumask_weight(cpu_core_mask(cpu)) > 1) ? 2 : 20; | ||
118 | } | ||
119 | |||
120 | /* | ||
102 | * Source CPU calls into this - it waits for the freshly booted | 121 | * Source CPU calls into this - it waits for the freshly booted |
103 | * target CPU to arrive and then starts the measurement: | 122 | * target CPU to arrive and then starts the measurement: |
104 | */ | 123 | */ |
@@ -135,7 +154,7 @@ void __cpuinit check_tsc_sync_source(int cpu) | |||
135 | */ | 154 | */ |
136 | atomic_inc(&start_count); | 155 | atomic_inc(&start_count); |
137 | 156 | ||
138 | check_tsc_warp(); | 157 | check_tsc_warp(loop_timeout(cpu)); |
139 | 158 | ||
140 | while (atomic_read(&stop_count) != cpus-1) | 159 | while (atomic_read(&stop_count) != cpus-1) |
141 | cpu_relax(); | 160 | cpu_relax(); |
@@ -183,7 +202,7 @@ void __cpuinit check_tsc_sync_target(void) | |||
183 | while (atomic_read(&start_count) != cpus) | 202 | while (atomic_read(&start_count) != cpus) |
184 | cpu_relax(); | 203 | cpu_relax(); |
185 | 204 | ||
186 | check_tsc_warp(); | 205 | check_tsc_warp(loop_timeout(smp_processor_id())); |
187 | 206 | ||
188 | /* | 207 | /* |
189 | * Ok, we are done: | 208 | * Ok, we are done: |
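
To put loop_timeout() in concrete numbers: with tsc_khz around 2,400,000 (a 2.4 GHz TSC), end = start + tsc_khz * timeout covers roughly 48 million cycles for the 20 ms first-in-socket check and about 4.8 million cycles for the 2 ms check used once a core sibling is already online.
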
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c index fe15dcc07a6b..ea7b4fd34676 100644 --- a/arch/x86/kvm/mmu_audit.c +++ b/arch/x86/kvm/mmu_audit.c | |||
@@ -234,7 +234,7 @@ static void audit_vcpu_spte(struct kvm_vcpu *vcpu) | |||
234 | } | 234 | } |
235 | 235 | ||
236 | static bool mmu_audit; | 236 | static bool mmu_audit; |
237 | static struct jump_label_key mmu_audit_key; | 237 | static struct static_key mmu_audit_key; |
238 | 238 | ||
239 | static void __kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) | 239 | static void __kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) |
240 | { | 240 | { |
@@ -250,7 +250,7 @@ static void __kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) | |||
250 | 250 | ||
251 | static inline void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) | 251 | static inline void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) |
252 | { | 252 | { |
253 | if (static_branch((&mmu_audit_key))) | 253 | if (static_key_false((&mmu_audit_key))) |
254 | __kvm_mmu_audit(vcpu, point); | 254 | __kvm_mmu_audit(vcpu, point); |
255 | } | 255 | } |
256 | 256 | ||
@@ -259,7 +259,7 @@ static void mmu_audit_enable(void) | |||
259 | if (mmu_audit) | 259 | if (mmu_audit) |
260 | return; | 260 | return; |
261 | 261 | ||
262 | jump_label_inc(&mmu_audit_key); | 262 | static_key_slow_inc(&mmu_audit_key); |
263 | mmu_audit = true; | 263 | mmu_audit = true; |
264 | } | 264 | } |
265 | 265 | ||
@@ -268,7 +268,7 @@ static void mmu_audit_disable(void) | |||
268 | if (!mmu_audit) | 268 | if (!mmu_audit) |
269 | return; | 269 | return; |
270 | 270 | ||
271 | jump_label_dec(&mmu_audit_key); | 271 | static_key_slow_dec(&mmu_audit_key); |
272 | mmu_audit = false; | 272 | mmu_audit = false; |
273 | } | 273 | } |
274 | 274 | ||
diff --git a/arch/x86/lib/inat.c b/arch/x86/lib/inat.c index 88ad5fbda6e1..c1f01a8e9f65 100644 --- a/arch/x86/lib/inat.c +++ b/arch/x86/lib/inat.c | |||
@@ -29,46 +29,46 @@ insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode) | |||
29 | return inat_primary_table[opcode]; | 29 | return inat_primary_table[opcode]; |
30 | } | 30 | } |
31 | 31 | ||
32 | insn_attr_t inat_get_escape_attribute(insn_byte_t opcode, insn_byte_t last_pfx, | 32 | int inat_get_last_prefix_id(insn_byte_t last_pfx) |
33 | { | ||
34 | insn_attr_t lpfx_attr; | ||
35 | |||
36 | lpfx_attr = inat_get_opcode_attribute(last_pfx); | ||
37 | return inat_last_prefix_id(lpfx_attr); | ||
38 | } | ||
39 | |||
40 | insn_attr_t inat_get_escape_attribute(insn_byte_t opcode, int lpfx_id, | ||
33 | insn_attr_t esc_attr) | 41 | insn_attr_t esc_attr) |
34 | { | 42 | { |
35 | const insn_attr_t *table; | 43 | const insn_attr_t *table; |
36 | insn_attr_t lpfx_attr; | 44 | int n; |
37 | int n, m = 0; | ||
38 | 45 | ||
39 | n = inat_escape_id(esc_attr); | 46 | n = inat_escape_id(esc_attr); |
40 | if (last_pfx) { | 47 | |
41 | lpfx_attr = inat_get_opcode_attribute(last_pfx); | ||
42 | m = inat_last_prefix_id(lpfx_attr); | ||
43 | } | ||
44 | table = inat_escape_tables[n][0]; | 48 | table = inat_escape_tables[n][0]; |
45 | if (!table) | 49 | if (!table) |
46 | return 0; | 50 | return 0; |
47 | if (inat_has_variant(table[opcode]) && m) { | 51 | if (inat_has_variant(table[opcode]) && lpfx_id) { |
48 | table = inat_escape_tables[n][m]; | 52 | table = inat_escape_tables[n][lpfx_id]; |
49 | if (!table) | 53 | if (!table) |
50 | return 0; | 54 | return 0; |
51 | } | 55 | } |
52 | return table[opcode]; | 56 | return table[opcode]; |
53 | } | 57 | } |
54 | 58 | ||
55 | insn_attr_t inat_get_group_attribute(insn_byte_t modrm, insn_byte_t last_pfx, | 59 | insn_attr_t inat_get_group_attribute(insn_byte_t modrm, int lpfx_id, |
56 | insn_attr_t grp_attr) | 60 | insn_attr_t grp_attr) |
57 | { | 61 | { |
58 | const insn_attr_t *table; | 62 | const insn_attr_t *table; |
59 | insn_attr_t lpfx_attr; | 63 | int n; |
60 | int n, m = 0; | ||
61 | 64 | ||
62 | n = inat_group_id(grp_attr); | 65 | n = inat_group_id(grp_attr); |
63 | if (last_pfx) { | 66 | |
64 | lpfx_attr = inat_get_opcode_attribute(last_pfx); | ||
65 | m = inat_last_prefix_id(lpfx_attr); | ||
66 | } | ||
67 | table = inat_group_tables[n][0]; | 67 | table = inat_group_tables[n][0]; |
68 | if (!table) | 68 | if (!table) |
69 | return inat_group_common_attribute(grp_attr); | 69 | return inat_group_common_attribute(grp_attr); |
70 | if (inat_has_variant(table[X86_MODRM_REG(modrm)]) && m) { | 70 | if (inat_has_variant(table[X86_MODRM_REG(modrm)]) && lpfx_id) { |
71 | table = inat_group_tables[n][m]; | 71 | table = inat_group_tables[n][lpfx_id]; |
72 | if (!table) | 72 | if (!table) |
73 | return inat_group_common_attribute(grp_attr); | 73 | return inat_group_common_attribute(grp_attr); |
74 | } | 74 | } |
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c index 5a1f9f3e3fbb..25feb1ae71c5 100644 --- a/arch/x86/lib/insn.c +++ b/arch/x86/lib/insn.c | |||
@@ -185,7 +185,8 @@ err_out: | |||
185 | void insn_get_opcode(struct insn *insn) | 185 | void insn_get_opcode(struct insn *insn) |
186 | { | 186 | { |
187 | struct insn_field *opcode = &insn->opcode; | 187 | struct insn_field *opcode = &insn->opcode; |
188 | insn_byte_t op, pfx; | 188 | insn_byte_t op; |
189 | int pfx_id; | ||
189 | if (opcode->got) | 190 | if (opcode->got) |
190 | return; | 191 | return; |
191 | if (!insn->prefixes.got) | 192 | if (!insn->prefixes.got) |
@@ -212,8 +213,8 @@ void insn_get_opcode(struct insn *insn) | |||
212 | /* Get escaped opcode */ | 213 | /* Get escaped opcode */ |
213 | op = get_next(insn_byte_t, insn); | 214 | op = get_next(insn_byte_t, insn); |
214 | opcode->bytes[opcode->nbytes++] = op; | 215 | opcode->bytes[opcode->nbytes++] = op; |
215 | pfx = insn_last_prefix(insn); | 216 | pfx_id = insn_last_prefix_id(insn); |
216 | insn->attr = inat_get_escape_attribute(op, pfx, insn->attr); | 217 | insn->attr = inat_get_escape_attribute(op, pfx_id, insn->attr); |
217 | } | 218 | } |
218 | if (inat_must_vex(insn->attr)) | 219 | if (inat_must_vex(insn->attr)) |
219 | insn->attr = 0; /* This instruction is bad */ | 220 | insn->attr = 0; /* This instruction is bad */ |
@@ -235,7 +236,7 @@ err_out: | |||
235 | void insn_get_modrm(struct insn *insn) | 236 | void insn_get_modrm(struct insn *insn) |
236 | { | 237 | { |
237 | struct insn_field *modrm = &insn->modrm; | 238 | struct insn_field *modrm = &insn->modrm; |
238 | insn_byte_t pfx, mod; | 239 | insn_byte_t pfx_id, mod; |
239 | if (modrm->got) | 240 | if (modrm->got) |
240 | return; | 241 | return; |
241 | if (!insn->opcode.got) | 242 | if (!insn->opcode.got) |
@@ -246,8 +247,8 @@ void insn_get_modrm(struct insn *insn) | |||
246 | modrm->value = mod; | 247 | modrm->value = mod; |
247 | modrm->nbytes = 1; | 248 | modrm->nbytes = 1; |
248 | if (inat_is_group(insn->attr)) { | 249 | if (inat_is_group(insn->attr)) { |
249 | pfx = insn_last_prefix(insn); | 250 | pfx_id = insn_last_prefix_id(insn); |
250 | insn->attr = inat_get_group_attribute(mod, pfx, | 251 | insn->attr = inat_get_group_attribute(mod, pfx_id, |
251 | insn->attr); | 252 | insn->attr); |
252 | if (insn_is_avx(insn) && !inat_accept_vex(insn->attr)) | 253 | if (insn_is_avx(insn) && !inat_accept_vex(insn->attr)) |
253 | insn->attr = 0; /* This is bad */ | 254 | insn->attr = 0; /* This is bad */ |
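
For context on where these per-field getters sit: callers normally drive the decoder through insn_init() and one of the insn_get_*() entry points, which pull in prefixes, opcode and ModRM on demand. A hedged usage sketch (decode_len() is a made-up wrapper; insn_init() takes three arguments in this version of the decoder):

#include <asm/insn.h>

static int decode_len(const void *kaddr, int x86_64)
{
	struct insn insn;

	insn_init(&insn, kaddr, x86_64);
	insn_get_length(&insn);	/* decodes prefixes, opcode, modrm, ... as needed */
	return insn.length;
}
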
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 7c1b765ecc59..5671752f8d9c 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c | |||
@@ -475,8 +475,10 @@ void bpf_jit_compile(struct sk_filter *fp) | |||
475 | case BPF_S_LD_W_ABS: | 475 | case BPF_S_LD_W_ABS: |
476 | func = sk_load_word; | 476 | func = sk_load_word; |
477 | common_load: seen |= SEEN_DATAREF; | 477 | common_load: seen |= SEEN_DATAREF; |
478 | if ((int)K < 0) | 478 | if ((int)K < 0) { |
479 | /* Abort the JIT because __load_pointer() is needed. */ | ||
479 | goto out; | 480 | goto out; |
481 | } | ||
480 | t_offset = func - (image + addrs[i]); | 482 | t_offset = func - (image + addrs[i]); |
481 | EMIT1_off32(0xbe, K); /* mov imm32,%esi */ | 483 | EMIT1_off32(0xbe, K); /* mov imm32,%esi */ |
482 | EMIT1_off32(0xe8, t_offset); /* call */ | 484 | EMIT1_off32(0xe8, t_offset); /* call */ |
@@ -489,14 +491,8 @@ common_load: seen |= SEEN_DATAREF; | |||
489 | goto common_load; | 491 | goto common_load; |
490 | case BPF_S_LDX_B_MSH: | 492 | case BPF_S_LDX_B_MSH: |
491 | if ((int)K < 0) { | 493 | if ((int)K < 0) { |
492 | if (pc_ret0 > 0) { | 494 | /* Abort the JIT because __load_pointer() is needed. */ |
493 | /* addrs[pc_ret0 - 1] is the start address */ | 495 | goto out; |
494 | EMIT_JMP(addrs[pc_ret0 - 1] - addrs[i]); | ||
495 | break; | ||
496 | } | ||
497 | CLEAR_A(); | ||
498 | EMIT_JMP(cleanup_addr - addrs[i]); | ||
499 | break; | ||
500 | } | 496 | } |
501 | seen |= SEEN_DATAREF | SEEN_XREG; | 497 | seen |= SEEN_DATAREF | SEEN_XREG; |
502 | t_offset = sk_load_byte_msh - (image + addrs[i]); | 498 | t_offset = sk_load_byte_msh - (image + addrs[i]); |
diff --git a/arch/x86/platform/scx200/scx200_32.c b/arch/x86/platform/scx200/scx200_32.c index 7e004acbe526..7a9ad30d6c9f 100644 --- a/arch/x86/platform/scx200/scx200_32.c +++ b/arch/x86/platform/scx200/scx200_32.c | |||
@@ -17,8 +17,6 @@ | |||
17 | /* Verify that the configuration block really is there */ | 17 | /* Verify that the configuration block really is there */ |
18 | #define scx200_cb_probe(base) (inw((base) + SCx200_CBA) == (base)) | 18 | #define scx200_cb_probe(base) (inw((base) + SCx200_CBA) == (base)) |
19 | 19 | ||
20 | #define NAME "scx200" | ||
21 | |||
22 | MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>"); | 20 | MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>"); |
23 | MODULE_DESCRIPTION("NatSemi SCx200 Driver"); | 21 | MODULE_DESCRIPTION("NatSemi SCx200 Driver"); |
24 | MODULE_LICENSE("GPL"); | 22 | MODULE_LICENSE("GPL"); |
@@ -29,10 +27,10 @@ unsigned long scx200_gpio_shadow[2]; | |||
29 | unsigned scx200_cb_base = 0; | 27 | unsigned scx200_cb_base = 0; |
30 | 28 | ||
31 | static struct pci_device_id scx200_tbl[] = { | 29 | static struct pci_device_id scx200_tbl[] = { |
32 | { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_BRIDGE) }, | 30 | { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_SCx200_BRIDGE) }, |
33 | { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE) }, | 31 | { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE) }, |
34 | { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_XBUS) }, | 32 | { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_SCx200_XBUS) }, |
35 | { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_XBUS) }, | 33 | { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_SC1100_XBUS) }, |
36 | { }, | 34 | { }, |
37 | }; | 35 | }; |
38 | MODULE_DEVICE_TABLE(pci,scx200_tbl); | 36 | MODULE_DEVICE_TABLE(pci,scx200_tbl); |
@@ -63,10 +61,11 @@ static int __devinit scx200_probe(struct pci_dev *pdev, const struct pci_device_ | |||
63 | if (pdev->device == PCI_DEVICE_ID_NS_SCx200_BRIDGE || | 61 | if (pdev->device == PCI_DEVICE_ID_NS_SCx200_BRIDGE || |
64 | pdev->device == PCI_DEVICE_ID_NS_SC1100_BRIDGE) { | 62 | pdev->device == PCI_DEVICE_ID_NS_SC1100_BRIDGE) { |
65 | base = pci_resource_start(pdev, 0); | 63 | base = pci_resource_start(pdev, 0); |
66 | printk(KERN_INFO NAME ": GPIO base 0x%x\n", base); | 64 | pr_info("GPIO base 0x%x\n", base); |
67 | 65 | ||
68 | if (!request_region(base, SCx200_GPIO_SIZE, "NatSemi SCx200 GPIO")) { | 66 | if (!request_region(base, SCx200_GPIO_SIZE, |
69 | printk(KERN_ERR NAME ": can't allocate I/O for GPIOs\n"); | 67 | "NatSemi SCx200 GPIO")) { |
68 | pr_err("can't allocate I/O for GPIOs\n"); | ||
70 | return -EBUSY; | 69 | return -EBUSY; |
71 | } | 70 | } |
72 | 71 | ||
@@ -82,11 +81,11 @@ static int __devinit scx200_probe(struct pci_dev *pdev, const struct pci_device_ | |||
82 | if (scx200_cb_probe(base)) { | 81 | if (scx200_cb_probe(base)) { |
83 | scx200_cb_base = base; | 82 | scx200_cb_base = base; |
84 | } else { | 83 | } else { |
85 | printk(KERN_WARNING NAME ": Configuration Block not found\n"); | 84 | pr_warn("Configuration Block not found\n"); |
86 | return -ENODEV; | 85 | return -ENODEV; |
87 | } | 86 | } |
88 | } | 87 | } |
89 | printk(KERN_INFO NAME ": Configuration Block base 0x%x\n", scx200_cb_base); | 88 | pr_info("Configuration Block base 0x%x\n", scx200_cb_base); |
90 | } | 89 | } |
91 | 90 | ||
92 | return 0; | 91 | return 0; |
@@ -111,8 +110,7 @@ u32 scx200_gpio_configure(unsigned index, u32 mask, u32 bits) | |||
111 | 110 | ||
112 | static int __init scx200_init(void) | 111 | static int __init scx200_init(void) |
113 | { | 112 | { |
114 | printk(KERN_INFO NAME ": NatSemi SCx200 Driver\n"); | 113 | pr_info("NatSemi SCx200 Driver\n"); |
115 | |||
116 | return pci_register_driver(&scx200_pci_driver); | 114 | return pci_register_driver(&scx200_pci_driver); |
117 | } | 115 | } |
118 | 116 | ||
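
The printk(KERN_INFO NAME ...) calls in this file become pr_info()/pr_warn()/pr_err(). Those helpers take their prefix from pr_fmt(), so the dropped NAME macro is presumably replaced by a pr_fmt() definition near the top of the file, outside the hunks shown here, along the usual lines of:

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* assumption: must precede the #include lines */
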
diff --git a/arch/xtensa/include/asm/socket.h b/arch/xtensa/include/asm/socket.h index bb06968be227..e36c68184920 100644 --- a/arch/xtensa/include/asm/socket.h +++ b/arch/xtensa/include/asm/socket.h | |||
@@ -75,5 +75,9 @@ | |||
75 | 75 | ||
76 | #define SO_WIFI_STATUS 41 | 76 | #define SO_WIFI_STATUS 41 |
77 | #define SCM_WIFI_STATUS SO_WIFI_STATUS | 77 | #define SCM_WIFI_STATUS SO_WIFI_STATUS |
78 | #define SO_PEEK_OFF 42 | ||
79 | |||
80 | /* Instruct lower device to use last 4-bytes of skb data as FCS */ | ||
81 | #define SO_NOFCS 43 | ||
78 | 82 | ||
79 | #endif /* _XTENSA_SOCKET_H */ | 83 | #endif /* _XTENSA_SOCKET_H */ |
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c index 47041e7c088c..2c9004770c4e 100644 --- a/arch/xtensa/kernel/process.c +++ b/arch/xtensa/kernel/process.c | |||
@@ -113,9 +113,7 @@ void cpu_idle(void) | |||
113 | while (1) { | 113 | while (1) { |
114 | while (!need_resched()) | 114 | while (!need_resched()) |
115 | platform_idle(); | 115 | platform_idle(); |
116 | preempt_enable_no_resched(); | 116 | schedule_preempt_disabled(); |
117 | schedule(); | ||
118 | preempt_disable(); | ||
119 | } | 117 | } |
120 | } | 118 | } |
121 | 119 | ||
diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c index 2c723e8b30da..f9726f6afdf1 100644 --- a/arch/xtensa/platforms/iss/console.c +++ b/arch/xtensa/platforms/iss/console.c | |||
@@ -19,7 +19,6 @@ | |||
19 | #include <linux/param.h> | 19 | #include <linux/param.h> |
20 | #include <linux/seq_file.h> | 20 | #include <linux/seq_file.h> |
21 | #include <linux/serial.h> | 21 | #include <linux/serial.h> |
22 | #include <linux/serialP.h> | ||
23 | 22 | ||
24 | #include <asm/uaccess.h> | 23 | #include <asm/uaccess.h> |
25 | #include <asm/irq.h> | 24 | #include <asm/irq.h> |
@@ -37,6 +36,7 @@ | |||
37 | #define SERIAL_TIMER_VALUE (20 * HZ) | 36 | #define SERIAL_TIMER_VALUE (20 * HZ) |
38 | 37 | ||
39 | static struct tty_driver *serial_driver; | 38 | static struct tty_driver *serial_driver; |
39 | static struct tty_port serial_port; | ||
40 | static struct timer_list serial_timer; | 40 | static struct timer_list serial_timer; |
41 | 41 | ||
42 | static DEFINE_SPINLOCK(timer_lock); | 42 | static DEFINE_SPINLOCK(timer_lock); |
@@ -68,17 +68,10 @@ static void rs_poll(unsigned long); | |||
68 | 68 | ||
69 | static int rs_open(struct tty_struct *tty, struct file * filp) | 69 | static int rs_open(struct tty_struct *tty, struct file * filp) |
70 | { | 70 | { |
71 | int line = tty->index; | 71 | tty->port = &serial_port; |
72 | |||
73 | if ((line < 0) || (line >= SERIAL_MAX_NUM_LINES)) | ||
74 | return -ENODEV; | ||
75 | |||
76 | spin_lock(&timer_lock); | 72 | spin_lock(&timer_lock); |
77 | |||
78 | if (tty->count == 1) { | 73 | if (tty->count == 1) { |
79 | init_timer(&serial_timer); | 74 | setup_timer(&serial_timer, rs_poll, (unsigned long)tty); |
80 | serial_timer.data = (unsigned long) tty; | ||
81 | serial_timer.function = rs_poll; | ||
82 | mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE); | 75 | mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE); |
83 | } | 76 | } |
84 | spin_unlock(&timer_lock); | 77 | spin_unlock(&timer_lock); |
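
setup_timer() stands in for the init_timer()/.data/.function sequence removed above; judging by those lines it is equivalent to roughly the following sketch (setup_timer_sketch is a made-up name, the real helper is a macro in the timer header):

#include <linux/timer.h>

static inline void setup_timer_sketch(struct timer_list *timer,
				      void (*fn)(unsigned long),
				      unsigned long data)
{
	init_timer(timer);
	timer->function = fn;
	timer->data = data;
}
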
@@ -99,10 +92,10 @@ static int rs_open(struct tty_struct *tty, struct file * filp) | |||
99 | */ | 92 | */ |
100 | static void rs_close(struct tty_struct *tty, struct file * filp) | 93 | static void rs_close(struct tty_struct *tty, struct file * filp) |
101 | { | 94 | { |
102 | spin_lock(&timer_lock); | 95 | spin_lock_bh(&timer_lock); |
103 | if (tty->count == 1) | 96 | if (tty->count == 1) |
104 | del_timer_sync(&serial_timer); | 97 | del_timer_sync(&serial_timer); |
105 | spin_unlock(&timer_lock); | 98 | spin_unlock_bh(&timer_lock); |
106 | } | 99 | } |
107 | 100 | ||
108 | 101 | ||
@@ -210,13 +203,14 @@ static const struct tty_operations serial_ops = { | |||
210 | 203 | ||
211 | int __init rs_init(void) | 204 | int __init rs_init(void) |
212 | { | 205 | { |
213 | serial_driver = alloc_tty_driver(1); | 206 | tty_port_init(&serial_port); |
207 | |||
208 | serial_driver = alloc_tty_driver(SERIAL_MAX_NUM_LINES); | ||
214 | 209 | ||
215 | printk ("%s %s\n", serial_name, serial_version); | 210 | printk ("%s %s\n", serial_name, serial_version); |
216 | 211 | ||
217 | /* Initialize the tty_driver structure */ | 212 | /* Initialize the tty_driver structure */ |
218 | 213 | ||
219 | serial_driver->owner = THIS_MODULE; | ||
220 | serial_driver->driver_name = "iss_serial"; | 214 | serial_driver->driver_name = "iss_serial"; |
221 | serial_driver->name = "ttyS"; | 215 | serial_driver->name = "ttyS"; |
222 | serial_driver->major = TTY_MAJOR; | 216 | serial_driver->major = TTY_MAJOR; |