-rw-r--r--   arch/powerpc/include/asm/machdep.h           |  6
-rw-r--r--   arch/powerpc/kernel/machine_kexec.c          |  5
-rw-r--r--   arch/powerpc/kernel/process.c                |  8
-rw-r--r--   arch/powerpc/mm/tlb_hash64.c                 |  6
-rw-r--r--   arch/x86/include/asm/msr-index.h             |  5
-rw-r--r--   arch/x86/kernel/cpu/cpufreq/p4-clockmod.c    |  6
-rw-r--r--   arch/x86/kernel/cpu/cpufreq/powernow-k8.c    | 13
-rw-r--r--   drivers/acpi/acpica/aclocal.h                |  7
-rw-r--r--   drivers/acpi/acpica/evgpe.c                  | 17
-rw-r--r--   drivers/acpi/acpica/evxfgpe.c                | 42
-rw-r--r--   drivers/acpi/debugfs.c                       | 20
-rw-r--r--   drivers/cpufreq/cpufreq.c                    | 27
-rw-r--r--   drivers/idle/intel_idle.c                    | 24
-rw-r--r--   fs/ext2/namei.c                              |  9
14 files changed, 143 insertions, 52 deletions
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 991d5998d6be..fe56a23e1ff0 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -240,6 +240,12 @@ struct machdep_calls {
 	 * claims to support kexec.
 	 */
 	int (*machine_kexec_prepare)(struct kimage *image);
+
+	/* Called to perform the _real_ kexec.
+	 * Do NOT allocate memory or fail here. We are past the point of
+	 * no return.
+	 */
+	void (*machine_kexec)(struct kimage *image);
 #endif /* CONFIG_KEXEC */
 
 #ifdef CONFIG_SUSPEND
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 49a170af8145..a5f8672eeff3 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -87,7 +87,10 @@ void machine_kexec(struct kimage *image)
 
 	save_ftrace_enabled = __ftrace_enabled_save();
 
-	default_machine_kexec(image);
+	if (ppc_md.machine_kexec)
+		ppc_md.machine_kexec(image);
+	else
+		default_machine_kexec(image);
 
 	__ftrace_enabled_restore(save_ftrace_enabled);
 
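
The two hunks above add an optional ppc_md.machine_kexec() hook and fall back to default_machine_kexec() when a platform does not provide one. A minimal sketch of how a board file might wire up the hook follows; the platform name and the quiesce helper are hypothetical, only the hook itself and default_machine_kexec() come from the patch.

	/* Hypothetical board file -- illustrates the new hook, not a real platform. */
	static void myboard_machine_kexec(struct kimage *image)
	{
		/* Past the point of no return: no allocation, no failure paths. */
		myboard_quiesce_dma();			/* assumed platform helper */
		default_machine_kexec(image);		/* reuse the common code path */
	}

	define_machine(myboard) {
		.name		= "My Board",
		.machine_kexec	= myboard_machine_kexec,
		/* ...other machdep_calls members elided... */
	};
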
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 7a1d5cb76932..8303a6c65ef7 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -353,6 +353,7 @@ static void switch_booke_debug_regs(struct thread_struct *new_thread)
 	prime_debug_regs(new_thread);
 }
 #else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
+#ifndef CONFIG_HAVE_HW_BREAKPOINT
 static void set_debug_reg_defaults(struct thread_struct *thread)
 {
 	if (thread->dabr) {
@@ -360,6 +361,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
 		set_dabr(0);
 	}
 }
+#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
 #endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
 
 int set_dabr(unsigned long dabr)
@@ -670,11 +672,11 @@ void flush_thread(void)
 {
 	discard_lazy_cpu_state();
 
-#ifdef CONFIG_HAVE_HW_BREAKPOINTS
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
 	flush_ptrace_hw_breakpoint(current);
-#else /* CONFIG_HAVE_HW_BREAKPOINTS */
+#else /* CONFIG_HAVE_HW_BREAKPOINT */
 	set_debug_reg_defaults(&current->thread);
-#endif /* CONFIG_HAVE_HW_BREAKPOINTS */
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
 }
 
 void
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 1ec06576f619..c14d09f614f3 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -38,13 +38,11 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
  * neesd to be flushed. This function will either perform the flush
  * immediately or will batch it up if the current CPU has an active
  * batch on it.
- *
- * Must be called from within some kind of spinlock/non-preempt region...
  */
 void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, unsigned long pte, int huge)
 {
-	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
 	unsigned long vsid, vaddr;
 	unsigned int psize;
 	int ssize;
@@ -99,6 +97,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	 */
 	if (!batch->active) {
 		flush_hash_page(vaddr, rpte, psize, ssize, 0);
+		put_cpu_var(ppc64_tlb_batch);
 		return;
 	}
 
@@ -127,6 +126,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	batch->index = ++i;
 	if (i >= PPC64_TLB_BATCH_NR)
 		__flush_tlb_pending(batch);
+	put_cpu_var(ppc64_tlb_batch);
 }
 
 /*
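
The change above replaces __get_cpu_var() with the get_cpu_var()/put_cpu_var() pair, so the per-CPU batch is accessed with preemption disabled and every exit path drops the reference, including the early return. A generic sketch of that pairing, with made-up data structure and helper names, looks like this:

	DEFINE_PER_CPU(struct flush_batch, flush_batch);	/* hypothetical per-CPU batch */

	static void queue_or_flush(unsigned long addr)
	{
		/* get_cpu_var() disables preemption while the batch is in use. */
		struct flush_batch *batch = &get_cpu_var(flush_batch);

		if (!batch->active) {
			flush_one(addr);		/* assumed immediate-flush helper */
			put_cpu_var(flush_batch);	/* an early return must still drop it */
			return;
		}

		batch->addr[batch->index++] = addr;
		if (batch->index >= BATCH_MAX)
			flush_pending(batch);		/* assumed batch-flush helper */
		put_cpu_var(flush_batch);		/* re-enables preemption */
	}
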
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 4d0dfa0d998e..43a18c77676d 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -36,6 +36,11 @@
 #define MSR_IA32_PERFCTR1		0x000000c2
 #define MSR_FSB_FREQ			0x000000cd
 
+#define MSR_NHM_SNB_PKG_CST_CFG_CTL	0x000000e2
+#define NHM_C3_AUTO_DEMOTE		(1UL << 25)
+#define NHM_C1_AUTO_DEMOTE		(1UL << 26)
+#define ATM_LNC_C6_AUTO_DEMOTE		(1UL << 25)
+
 #define MSR_MTRRcap			0x000000fe
 #define MSR_IA32_BBL_CR_CTL		0x00000119
 
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index bd1cac747f67..52c93648e492 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -158,9 +158,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
 {
 	if (c->x86 == 0x06) {
 		if (cpu_has(c, X86_FEATURE_EST))
-			printk(KERN_WARNING PFX "Warning: EST-capable CPU "
-			       "detected. The acpi-cpufreq module offers "
-			       "voltage scaling in addition of frequency "
+			printk_once(KERN_WARNING PFX "Warning: EST-capable "
+			       "CPU detected. The acpi-cpufreq module offers "
+			       "voltage scaling in addition to frequency "
 			       "scaling. You should use that instead of "
 			       "p4-clockmod, if possible.\n");
 		switch (c->x86_model) {
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 35c7e65e59be..c567dec854f6 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1537,6 +1537,7 @@ static struct notifier_block cpb_nb = {
 static int __cpuinit powernowk8_init(void)
 {
 	unsigned int i, supported_cpus = 0, cpu;
+	int rv;
 
 	for_each_online_cpu(i) {
 		int rc;
@@ -1555,14 +1556,14 @@ static int __cpuinit powernowk8_init(void)
 
 		cpb_capable = true;
 
-		register_cpu_notifier(&cpb_nb);
-
 		msrs = msrs_alloc();
 		if (!msrs) {
 			printk(KERN_ERR "%s: Error allocating msrs!\n", __func__);
 			return -ENOMEM;
 		}
 
+		register_cpu_notifier(&cpb_nb);
+
 		rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
 
 		for_each_cpu(cpu, cpu_online_mask) {
@@ -1574,7 +1575,13 @@ static int __cpuinit powernowk8_init(void)
 			(cpb_enabled ? "on" : "off"));
 	}
 
-	return cpufreq_register_driver(&cpufreq_amd64_driver);
+	rv = cpufreq_register_driver(&cpufreq_amd64_driver);
+	if (rv < 0 && boot_cpu_has(X86_FEATURE_CPB)) {
+		unregister_cpu_notifier(&cpb_nb);
+		msrs_free(msrs);
+		msrs = NULL;
+	}
+	return rv;
 }
 
 /* driver entry point for term */
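
The hunks above move msrs_alloc() ahead of register_cpu_notifier() and undo both when cpufreq_register_driver() fails, so nothing stays registered after a failed init. Stripped of the driver specifics, the ordering reduces to the sketch below (driver and notifier names are illustrative):

	static int __init mydrv_init(void)
	{
		int rv;

		msrs = msrs_alloc();			/* allocate before registering anything */
		if (!msrs)
			return -ENOMEM;

		register_cpu_notifier(&mydrv_cpu_nb);

		rv = cpufreq_register_driver(&mydrv_driver);
		if (rv < 0) {				/* unwind in reverse order on failure */
			unregister_cpu_notifier(&mydrv_cpu_nb);
			msrs_free(msrs);
			msrs = NULL;
		}
		return rv;
	}
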
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 54784bb42cec..edc25867ad9d 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -416,10 +416,15 @@ struct acpi_gpe_handler_info {
 	u8 originally_enabled;	/* True if GPE was originally enabled */
 };
 
+struct acpi_gpe_notify_object {
+	struct acpi_namespace_node *node;
+	struct acpi_gpe_notify_object *next;
+};
+
 union acpi_gpe_dispatch_info {
 	struct acpi_namespace_node *method_node;	/* Method node for this GPE level */
 	struct acpi_gpe_handler_info *handler;		/* Installed GPE handler */
-	struct acpi_namespace_node *device_node;	/* Parent _PRW device for implicit notify */
+	struct acpi_gpe_notify_object device;		/* List of _PRW devices for implicit notify */
 };
 
 /*
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 14988a86066f..f4725212eb48 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -457,6 +457,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
 	acpi_status status;
 	struct acpi_gpe_event_info *local_gpe_event_info;
 	struct acpi_evaluate_info *info;
+	struct acpi_gpe_notify_object *notify_object;
 
 	ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
 
@@ -508,10 +509,18 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
 		 * from this thread -- because handlers may in turn run other
 		 * control methods.
 		 */
-		status =
-		    acpi_ev_queue_notify_request(local_gpe_event_info->dispatch.
-						 device_node,
-						 ACPI_NOTIFY_DEVICE_WAKE);
+		status = acpi_ev_queue_notify_request(
+				local_gpe_event_info->dispatch.device.node,
+				ACPI_NOTIFY_DEVICE_WAKE);
+
+		notify_object = local_gpe_event_info->dispatch.device.next;
+		while (ACPI_SUCCESS(status) && notify_object) {
+			status = acpi_ev_queue_notify_request(
+					notify_object->node,
+					ACPI_NOTIFY_DEVICE_WAKE);
+			notify_object = notify_object->next;
+		}
+
 		break;
 
 	case ACPI_GPE_DISPATCH_METHOD:
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 3b20a3401b64..52aaff3df562 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -198,7 +198,9 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
 	acpi_status status = AE_BAD_PARAMETER;
 	struct acpi_gpe_event_info *gpe_event_info;
 	struct acpi_namespace_node *device_node;
+	struct acpi_gpe_notify_object *notify_object;
 	acpi_cpu_flags flags;
+	u8 gpe_dispatch_mask;
 
 	ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake);
 
@@ -221,27 +223,49 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
 		goto unlock_and_exit;
 	}
 
+	if (wake_device == ACPI_ROOT_OBJECT) {
+		goto out;
+	}
+
 	/*
 	 * If there is no method or handler for this GPE, then the
 	 * wake_device will be notified whenever this GPE fires (aka
 	 * "implicit notify") Note: The GPE is assumed to be
 	 * level-triggered (for windows compatibility).
 	 */
-	if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
-	     ACPI_GPE_DISPATCH_NONE) && (wake_device != ACPI_ROOT_OBJECT)) {
+	gpe_dispatch_mask = gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK;
+	if (gpe_dispatch_mask != ACPI_GPE_DISPATCH_NONE
+	    && gpe_dispatch_mask != ACPI_GPE_DISPATCH_NOTIFY) {
+		goto out;
+	}
 
 	/* Validate wake_device is of type Device */
 
-	device_node = ACPI_CAST_PTR(struct acpi_namespace_node,
-				    wake_device);
-	if (device_node->type != ACPI_TYPE_DEVICE) {
-		goto unlock_and_exit;
-	}
+	device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
+	if (device_node->type != ACPI_TYPE_DEVICE) {
+		goto unlock_and_exit;
+	}
+
+	if (gpe_dispatch_mask == ACPI_GPE_DISPATCH_NONE) {
 		gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY |
 					 ACPI_GPE_LEVEL_TRIGGERED);
-		gpe_event_info->dispatch.device_node = device_node;
+		gpe_event_info->dispatch.device.node = device_node;
+		gpe_event_info->dispatch.device.next = NULL;
+	} else {
+		/* There are multiple devices to notify implicitly. */
+
+		notify_object = ACPI_ALLOCATE_ZEROED(sizeof(*notify_object));
+		if (!notify_object) {
+			status = AE_NO_MEMORY;
+			goto unlock_and_exit;
+		}
+
+		notify_object->node = device_node;
+		notify_object->next = gpe_event_info->dispatch.device.next;
+		gpe_event_info->dispatch.device.next = notify_object;
 	}
 
+out:
 	gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
 	status = AE_OK;
 
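
Taken together, the three ACPI hunks turn the single device_node pointer into a small singly linked list: acpi_setup_gpe_for_wake() pushes extra wake devices onto dispatch.device.next, and acpi_ev_asynch_execute_gpe_method() walks that list and queues an implicit notify for each node. The list handling boils down to the following sketch, which uses the acpi_gpe_notify_object structure from aclocal.h but wraps it in hypothetical helper names:

	/* Prepend a wake device to the per-GPE implicit-notify list (sketch). */
	static acpi_status add_wake_device(struct acpi_gpe_event_info *gpe,
					   struct acpi_namespace_node *node)
	{
		struct acpi_gpe_notify_object *obj;

		obj = ACPI_ALLOCATE_ZEROED(sizeof(*obj));
		if (!obj)
			return AE_NO_MEMORY;

		obj->node = node;
		obj->next = gpe->dispatch.device.next;	/* push onto the head of the list */
		gpe->dispatch.device.next = obj;
		return AE_OK;
	}

	/* Notify the primary device, then every extra device on the list (sketch). */
	static acpi_status notify_wake_devices(struct acpi_gpe_event_info *gpe)
	{
		struct acpi_gpe_notify_object *obj = gpe->dispatch.device.next;
		acpi_status status;

		status = acpi_ev_queue_notify_request(gpe->dispatch.device.node,
						      ACPI_NOTIFY_DEVICE_WAKE);
		while (ACPI_SUCCESS(status) && obj) {
			status = acpi_ev_queue_notify_request(obj->node,
							      ACPI_NOTIFY_DEVICE_WAKE);
			obj = obj->next;
		}
		return status;
	}
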
diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c
index 5df67f1d6c61..384f7abcff77 100644
--- a/drivers/acpi/debugfs.c
+++ b/drivers/acpi/debugfs.c
@@ -26,7 +26,9 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
 			size_t count, loff_t *ppos)
 {
 	static char *buf;
-	static int uncopied_bytes;
+	static u32 max_size;
+	static u32 uncopied_bytes;
+
 	struct acpi_table_header table;
 	acpi_status status;
 
@@ -37,19 +39,24 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
 		if (copy_from_user(&table, user_buf,
 				sizeof(struct acpi_table_header)))
 			return -EFAULT;
-		uncopied_bytes = table.length;
-		buf = kzalloc(uncopied_bytes, GFP_KERNEL);
+		uncopied_bytes = max_size = table.length;
+		buf = kzalloc(max_size, GFP_KERNEL);
 		if (!buf)
 			return -ENOMEM;
 	}
 
-	if (uncopied_bytes < count) {
-		kfree(buf);
+	if (buf == NULL)
+		return -EINVAL;
+
+	if ((*ppos > max_size) ||
+	    (*ppos + count > max_size) ||
+	    (*ppos + count < count) ||
+	    (count > uncopied_bytes))
 		return -EINVAL;
-	}
 
 	if (copy_from_user(buf + (*ppos), user_buf, count)) {
 		kfree(buf);
+		buf = NULL;
 		return -EFAULT;
 	}
 
@@ -59,6 +66,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
 	if (!uncopied_bytes) {
 		status = acpi_install_method(buf);
 		kfree(buf);
+		buf = NULL;
 		if (ACPI_FAILURE(status))
 			return -EINVAL;
 		add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
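
The rewritten checks above guard a multi-call debugfs write: the first call sizes the buffer from the ACPI table header, and every later call must stay inside the staged buffer, not wrap *ppos + count, and not exceed the bytes still expected. The same validation, reduced to its essentials with illustrative names, looks like this:

	/* Sketch of the offset/length validation used above for a chunked write. */
	static ssize_t chunk_write(char *buf, u32 max_size, u32 *remaining,
				   const char __user *user_buf, size_t count,
				   loff_t *ppos)
	{
		if (buf == NULL)			/* nothing staged yet */
			return -EINVAL;

		if ((*ppos > max_size) ||		/* offset beyond the staged table */
		    (*ppos + count > max_size) ||	/* write would run past the end */
		    (*ppos + count < count) ||		/* offset + count overflowed */
		    (count > *remaining))		/* more data than is still expected */
			return -EINVAL;

		if (copy_from_user(buf + *ppos, user_buf, count))
			return -EFAULT;

		*remaining -= count;
		*ppos += count;
		return count;
	}
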
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1109f6848a43..5cb4d09919d6 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1919,8 +1919,10 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 
 	ret = sysdev_driver_register(&cpu_sysdev_class,
 					&cpufreq_sysdev_driver);
+	if (ret)
+		goto err_null_driver;
 
-	if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
+	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
 		int i;
 		ret = -ENODEV;
 
@@ -1935,21 +1937,22 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 		if (ret) {
 			dprintk("no CPU initialized for driver %s\n",
 							driver_data->name);
-			sysdev_driver_unregister(&cpu_sysdev_class,
-					&cpufreq_sysdev_driver);
-
-			spin_lock_irqsave(&cpufreq_driver_lock, flags);
-			cpufreq_driver = NULL;
-			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+			goto err_sysdev_unreg;
 		}
 	}
 
-	if (!ret) {
-		register_hotcpu_notifier(&cpufreq_cpu_notifier);
-		dprintk("driver %s up and running\n", driver_data->name);
-		cpufreq_debug_enable_ratelimit();
-	}
+	register_hotcpu_notifier(&cpufreq_cpu_notifier);
+	dprintk("driver %s up and running\n", driver_data->name);
+	cpufreq_debug_enable_ratelimit();
 
+	return 0;
+err_sysdev_unreg:
+	sysdev_driver_unregister(&cpu_sysdev_class,
+			&cpufreq_sysdev_driver);
+err_null_driver:
+	spin_lock_irqsave(&cpufreq_driver_lock, flags);
+	cpufreq_driver = NULL;
+	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
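
The restructuring above converts cpufreq_register_driver()'s nested if (!ret) logic into the usual goto-based error unwind: each failure jumps to a label that releases only what was set up before it, with the labels listed in reverse order of acquisition. In outline, with generic step names standing in for the sysdev and driver-lock specifics:

	static int register_something(void)
	{
		int ret;

		ret = register_step_one();		/* e.g. sysdev_driver_register() */
		if (ret)
			goto err_clear_state;

		ret = register_step_two();		/* e.g. per-CPU initialization */
		if (ret)
			goto err_undo_step_one;

		return 0;				/* success falls straight through */

	err_undo_step_one:
		unregister_step_one();
	err_clear_state:
		clear_global_state();			/* e.g. NULL the driver pointer under the lock */
		return ret;
	}
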
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 1fa091e05690..4a5c4a44ffb1 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -62,6 +62,7 @@
 #include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <asm/mwait.h>
+#include <asm/msr.h>
 
 #define INTEL_IDLE_VERSION "0.4"
 #define PREFIX "intel_idle: "
@@ -85,6 +86,12 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
 static struct cpuidle_state *cpuidle_state_table;
 
 /*
+ * Hardware C-state auto-demotion may not always be optimal.
+ * Indicate which enable bits to clear here.
+ */
+static unsigned long long auto_demotion_disable_flags;
+
+/*
  * Set this flag for states where the HW flushes the TLB for us
  * and so we don't need cross-calls to keep it consistent.
  * If this flag is set, SW flushes the TLB, so even if the
@@ -281,6 +288,15 @@ static struct notifier_block setup_broadcast_notifier = {
 	.notifier_call = setup_broadcast_cpuhp_notify,
 };
 
+static void auto_demotion_disable(void *dummy)
+{
+	unsigned long long msr_bits;
+
+	rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
+	msr_bits &= ~auto_demotion_disable_flags;
+	wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
+}
+
 /*
  * intel_idle_probe()
  */
@@ -324,11 +340,17 @@ static int intel_idle_probe(void)
 	case 0x25:	/* Westmere */
 	case 0x2C:	/* Westmere */
 		cpuidle_state_table = nehalem_cstates;
+		auto_demotion_disable_flags =
+			(NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE);
 		break;
 
 	case 0x1C:	/* 28 - Atom Processor */
+		cpuidle_state_table = atom_cstates;
+		break;
+
 	case 0x26:	/* 38 - Lincroft Atom Processor */
 		cpuidle_state_table = atom_cstates;
+		auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE;
 		break;
 
 	case 0x2A:	/* SNB */
@@ -436,6 +458,8 @@ static int intel_idle_cpuidle_devices_init(void)
 			return -EIO;
 		}
 	}
+	if (auto_demotion_disable_flags)
+		smp_call_function(auto_demotion_disable, NULL, 1);
 
 	return 0;
 }
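
intel_idle now records which auto-demotion enable bits to clear for each CPU model and clears them in MSR_NHM_SNB_PKG_CST_CFG_CTL on the other CPUs via smp_call_function(). The core of the change is a per-CPU read-modify-write of that MSR; a standalone sketch, using on_each_cpu() so the calling CPU is covered as well (smp_call_function() only targets the other CPUs) and hypothetical function and variable names, could look like this:

	#include <asm/msr.h>

	static unsigned long long demotion_bits_to_clear;	/* set from CPU model detection */

	static void clear_demotion_bits(void *unused)
	{
		unsigned long long msr_bits;

		/* Read-modify-write the package C-state config MSR on this CPU. */
		rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
		msr_bits &= ~demotion_bits_to_clear;
		wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
	}

	static void disable_auto_demotion_everywhere(void)
	{
		if (demotion_bits_to_clear)
			on_each_cpu(clear_demotion_bits, NULL, 1);	/* includes the calling CPU */
	}
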
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 2e1d8341d827..adb91855ccd0 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -344,7 +344,6 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
 		new_de = ext2_find_entry (new_dir, &new_dentry->d_name, &new_page);
 		if (!new_de)
 			goto out_dir;
-		inode_inc_link_count(old_inode);
 		ext2_set_link(new_dir, new_de, new_page, old_inode, 1);
 		new_inode->i_ctime = CURRENT_TIME_SEC;
 		if (dir_de)
@@ -356,12 +355,9 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
 			if (new_dir->i_nlink >= EXT2_LINK_MAX)
 				goto out_dir;
 		}
-		inode_inc_link_count(old_inode);
 		err = ext2_add_link(new_dentry, old_inode);
-		if (err) {
-			inode_dec_link_count(old_inode);
+		if (err)
 			goto out_dir;
-		}
 		if (dir_de)
 			inode_inc_link_count(new_dir);
 	}
@@ -369,12 +365,11 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
 	/*
 	 * Like most other Unix systems, set the ctime for inodes on a
 	 * rename.
-	 * inode_dec_link_count() will mark the inode dirty.
 	 */
 	old_inode->i_ctime = CURRENT_TIME_SEC;
+	mark_inode_dirty(old_inode);
 
 	ext2_delete_entry (old_de, old_page);
-	inode_dec_link_count(old_inode);
 
 	if (dir_de) {
 		if (old_dir != new_dir)