-rw-r--r--  arch/i386/kernel/acpi/boot.c             8
-rw-r--r--  arch/powerpc/kernel/prom_init.c          2
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c   2
-rw-r--r--  arch/x86_64/kernel/pci-nommu.c           7
-rw-r--r--  arch/x86_64/kernel/traps.c              21
-rw-r--r--  arch/x86_64/mm/srat.c                   15
-rw-r--r--  drivers/net/b44.c                       28
-rw-r--r--  drivers/serial/serial_core.c             9
-rw-r--r--  mm/slab.c                               11
9 files changed, 73 insertions(+), 30 deletions(-)
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index 40e5aba3ad3d..daee69579b1c 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -1066,6 +1066,14 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
 		     DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
 		     },
 	 },
+	{
+	 .callback = disable_acpi_pci,
+	 .ident = "HP xw9300",
+	 .matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "HP xw9300 Workstation"),
+		     },
+	 },
 	{}
 };
 
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 078fb5533541..2d80653aa2af 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1636,7 +1636,7 @@ static int __init prom_find_machine_type(void)
 					compat, sizeof(compat)-1);
 	if (len <= 0)
 		return PLATFORM_GENERIC;
-	if (strncmp(compat, RELOC("chrp"), 4))
+	if (strcmp(compat, RELOC("chrp")))
 		return PLATFORM_GENERIC;
 
 	/* Default to pSeries. We need to know if we are running LPAR */
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 5eb55ef1c91c..5f79f01c44f2 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -255,7 +255,7 @@ static int __init pSeries_init_panel(void)
 {
 	/* Manually leave the kernel version on the panel. */
 	ppc_md.progress("Linux ppc64\n", 0);
-	ppc_md.progress(system_utsname.version, 0);
+	ppc_md.progress(system_utsname.release, 0);
 
 	return 0;
 }
diff --git a/arch/x86_64/kernel/pci-nommu.c b/arch/x86_64/kernel/pci-nommu.c
index 44adcc2d5e5b..1f6ecc62061d 100644
--- a/arch/x86_64/kernel/pci-nommu.c
+++ b/arch/x86_64/kernel/pci-nommu.c
@@ -12,9 +12,10 @@ static int
 check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
 {
 	if (hwdev && bus + size > *hwdev->dma_mask) {
-		printk(KERN_ERR
-		    "nommu_%s: overflow %Lx+%lu of device mask %Lx\n",
-			name, (long long)bus, size, (long long)*hwdev->dma_mask);
+		if (*hwdev->dma_mask >= 0xffffffffULL)
+			printk(KERN_ERR
+			    "nommu_%s: overflow %Lx+%lu of device mask %Lx\n",
+				name, (long long)bus, size, (long long)*hwdev->dma_mask);
 		return 0;
 	}
 	return 1;
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 6b87268c5c2e..cea335e8746c 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -102,6 +102,8 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
 {
 	if (regs->eflags & X86_EFLAGS_IF)
 		local_irq_disable();
+	/* Make sure to not schedule here because we could be running
+	   on an exception stack. */
 	preempt_enable_no_resched();
 }
 
@@ -483,8 +485,6 @@ static void __kprobes do_trap(int trapnr, int signr, char *str,
 {
 	struct task_struct *tsk = current;
 
-	conditional_sti(regs);
-
 	tsk->thread.error_code = error_code;
 	tsk->thread.trap_no = trapnr;
 
@@ -521,6 +521,7 @@ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
 	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
 							== NOTIFY_STOP) \
 		return; \
+	conditional_sti(regs); \
 	do_trap(trapnr, signr, str, regs, error_code, NULL); \
 }
 
@@ -535,6 +536,7 @@ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
 	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
 							== NOTIFY_STOP) \
 		return; \
+	conditional_sti(regs); \
 	do_trap(trapnr, signr, str, regs, error_code, &info); \
 }
 
@@ -548,7 +550,17 @@ DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
 DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
 DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
 DO_ERROR(18, SIGSEGV, "reserved", reserved)
-DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
+
+/* Runs on IST stack */
+asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
+{
+	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
+			12, SIGBUS) == NOTIFY_STOP)
+		return;
+	preempt_conditional_sti(regs);
+	do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
+	preempt_conditional_cli(regs);
+}
 
 asmlinkage void do_double_fault(struct pt_regs * regs, long error_code)
 {
@@ -682,8 +694,9 @@ asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
 	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
 		return;
 	}
+	preempt_conditional_sti(regs);
 	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
-	return;
+	preempt_conditional_cli(regs);
 }
 
 /* Help handler running on IST stack to switch back to user stack
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index 15ae9fcd65a7..e1513532df29 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -34,7 +34,10 @@ static nodemask_t nodes_found __initdata;
 static struct bootnode nodes[MAX_NUMNODES] __initdata;
 static struct bootnode nodes_add[MAX_NUMNODES] __initdata;
 static int found_add_area __initdata;
-int hotadd_percent __initdata = 10;
+int hotadd_percent __initdata = 0;
+#ifndef RESERVE_HOTADD
+#define hotadd_percent 0	/* Ignore all settings */
+#endif
 static u8 pxm2node[256] = { [0 ... 255] = 0xff };
 
 /* Too small nodes confuse the VM badly. Usually they result
@@ -103,6 +106,7 @@ static __init void bad_srat(void)
 	int i;
 	printk(KERN_ERR "SRAT: SRAT not used.\n");
 	acpi_numa = -1;
+	found_add_area = 0;
 	for (i = 0; i < MAX_LOCAL_APIC; i++)
 		apicid_to_node[i] = NUMA_NO_NODE;
 	for (i = 0; i < MAX_NUMNODES; i++)
@@ -154,7 +158,8 @@ acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
 	int pxm, node;
 	if (srat_disabled())
 		return;
-	if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) { bad_srat();
+	if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) {
+		bad_srat();
 		return;
 	}
 	if (pa->flags.enabled == 0)
@@ -191,15 +196,17 @@ static int hotadd_enough_memory(struct bootnode *nd)
 	allowed = (end_pfn - e820_hole_size(0, end_pfn)) * PAGE_SIZE;
 	allowed = (allowed / 100) * hotadd_percent;
 	if (allocated + mem > allowed) {
+		unsigned long range;
 		/* Give them at least part of their hotadd memory upto hotadd_percent
 		   It would be better to spread the limit out
 		   over multiple hotplug areas, but that is too complicated
 		   right now */
 		if (allocated >= allowed)
 			return 0;
-		pages = (allowed - allocated + mem) / sizeof(struct page);
+		range = allowed - allocated;
+		pages = (range / PAGE_SIZE);
 		mem = pages * sizeof(struct page);
-		nd->end = nd->start + pages*PAGE_SIZE;
+		nd->end = nd->start + range;
 	}
 	/* Not completely fool proof, but a good sanity check */
 	addr = find_e820_area(last_area_end, end_pfn<<PAGE_SHIFT, mem);
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 3d306681919e..d8233e0b7899 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -650,9 +650,11 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 
 	/* Hardware bug work-around, the chip is unable to do PCI DMA
 	   to/from anything above 1GB :-( */
-	if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
+	if (dma_mapping_error(mapping) ||
+		mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
 		/* Sigh... */
-		pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
+		if (!dma_mapping_error(mapping))
+			pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
 		dev_kfree_skb_any(skb);
 		skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA);
 		if (skb == NULL)
@@ -660,8 +662,10 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 		mapping = pci_map_single(bp->pdev, skb->data,
 					 RX_PKT_BUF_SZ,
 					 PCI_DMA_FROMDEVICE);
-		if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
-			pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
+		if (dma_mapping_error(mapping) ||
+			mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
+			if (!dma_mapping_error(mapping))
+				pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
 			dev_kfree_skb_any(skb);
 			return -ENOMEM;
 		}
@@ -967,9 +971,10 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
-	if (mapping + len > B44_DMA_MASK) {
+	if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
 		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
-		pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
+		if (!dma_mapping_error(mapping))
+			pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
 
 		bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
 					     GFP_ATOMIC|GFP_DMA);
@@ -978,8 +983,9 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		mapping = pci_map_single(bp->pdev, bounce_skb->data,
 					 len, PCI_DMA_TODEVICE);
-		if (mapping + len > B44_DMA_MASK) {
-			pci_unmap_single(bp->pdev, mapping,
+		if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
+			if (!dma_mapping_error(mapping))
+				pci_unmap_single(bp->pdev, mapping,
 					 len, PCI_DMA_TODEVICE);
 			dev_kfree_skb_any(bounce_skb);
 			goto err_out;
@@ -1203,7 +1209,8 @@ static int b44_alloc_consistent(struct b44 *bp)
 					     DMA_TABLE_BYTES,
 					     DMA_BIDIRECTIONAL);
 
-		if (rx_ring_dma + size > B44_DMA_MASK) {
+		if (dma_mapping_error(rx_ring_dma) ||
+			rx_ring_dma + size > B44_DMA_MASK) {
 			kfree(rx_ring);
 			goto out_err;
 		}
@@ -1229,7 +1236,8 @@ static int b44_alloc_consistent(struct b44 *bp)
 					     DMA_TABLE_BYTES,
 					     DMA_TO_DEVICE);
 
-		if (tx_ring_dma + size > B44_DMA_MASK) {
+		if (dma_mapping_error(tx_ring_dma) ||
+			tx_ring_dma + size > B44_DMA_MASK) {
 			kfree(tx_ring);
 			goto out_err;
 		}
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
index aeb8153ccf24..17839e753e4c 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/serial/serial_core.c
@@ -1907,9 +1907,12 @@ uart_set_options(struct uart_port *port, struct console *co,
 static void uart_change_pm(struct uart_state *state, int pm_state)
 {
 	struct uart_port *port = state->port;
-	if (port->ops->pm)
-		port->ops->pm(port, pm_state, state->pm_state);
-	state->pm_state = pm_state;
+
+	if (state->pm_state != pm_state) {
+		if (port->ops->pm)
+			port->ops->pm(port, pm_state, state->pm_state);
+		state->pm_state = pm_state;
+	}
 }
 
 int uart_suspend_port(struct uart_driver *drv, struct uart_port *port)
diff --git a/mm/slab.c b/mm/slab.c
index b1d643b5238d..d31a06bfbea5 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2200,11 +2200,14 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
 	check_irq_on();
 	for_each_online_node(node) {
 		l3 = cachep->nodelists[node];
-		if (l3) {
+		if (l3 && l3->alien)
+			drain_alien_cache(cachep, l3->alien);
+	}
+
+	for_each_online_node(node) {
+		l3 = cachep->nodelists[node];
+		if (l3)
 			drain_array(cachep, l3, l3->shared, 1, node);
-			if (l3->alien)
-				drain_alien_cache(cachep, l3->alien);
-		}
 	}
 }
 