 -rw-r--r--  arch/x86/include/asm/apic.h         |  2
 -rw-r--r--  arch/x86/include/asm/cpufeature.h   |  1
 -rw-r--r--  arch/x86/include/asm/i387.h         | 12
 -rw-r--r--  arch/x86/include/asm/uv/uv_bau.h    |  2
 -rw-r--r--  arch/x86/include/asm/uv/uv_hub.h    |  6
 -rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c  | 15
 -rw-r--r--  arch/x86/kernel/cpu/amd.c           | 10
 -rw-r--r--  arch/x86/kernel/reboot.c            |  9
 -rw-r--r--  arch/x86/kernel/tlb_uv.c            | 15
 -rw-r--r--  arch/x86/kernel/vm86_32.c           |  9
 -rw-r--r--  arch/x86/mm/memtest.c               | 14
 11 files changed, 66 insertions(+), 29 deletions(-)
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 229d0be184a2..bb7d47925847 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -402,7 +402,7 @@ static inline unsigned default_get_apic_id(unsigned long x)
 {
         unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
 
-        if (APIC_XAPIC(ver))
+        if (APIC_XAPIC(ver) || boot_cpu_has(X86_FEATURE_EXTD_APICID))
                 return (x >> 24) & 0xFF;
         else
                 return (x >> 24) & 0x0F;
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 13cc6a503a02..19af42138f78 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -94,6 +94,7 @@
 #define X86_FEATURE_TSC_RELIABLE (3*32+23) /* TSC is known to be reliable */
 #define X86_FEATURE_NONSTOP_TSC (3*32+24) /* TSC does not stop in C states */
 #define X86_FEATURE_CLFLUSH_MONITOR (3*32+25) /* "" clflush reqd with monitor */
+#define X86_FEATURE_EXTD_APICID (3*32+26) /* has extended APICID (8 bits) */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 63d185087d91..175adf58dd4f 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -304,18 +304,18 @@ static inline void kernel_fpu_end(void)
 /*
  * Some instructions like VIA's padlock instructions generate a spurious
  * DNA fault but don't modify SSE registers. And these instructions
- * get used from interrupt context aswell. To prevent these kernel instructions
- * in interrupt context interact wrongly with other user/kernel fpu usage, we
+ * get used from interrupt context as well. To prevent these kernel instructions
+ * in interrupt context interacting wrongly with other user/kernel fpu usage, we
  * should use them only in the context of irq_ts_save/restore()
  */
 static inline int irq_ts_save(void)
 {
         /*
-         * If we are in process context, we are ok to take a spurious DNA fault.
-         * Otherwise, doing clts() in process context require pre-emption to
-         * be disabled or some heavy lifting like kernel_fpu_begin()
+         * If in process context and not atomic, we can take a spurious DNA fault.
+         * Otherwise, doing clts() in process context requires disabling preemption
+         * or some heavy lifting like kernel_fpu_begin()
          */
-        if (!in_interrupt())
+        if (!in_atomic())
                 return 0;
 
         if (read_cr0() & X86_CR0_TS) {
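For context, a minimal sketch of the caller pattern these helpers exist for, modeled on how the VIA padlock crypto drivers bracket their SSE-touching instructions. The function and its body below are hypothetical illustrations, not part of this patch:

#include <asm/i387.h>

static void issue_padlock_op(void *ctx)
{
        int ts_state;

        ts_state = irq_ts_save();       /* clears CR0.TS if needed and remembers it */
        /* ... execute the DNA-fault-prone instruction here ... */
        irq_ts_restore(ts_state);       /* puts CR0.TS back the way it was */
}

With the change above, irq_ts_save() also takes care of clearing TS when the caller runs in a non-interrupt atomic section, which in_interrupt() alone did not cover.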
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index 9b0e61bf7a88..bddd44f2f0ab 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -37,7 +37,7 @@
 #define UV_CPUS_PER_ACT_STATUS          32
 #define UV_ACT_STATUS_MASK              0x3
 #define UV_ACT_STATUS_SIZE              2
-#define UV_ACTIVATION_DESCRIPTOR_SIZE   32
+#define UV_ADP_SIZE                     32
 #define UV_DISTRIBUTION_SIZE            256
 #define UV_SW_ACK_NPENDING              8
 #define UV_NET_ENDPOINT_INTD            0x38
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index d3a98ea1062e..341070f7ad5c 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -133,6 +133,7 @@ struct uv_scir_s {
 struct uv_hub_info_s {
         unsigned long           global_mmr_base;
         unsigned long           gpa_mask;
+        unsigned int            gnode_extra;
         unsigned long           gnode_upper;
         unsigned long           lowmem_remap_top;
         unsigned long           lowmem_remap_base;
@@ -159,7 +160,8 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
  *              p - PNODE (local part of nsids, right shifted 1)
  */
 #define UV_NASID_TO_PNODE(n)            (((n) >> 1) & uv_hub_info->pnode_mask)
-#define UV_PNODE_TO_NASID(p)            (((p) << 1) | uv_hub_info->gnode_upper)
+#define UV_PNODE_TO_GNODE(p)            ((p) |uv_hub_info->gnode_extra)
+#define UV_PNODE_TO_NASID(p)            (UV_PNODE_TO_GNODE(p) << 1)
 
 #define UV_LOCAL_MMR_BASE               0xf4000000UL
 #define UV_GLOBAL_MMR32_BASE            0xf8000000UL
@@ -173,7 +175,7 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
 #define UV_GLOBAL_MMR32_PNODE_BITS(p)   ((p) << (UV_GLOBAL_MMR32_PNODE_SHIFT))
 
 #define UV_GLOBAL_MMR64_PNODE_BITS(p)                                   \
-        ((unsigned long)(p) << UV_GLOBAL_MMR64_PNODE_SHIFT)
+        ((unsigned long)(UV_PNODE_TO_GNODE(p)) << UV_GLOBAL_MMR64_PNODE_SHIFT)
 
 #define UV_APIC_PNODE_SHIFT 6
 
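A quick worked example of the new macros, using hypothetical register values rather than anything taken from the patch: with n_val = 9 the pnode mask is 0x1ff; if the hub's node_id register reads 0x600, the code below computes gnode_extra = (0x600 & ~0x1ff) >> 1 = 0x300. For pnode 0x5 the conversions then give UV_PNODE_TO_GNODE(0x5) = 0x5 | 0x300 = 0x305 and UV_PNODE_TO_NASID(0x5) = 0x305 << 1 = 0x60a. The upper gnode bits are folded in before the final left shift, whereas the old UV_PNODE_TO_NASID OR-ed gnode_upper in after shifting.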
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 780a733a5e7a..ef0ae207a7c8 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -562,7 +562,7 @@ void __init uv_system_init(void)
         union uvh_node_id_u node_id;
         unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
         int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
-        int max_pnode = 0;
+        int gnode_extra, max_pnode = 0;
         unsigned long mmr_base, present, paddr;
         unsigned short pnode_mask;
 
@@ -574,6 +574,13 @@ void __init uv_system_init(void)
         mmr_base =
             uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
             ~UV_MMR_ENABLE;
+        pnode_mask = (1 << n_val) - 1;
+        node_id.v = uv_read_local_mmr(UVH_NODE_ID);
+        gnode_extra = (node_id.s.node_id & ~((1 << n_val) - 1)) >> 1;
+        gnode_upper = ((unsigned long)gnode_extra << m_val);
+        printk(KERN_DEBUG "UV: N %d, M %d, gnode_upper 0x%lx, gnode_extra 0x%x\n",
+                        n_val, m_val, gnode_upper, gnode_extra);
+
         printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base);
 
         for(i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
@@ -610,11 +617,6 @@ void __init uv_system_init(void)
                 }
         }
 
-        pnode_mask = (1 << n_val) - 1;
-        node_id.v = uv_read_local_mmr(UVH_NODE_ID);
-        gnode_upper = (((unsigned long)node_id.s.node_id) &
-                       ~((1 << n_val) - 1)) << m_val;
-
         uv_bios_init();
         uv_bios_get_sn_info(0, &uv_type, &sn_partition_id,
                             &sn_coherency_id, &sn_region_size);
@@ -637,6 +639,7 @@ void __init uv_system_init(void)
                 uv_cpu_hub_info(cpu)->pnode_mask = pnode_mask;
                 uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
                 uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
+                uv_cpu_hub_info(cpu)->gnode_extra = gnode_extra;
                 uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
                 uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
                 uv_cpu_hub_info(cpu)->scir.offset = SCIR_LOCAL_MMR_BASE + lcpu;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 728b3750a3e8..e5b27d8f1b47 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -6,6 +6,7 @@
 #include <asm/processor.h>
 #include <asm/apic.h>
 #include <asm/cpu.h>
+#include <asm/pci-direct.h>
 
 #ifdef CONFIG_X86_64
 # include <asm/numa_64.h>
@@ -351,6 +352,15 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
             (c->x86_model == 8 && c->x86_mask >= 8))
                 set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 #endif
+#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
+        /* check CPU config space for extended APIC ID */
+        if (c->x86 >= 0xf) {
+                unsigned int val;
+                val = read_pci_config(0, 24, 0, 0x68);
+                if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
+                        set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
+        }
+#endif
 }
 
 static void __cpuinit init_amd(struct cpuinfo_x86 *c)
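An illustrative restatement of the check above with the magic numbers named. The constant names are mine, following the AMD BKDG description of the northbridge register at bus 0, device 0x18, function 0, offset 0x68; this sketch is not part of the patch:

#include <linux/types.h>
#include <asm/pci-direct.h>

#define AMD_NB_BUS                      0
#define AMD_NB_DEV                      24      /* 0x18: first northbridge */
#define AMD_NB_FUNC                     0
#define AMD_NB_LINK_XACT_CTRL           0x68    /* Link Transaction Control */
#define AMD_APIC_EXT_BRDCST_EN          (1 << 17)
#define AMD_APIC_EXT_ID_EN              (1 << 18)

static bool amd_has_extd_apicid(void)
{
        u32 val = read_pci_config(AMD_NB_BUS, AMD_NB_DEV, AMD_NB_FUNC,
                                  AMD_NB_LINK_XACT_CTRL);

        /* both the extended-broadcast and extended-ID enables must be set */
        return (val & (AMD_APIC_EXT_BRDCST_EN | AMD_APIC_EXT_ID_EN)) ==
               (AMD_APIC_EXT_BRDCST_EN | AMD_APIC_EXT_ID_EN);
}

Only when both bits are set does default_get_apic_id() (see the apic.h hunk above) read the full 8-bit APIC ID instead of the low 4 bits.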
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 667188e0b5a0..d2d1ce8170f0 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -192,6 +192,15 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
                         DMI_MATCH(DMI_BOARD_NAME, "0KP561"),
                 },
         },
+        {       /* Handle problems with rebooting on Dell Optiplex 360 with 0T656F */
+                .callback = set_bios_reboot,
+                .ident = "Dell OptiPlex 360",
+                .matches = {
+                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 360"),
+                        DMI_MATCH(DMI_BOARD_NAME, "0T656F"),
+                },
+        },
         {       /* Handle problems with rebooting on Dell 2400's */
                 .callback = set_bios_reboot,
                 .ident = "Dell PowerEdge 2400",
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index 8c7b03b0cfcb..124d40c575df 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -715,7 +715,12 @@ uv_activation_descriptor_init(int node, int pnode)
         struct bau_desc *adp;
         struct bau_desc *ad2;
 
-        adp = (struct bau_desc *)kmalloc_node(16384, GFP_KERNEL, node);
+        /*
+         * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR)
+         * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per blade
+         */
+        adp = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)*
+                UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
         BUG_ON(!adp);
 
         pa = uv_gpa(adp); /* need the real nasid*/
@@ -729,7 +734,13 @@ uv_activation_descriptor_init(int node, int pnode)
                         (n << UV_DESC_BASE_PNODE_SHIFT | m));
         }
 
-        for (i = 0, ad2 = adp; i < UV_ACTIVATION_DESCRIPTOR_SIZE; i++, ad2++) {
+        /*
+         * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
+         * cpu even though we only use the first one; one descriptor can
+         * describe a broadcast to 256 nodes.
+         */
+        for (i = 0, ad2 = adp; i < (UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR);
+                i++, ad2++) {
                 memset(ad2, 0, sizeof(struct bau_desc));
                 ad2->header.sw_ack_flag = 1;
                 /*
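Sanity-checking the numbers in the new comments (assuming sizeof(struct bau_desc) is 64 bytes, as the comment states): 64 * UV_ITEMS_PER_DESCRIPTOR (8) * UV_ADP_SIZE (32) = 16384, so the sizeof()-based kmalloc_node() allocates exactly the 16 KB that the old hard-coded constant did, while the rewritten loop now initializes all 32 * 8 = 256 descriptors instead of only the first 32.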
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index b8035a0f4048..9c4e62539058 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -287,10 +287,9 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
         info->regs.pt.ds = 0;
         info->regs.pt.es = 0;
         info->regs.pt.fs = 0;
-
-/* we are clearing gs later just before "jmp resume_userspace",
- * because it is not saved/restored.
- */
+#ifndef CONFIG_X86_32_LAZY_GS
+        info->regs.pt.gs = 0;
+#endif
 
 /*
  * The flags register is also special: we cannot trust that the user
@@ -343,7 +342,9 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
         __asm__ __volatile__(
                 "movl %0,%%esp\n\t"
                 "movl %1,%%ebp\n\t"
+#ifdef CONFIG_X86_32_LAZY_GS
                 "mov %2, %%gs\n\t"
+#endif
                 "jmp resume_userspace"
                 : /* no outputs */
                 :"r" (&info->regs), "r" (task_thread_info(tsk)), "r" (0));
diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c
index 605c8be06217..c0bedcd10f97 100644
--- a/arch/x86/mm/memtest.c
+++ b/arch/x86/mm/memtest.c
@@ -40,23 +40,23 @@ static void __init reserve_bad_mem(u64 pattern, u64 start_bad, u64 end_bad)
 
 static void __init memtest(u64 pattern, u64 start_phys, u64 size)
 {
-        u64 i, count;
-        u64 *start;
+        u64 *p;
+        void *start, *end;
         u64 start_bad, last_bad;
         u64 start_phys_aligned;
         size_t incr;
 
         incr = sizeof(pattern);
         start_phys_aligned = ALIGN(start_phys, incr);
-        count = (size - (start_phys_aligned - start_phys))/incr;
         start = __va(start_phys_aligned);
+        end = start + size - (start_phys_aligned - start_phys);
         start_bad = 0;
         last_bad = 0;
 
-        for (i = 0; i < count; i++)
-                start[i] = pattern;
-        for (i = 0; i < count; i++, start++, start_phys_aligned += incr) {
-                if (*start == pattern)
+        for (p = start; p < end; p++)
+                *p = pattern;
+        for (p = start; p < end; p++, start_phys_aligned += incr) {
+                if (*p == pattern)
                         continue;
                 if (start_phys_aligned == last_bad + incr) {
                         last_bad += incr;
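A standalone illustration of the new fill-then-verify pattern, as a userspace sketch of my own rather than kernel code: walk the buffer with a pointer bounded by start and end instead of maintaining a separate element count.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        const size_t size = 4096;                       /* multiple of 8 */
        const uint64_t pattern = 0x5555555555555555ULL;
        unsigned char *start = malloc(size);
        unsigned char *end;
        uint64_t *p;

        if (!start)
                return 1;
        end = start + size;

        for (p = (uint64_t *)start; (unsigned char *)p < end; p++)     /* fill */
                *p = pattern;
        for (p = (uint64_t *)start; (unsigned char *)p < end; p++)     /* verify */
                if (*p != pattern)
                        printf("mismatch at offset %td\n",
                               (unsigned char *)p - start);

        free(start);
        return 0;
}

The pointer walk avoids the old code's separate count variable and the indexed/incremented mix of start[i] and start++, which is what the patch simplifies.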
