Diffstat (limited to 'arch/x86/platform')
-rw-r--r--	arch/x86/platform/ce4100/falconfalls.dts	6
-rw-r--r--	arch/x86/platform/efi/efi.c	78
-rw-r--r--	arch/x86/platform/efi/efi_64.c	34
-rw-r--r--	arch/x86/platform/mrst/mrst.c	4
-rw-r--r--	arch/x86/platform/uv/tlb_uv.c	92
-rw-r--r--	arch/x86/platform/uv/uv_time.c	6
6 files changed, 135 insertions(+), 85 deletions(-)
diff --git a/arch/x86/platform/ce4100/falconfalls.dts b/arch/x86/platform/ce4100/falconfalls.dts
index 2d6d226f2b10..e70be38ce039 100644
--- a/arch/x86/platform/ce4100/falconfalls.dts
+++ b/arch/x86/platform/ce4100/falconfalls.dts
@@ -347,7 +347,7 @@
 					"pciclass0c03";
 
 				reg = <0x16800 0x0 0x0 0x0 0x0>;
-				interrupts = <22 3>;
+				interrupts = <22 1>;
 			};
 
 			usb@d,1 {
@@ -357,7 +357,7 @@
 					"pciclass0c03";
 
 				reg = <0x16900 0x0 0x0 0x0 0x0>;
-				interrupts = <22 3>;
+				interrupts = <22 1>;
 			};
 
 			sata@e,0 {
@@ -367,7 +367,7 @@
 					"pciclass0106";
 
 				reg = <0x17000 0x0 0x0 0x0 0x0>;
-				interrupts = <23 3>;
+				interrupts = <23 1>;
 			};
 
 			flash@f,0 {
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 0fe27d7c6258..b30aa26a8df2 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -145,17 +145,6 @@ static void virt_efi_reset_system(int reset_type,
 			      data_size, data);
 }
 
-static efi_status_t virt_efi_set_virtual_address_map(
-	unsigned long memory_map_size,
-	unsigned long descriptor_size,
-	u32 descriptor_version,
-	efi_memory_desc_t *virtual_map)
-{
-	return efi_call_virt4(set_virtual_address_map,
-			      memory_map_size, descriptor_size,
-			      descriptor_version, virtual_map);
-}
-
 static efi_status_t __init phys_efi_set_virtual_address_map(
 	unsigned long memory_map_size,
 	unsigned long descriptor_size,
@@ -468,11 +457,25 @@ void __init efi_init(void)
 #endif
 }
 
+void __init efi_set_executable(efi_memory_desc_t *md, bool executable)
+{
+	u64 addr, npages;
+
+	addr = md->virt_addr;
+	npages = md->num_pages;
+
+	memrange_efi_to_native(&addr, &npages);
+
+	if (executable)
+		set_memory_x(addr, npages);
+	else
+		set_memory_nx(addr, npages);
+}
+
 static void __init runtime_code_page_mkexec(void)
 {
 	efi_memory_desc_t *md;
 	void *p;
-	u64 addr, npages;
 
 	/* Make EFI runtime service code area executable */
 	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
@@ -481,10 +484,7 @@ static void __init runtime_code_page_mkexec(void)
 		if (md->type != EFI_RUNTIME_SERVICES_CODE)
 			continue;
 
-		addr = md->virt_addr;
-		npages = md->num_pages;
-		memrange_efi_to_native(&addr, &npages);
-		set_memory_x(addr, npages);
+		efi_set_executable(md, true);
 	}
 }
 
@@ -498,13 +498,42 @@ static void __init runtime_code_page_mkexec(void)
  */
 void __init efi_enter_virtual_mode(void)
 {
-	efi_memory_desc_t *md;
+	efi_memory_desc_t *md, *prev_md = NULL;
 	efi_status_t status;
 	unsigned long size;
 	u64 end, systab, addr, npages, end_pfn;
-	void *p, *va;
+	void *p, *va, *new_memmap = NULL;
+	int count = 0;
 
 	efi.systab = NULL;
+
+	/* Merge contiguous regions of the same type and attribute */
+	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+		u64 prev_size;
+		md = p;
+
+		if (!prev_md) {
+			prev_md = md;
+			continue;
+		}
+
+		if (prev_md->type != md->type ||
+		    prev_md->attribute != md->attribute) {
+			prev_md = md;
+			continue;
+		}
+
+		prev_size = prev_md->num_pages << EFI_PAGE_SHIFT;
+
+		if (md->phys_addr == (prev_md->phys_addr + prev_size)) {
+			prev_md->num_pages += md->num_pages;
+			md->type = EFI_RESERVED_TYPE;
+			md->attribute = 0;
+			continue;
+		}
+		prev_md = md;
+	}
+
 	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
 		md = p;
 		if (!(md->attribute & EFI_MEMORY_RUNTIME))
@@ -541,15 +570,21 @@ void __init efi_enter_virtual_mode(void)
 			systab += md->virt_addr - md->phys_addr;
 			efi.systab = (efi_system_table_t *) (unsigned long) systab;
 		}
+		new_memmap = krealloc(new_memmap,
+				      (count + 1) * memmap.desc_size,
+				      GFP_KERNEL);
+		memcpy(new_memmap + (count * memmap.desc_size), md,
+		       memmap.desc_size);
+		count++;
 	}
 
 	BUG_ON(!efi.systab);
 
 	status = phys_efi_set_virtual_address_map(
-		memmap.desc_size * memmap.nr_map,
+		memmap.desc_size * count,
 		memmap.desc_size,
 		memmap.desc_version,
-		memmap.phys_map);
+		(efi_memory_desc_t *)__pa(new_memmap));
 
 	if (status != EFI_SUCCESS) {
 		printk(KERN_ALERT "Unable to switch EFI into virtual mode "
@@ -572,11 +607,12 @@ void __init efi_enter_virtual_mode(void)
 	efi.set_variable = virt_efi_set_variable;
 	efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count;
 	efi.reset_system = virt_efi_reset_system;
-	efi.set_virtual_address_map = virt_efi_set_virtual_address_map;
+	efi.set_virtual_address_map = NULL;
 	if (__supported_pte_mask & _PAGE_NX)
 		runtime_code_page_mkexec();
 	early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size);
 	memmap.map = NULL;
+	kfree(new_memmap);
 }
 
 /*
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index ac0621a7ac3d..2649426a7905 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -41,22 +41,7 @@
 static pgd_t save_pgd __initdata;
 static unsigned long efi_flags __initdata;
 
-static void __init early_mapping_set_exec(unsigned long start,
-					  unsigned long end,
-					  int executable)
-{
-	unsigned long num_pages;
-
-	start &= PMD_MASK;
-	end = (end + PMD_SIZE - 1) & PMD_MASK;
-	num_pages = (end - start) >> PAGE_SHIFT;
-	if (executable)
-		set_memory_x((unsigned long)__va(start), num_pages);
-	else
-		set_memory_nx((unsigned long)__va(start), num_pages);
-}
-
-static void __init early_runtime_code_mapping_set_exec(int executable)
+static void __init early_code_mapping_set_exec(int executable)
 {
 	efi_memory_desc_t *md;
 	void *p;
@@ -67,11 +52,8 @@ static void __init early_runtime_code_mapping_set_exec(int executable)
 	/* Make EFI runtime service code area executable */
 	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
 		md = p;
-		if (md->type == EFI_RUNTIME_SERVICES_CODE) {
-			unsigned long end;
-			end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
-			early_mapping_set_exec(md->phys_addr, end, executable);
-		}
+		if (md->type == EFI_RUNTIME_SERVICES_CODE)
+			efi_set_executable(md, executable);
 	}
 }
 
@@ -79,7 +61,7 @@ void __init efi_call_phys_prelog(void)
 {
 	unsigned long vaddress;
 
-	early_runtime_code_mapping_set_exec(1);
+	early_code_mapping_set_exec(1);
 	local_irq_save(efi_flags);
 	vaddress = (unsigned long)__va(0x0UL);
 	save_pgd = *pgd_offset_k(0x0UL);
@@ -95,7 +77,7 @@ void __init efi_call_phys_epilog(void)
 	set_pgd(pgd_offset_k(0x0UL), save_pgd);
 	__flush_tlb_all();
 	local_irq_restore(efi_flags);
-	early_runtime_code_mapping_set_exec(0);
+	early_code_mapping_set_exec(0);
 }
 
 void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
@@ -107,8 +89,10 @@ void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
 		return ioremap(phys_addr, size);
 
 	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
-	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size)
-		return NULL;
+	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
+		unsigned long top = last_map_pfn << PAGE_SHIFT;
+		efi_ioremap(top, size - (top - phys_addr), type);
+	}
 
 	return (void __iomem *)__va(phys_addr);
 }
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index 275dbc19e2cf..7000e74b3087 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -194,7 +194,7 @@ static unsigned long __init mrst_calibrate_tsc(void)
 	return 0;
 }
 
-void __init mrst_time_init(void)
+static void __init mrst_time_init(void)
 {
 	sfi_table_parse(SFI_SIG_MTMR, NULL, NULL, sfi_parse_mtmr);
 	switch (mrst_timer_options) {
@@ -216,7 +216,7 @@ void __init mrst_time_init(void)
 	apbt_time_init();
 }
 
-void __cpuinit mrst_arch_setup(void)
+static void __cpuinit mrst_arch_setup(void)
 {
 	if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 0x27)
 		__mrst_cpu_chip = MRST_CPU_CHIP_PENWELL;
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 7cb6424317f6..c58e0ea39ef5 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -699,16 +699,17 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 					  struct mm_struct *mm,
 					  unsigned long va, unsigned int cpu)
 {
-	int tcpu;
-	int uvhub;
 	int locals = 0;
 	int remotes = 0;
 	int hubs = 0;
+	int tcpu;
+	int tpnode;
 	struct bau_desc *bau_desc;
 	struct cpumask *flush_mask;
 	struct ptc_stats *stat;
 	struct bau_control *bcp;
 	struct bau_control *tbcp;
+	struct hub_and_pnode *hpp;
 
 	/* kernel was booted 'nobau' */
 	if (nobau)
@@ -750,11 +751,18 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 	bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu;
 	bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
 
-	/* cpu statistics */
 	for_each_cpu(tcpu, flush_mask) {
-		uvhub = uv_cpu_to_blade_id(tcpu);
-		bau_uvhub_set(uvhub, &bau_desc->distribution);
-		if (uvhub == bcp->uvhub)
+		/*
+		 * The distribution vector is a bit map of pnodes, relative
+		 * to the partition base pnode (and the partition base nasid
+		 * in the header).
+		 * Translate cpu to pnode and hub using an array stored
+		 * in local memory.
+		 */
+		hpp = &bcp->socket_master->target_hub_and_pnode[tcpu];
+		tpnode = hpp->pnode - bcp->partition_base_pnode;
+		bau_uvhub_set(tpnode, &bau_desc->distribution);
+		if (hpp->uvhub == bcp->uvhub)
 			locals++;
 		else
 			remotes++;
@@ -855,7 +863,7 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
  * an interrupt, but causes an error message to be returned to
  * the sender.
  */
-static void uv_enable_timeouts(void)
+static void __init uv_enable_timeouts(void)
 {
 	int uvhub;
 	int nuvhubs;
@@ -1326,10 +1334,10 @@ static int __init uv_ptc_init(void)
 }
 
 /*
- * initialize the sending side's sending buffers
+ * Initialize the sending side's sending buffers.
  */
 static void
-uv_activation_descriptor_init(int node, int pnode)
+uv_activation_descriptor_init(int node, int pnode, int base_pnode)
 {
 	int i;
 	int cpu;
@@ -1352,11 +1360,11 @@ uv_activation_descriptor_init(int node, int pnode)
 	n = pa >> uv_nshift;
 	m = pa & uv_mmask;
 
+	/* the 14-bit pnode */
 	uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE,
 			      (n << UV_DESC_BASE_PNODE_SHIFT | m));
-
 	/*
-	 * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
+	 * Initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
 	 * cpu even though we only use the first one; one descriptor can
 	 * describe a broadcast to 256 uv hubs.
 	 */
@@ -1365,12 +1373,13 @@ uv_activation_descriptor_init(int node, int pnode)
 		memset(bd2, 0, sizeof(struct bau_desc));
 		bd2->header.sw_ack_flag = 1;
 		/*
-		 * base_dest_nodeid is the nasid of the first uvhub
-		 * in the partition. The bit map will indicate uvhub numbers,
-		 * which are 0-N in a partition. Pnodes are unique system-wide.
+		 * The base_dest_nasid set in the message header is the nasid
+		 * of the first uvhub in the partition. The bit map will
+		 * indicate destination pnode numbers relative to that base.
+		 * They may not be consecutive if nasid striding is being used.
 		 */
-		bd2->header.base_dest_nodeid = UV_PNODE_TO_NASID(uv_partition_base_pnode);
-		bd2->header.dest_subnodeid = 0x10; /* the LB */
+		bd2->header.base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
+		bd2->header.dest_subnodeid = UV_LB_SUBNODEID;
 		bd2->header.command = UV_NET_ENDPOINT_INTD;
 		bd2->header.int_both = 1;
 		/*
@@ -1442,7 +1451,7 @@ uv_payload_queue_init(int node, int pnode)
 /*
  * Initialization of each UV hub's structures
  */
-static void __init uv_init_uvhub(int uvhub, int vector)
+static void __init uv_init_uvhub(int uvhub, int vector, int base_pnode)
 {
 	int node;
 	int pnode;
@@ -1450,11 +1459,11 @@ static void __init uv_init_uvhub(int uvhub, int vector)
 
 	node = uvhub_to_first_node(uvhub);
 	pnode = uv_blade_to_pnode(uvhub);
-	uv_activation_descriptor_init(node, pnode);
+	uv_activation_descriptor_init(node, pnode, base_pnode);
 	uv_payload_queue_init(node, pnode);
 	/*
-	 * the below initialization can't be in firmware because the
-	 * messaging IRQ will be determined by the OS
+	 * The below initialization can't be in firmware because the
+	 * messaging IRQ will be determined by the OS.
 	 */
 	apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
 	uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
@@ -1491,10 +1500,11 @@ calculate_destination_timeout(void)
 /*
  * initialize the bau_control structure for each cpu
  */
-static int __init uv_init_per_cpu(int nuvhubs)
+static int __init uv_init_per_cpu(int nuvhubs, int base_part_pnode)
 {
 	int i;
 	int cpu;
+	int tcpu;
 	int pnode;
 	int uvhub;
 	int have_hmaster;
@@ -1528,6 +1538,15 @@ static int __init uv_init_per_cpu(int nuvhubs)
 		bcp = &per_cpu(bau_control, cpu);
 		memset(bcp, 0, sizeof(struct bau_control));
 		pnode = uv_cpu_hub_info(cpu)->pnode;
+		if ((pnode - base_part_pnode) >= UV_DISTRIBUTION_SIZE) {
+			printk(KERN_EMERG
+				"cpu %d pnode %d-%d beyond %d; BAU disabled\n",
+				cpu, pnode, base_part_pnode,
+				UV_DISTRIBUTION_SIZE);
+			return 1;
+		}
+		bcp->osnode = cpu_to_node(cpu);
+		bcp->partition_base_pnode = uv_partition_base_pnode;
 		uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
 		*(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
 		bdp = &uvhub_descs[uvhub];
@@ -1536,7 +1555,7 @@ static int __init uv_init_per_cpu(int nuvhubs)
 		bdp->pnode = pnode;
 		/* kludge: 'assuming' one node per socket, and assuming that
 		   disabling a socket just leaves a gap in node numbers */
-		socket = (cpu_to_node(cpu) & 1);
+		socket = bcp->osnode & 1;
 		bdp->socket_mask |= (1 << socket);
 		sdp = &bdp->socket[socket];
 		sdp->cpu_number[sdp->num_cpus] = cpu;
@@ -1585,6 +1604,20 @@ static int __init uv_init_per_cpu(int nuvhubs)
 nextsocket:
 			socket++;
 			socket_mask = (socket_mask >> 1);
+			/* each socket gets a local array of pnodes/hubs */
+			bcp = smaster;
+			bcp->target_hub_and_pnode = kmalloc_node(
+				sizeof(struct hub_and_pnode) *
+				num_possible_cpus(), GFP_KERNEL, bcp->osnode);
+			memset(bcp->target_hub_and_pnode, 0,
+				sizeof(struct hub_and_pnode) *
+				num_possible_cpus());
+			for_each_present_cpu(tcpu) {
+				bcp->target_hub_and_pnode[tcpu].pnode =
+					uv_cpu_hub_info(tcpu)->pnode;
+				bcp->target_hub_and_pnode[tcpu].uvhub =
+					uv_cpu_hub_info(tcpu)->numa_blade_id;
+			}
 		}
 	}
 	kfree(uvhub_descs);
@@ -1637,21 +1670,22 @@ static int __init uv_bau_init(void)
 	spin_lock_init(&disable_lock);
 	congested_cycles = microsec_2_cycles(congested_response_us);
 
-	if (uv_init_per_cpu(nuvhubs)) {
-		nobau = 1;
-		return 0;
-	}
-
 	uv_partition_base_pnode = 0x7fffffff;
-	for (uvhub = 0; uvhub < nuvhubs; uvhub++)
+	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
 		if (uv_blade_nr_possible_cpus(uvhub) &&
 		    (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode))
 			uv_partition_base_pnode = uv_blade_to_pnode(uvhub);
+	}
+
+	if (uv_init_per_cpu(nuvhubs, uv_partition_base_pnode)) {
+		nobau = 1;
+		return 0;
+	}
 
 	vector = UV_BAU_MESSAGE;
 	for_each_possible_blade(uvhub)
 		if (uv_blade_nr_possible_cpus(uvhub))
-			uv_init_uvhub(uvhub, vector);
+			uv_init_uvhub(uvhub, vector, uv_partition_base_pnode);
 
 	uv_enable_timeouts();
 	alloc_intr_gate(vector, uv_bau_message_intr1);
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
index 9daf5d1af9f1..0eb90184515f 100644
--- a/arch/x86/platform/uv/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
@@ -40,7 +40,6 @@ static struct clocksource clocksource_uv = {
 	.rating		= 400,
 	.read		= uv_read_rtc,
 	.mask		= (cycle_t)UVH_RTC_REAL_TIME_CLOCK_MASK,
-	.shift		= 10,
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
46 45
@@ -372,14 +371,11 @@ static __init int uv_rtc_setup_clock(void)
 	if (!is_uv_system())
 		return -ENODEV;
 
-	clocksource_uv.mult = clocksource_hz2mult(sn_rtc_cycles_per_second,
-				clocksource_uv.shift);
-
 	/* If single blade, prefer tsc */
 	if (uv_num_possible_blades() == 1)
 		clocksource_uv.rating = 250;
 
-	rc = clocksource_register(&clocksource_uv);
+	rc = clocksource_register_hz(&clocksource_uv, sn_rtc_cycles_per_second);
 	if (rc)
 		printk(KERN_INFO "UV RTC clocksource failed rc %d\n", rc);
 	else