about summary refs log tree commit diff stats
path: root/arch/powerpc/mm/numa.c
diff options
context:
space:
mode:
authorJesse Larrew <jlarrew@linux.vnet.ibm.com>2013-04-24 02:00:35 -0400
committerBenjamin Herrenschmidt <benh@kernel.crashing.org>2013-04-26 02:08:23 -0400
commit5d88aa85c00bb4026dd986430dc496effc637d42 (patch)
tree206edc195104eb8258a36b25f5247055211164c2 /arch/powerpc/mm/numa.c
parent8002b0c54b16931ad71771e6c97e46aca1be4456 (diff)
powerpc/pseries: Update CPU maps when device tree is updated
Platform events such as partition migration or the new PRRN firmware feature can cause the NUMA characteristics of a CPU to change, and these changes will be reflected in the device tree nodes for the affected CPUs. This patch registers a handler for Open Firmware device tree updates and reconfigures the CPU and node maps whenever the associativity changes. Currently, this is accomplished by marking the affected CPUs in the cpu_associativity_changes_mask and allowing arch_update_cpu_topology() to retrieve the new associativity information using hcall_vphn(). Protecting the NUMA cpu maps from concurrent access during an update operation will be addressed in a subsequent patch in this series. Signed-off-by: Nathan Fontenot <nfont@linux.vnet.ibm.com> Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/mm/numa.c')
-rw-r--r--arch/powerpc/mm/numa.c99
1 file changed, 75 insertions(+), 24 deletions(-)
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 4cee83592b0c..42f50c352242 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1257,7 +1257,8 @@ u64 memory_hotplug_max(void)
1257static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS]; 1257static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
1258static cpumask_t cpu_associativity_changes_mask; 1258static cpumask_t cpu_associativity_changes_mask;
1259static int vphn_enabled; 1259static int vphn_enabled;
1260static void set_topology_timer(void); 1260static int prrn_enabled;
1261static void reset_topology_timer(void);
1261 1262
1262/* 1263/*
1263 * Store the current values of the associativity change counters in the 1264 * Store the current values of the associativity change counters in the
@@ -1293,11 +1294,9 @@ static void setup_cpu_associativity_change_counters(void)
1293 */ 1294 */
1294static int update_cpu_associativity_changes_mask(void) 1295static int update_cpu_associativity_changes_mask(void)
1295{ 1296{
1296 int cpu, nr_cpus = 0; 1297 int cpu;
1297 cpumask_t *changes = &cpu_associativity_changes_mask; 1298 cpumask_t *changes = &cpu_associativity_changes_mask;
1298 1299
1299 cpumask_clear(changes);
1300
1301 for_each_possible_cpu(cpu) { 1300 for_each_possible_cpu(cpu) {
1302 int i, changed = 0; 1301 int i, changed = 0;
1303 u8 *counts = vphn_cpu_change_counts[cpu]; 1302 u8 *counts = vphn_cpu_change_counts[cpu];
@@ -1311,11 +1310,10 @@ static int update_cpu_associativity_changes_mask(void)
1311 } 1310 }
1312 if (changed) { 1311 if (changed) {
1313 cpumask_set_cpu(cpu, changes); 1312 cpumask_set_cpu(cpu, changes);
1314 nr_cpus++;
1315 } 1313 }
1316 } 1314 }
1317 1315
1318 return nr_cpus; 1316 return cpumask_weight(changes);
1319} 1317}
1320 1318
1321/* 1319/*
@@ -1416,7 +1414,7 @@ int arch_update_cpu_topology(void)
1416 unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0}; 1414 unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
1417 struct device *dev; 1415 struct device *dev;
1418 1416
1419 for_each_cpu(cpu,&cpu_associativity_changes_mask) { 1417 for_each_cpu(cpu, &cpu_associativity_changes_mask) {
1420 vphn_get_associativity(cpu, associativity); 1418 vphn_get_associativity(cpu, associativity);
1421 nid = associativity_to_nid(associativity); 1419 nid = associativity_to_nid(associativity);
1422 1420
@@ -1438,6 +1436,7 @@ int arch_update_cpu_topology(void)
1438 dev = get_cpu_device(cpu); 1436 dev = get_cpu_device(cpu);
1439 if (dev) 1437 if (dev)
1440 kobject_uevent(&dev->kobj, KOBJ_CHANGE); 1438 kobject_uevent(&dev->kobj, KOBJ_CHANGE);
1439 cpumask_clear_cpu(cpu, &cpu_associativity_changes_mask);
1441 changed = 1; 1440 changed = 1;
1442 } 1441 }
1443 1442
@@ -1457,37 +1456,80 @@ void topology_schedule_update(void)
1457 1456
1458static void topology_timer_fn(unsigned long ignored) 1457static void topology_timer_fn(unsigned long ignored)
1459{ 1458{
1460 if (!vphn_enabled) 1459 if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
1461 return;
1462 if (update_cpu_associativity_changes_mask() > 0)
1463 topology_schedule_update(); 1460 topology_schedule_update();
1464 set_topology_timer(); 1461 else if (vphn_enabled) {
1462 if (update_cpu_associativity_changes_mask() > 0)
1463 topology_schedule_update();
1464 reset_topology_timer();
1465 }
1465} 1466}
1466static struct timer_list topology_timer = 1467static struct timer_list topology_timer =
1467 TIMER_INITIALIZER(topology_timer_fn, 0, 0); 1468 TIMER_INITIALIZER(topology_timer_fn, 0, 0);
1468 1469
1469static void set_topology_timer(void) 1470static void reset_topology_timer(void)
1470{ 1471{
1471 topology_timer.data = 0; 1472 topology_timer.data = 0;
1472 topology_timer.expires = jiffies + 60 * HZ; 1473 topology_timer.expires = jiffies + 60 * HZ;
1473 add_timer(&topology_timer); 1474 mod_timer(&topology_timer, topology_timer.expires);
1475}
1476
1477static void stage_topology_update(int core_id)
1478{
1479 cpumask_or(&cpu_associativity_changes_mask,
1480 &cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
1481 reset_topology_timer();
1482}
1483
1484static int dt_update_callback(struct notifier_block *nb,
1485 unsigned long action, void *data)
1486{
1487 struct of_prop_reconfig *update;
1488 int rc = NOTIFY_DONE;
1489
1490 switch (action) {
1491 case OF_RECONFIG_ADD_PROPERTY:
1492 case OF_RECONFIG_UPDATE_PROPERTY:
1493 update = (struct of_prop_reconfig *)data;
1494 if (!of_prop_cmp(update->dn->type, "cpu")) {
1495 u32 core_id;
1496 of_property_read_u32(update->dn, "reg", &core_id);
1497 stage_topology_update(core_id);
1498 rc = NOTIFY_OK;
1499 }
1500 break;
1501 }
1502
1503 return rc;
1474} 1504}
1475 1505
1506static struct notifier_block dt_update_nb = {
1507 .notifier_call = dt_update_callback,
1508};
1509
1476/* 1510/*
1477 * Start polling for VPHN associativity changes. 1511 * Start polling for associativity changes.
1478 */ 1512 */
1479int start_topology_update(void) 1513int start_topology_update(void)
1480{ 1514{
1481 int rc = 0; 1515 int rc = 0;
1482 1516
1483 /* Disabled until races with load balancing are fixed */ 1517 if (firmware_has_feature(FW_FEATURE_PRRN)) {
1484 if (0 && firmware_has_feature(FW_FEATURE_VPHN) && 1518 if (!prrn_enabled) {
1485 get_lppaca()->shared_proc) { 1519 prrn_enabled = 1;
1486 vphn_enabled = 1; 1520 vphn_enabled = 0;
1487 setup_cpu_associativity_change_counters(); 1521 rc = of_reconfig_notifier_register(&dt_update_nb);
1488 init_timer_deferrable(&topology_timer); 1522 }
1489 set_topology_timer(); 1523 } else if (0 && firmware_has_feature(FW_FEATURE_VPHN) &&
1490 rc = 1; 1524 get_lppaca()->shared_proc) {
1525 /* Disabled until races with load balancing are fixed */
1526 if (!vphn_enabled) {
1527 prrn_enabled = 0;
1528 vphn_enabled = 1;
1529 setup_cpu_associativity_change_counters();
1530 init_timer_deferrable(&topology_timer);
1531 reset_topology_timer();
1532 }
1491 } 1533 }
1492 1534
1493 return rc; 1535 return rc;
@@ -1499,7 +1541,16 @@ __initcall(start_topology_update);
1499 */ 1541 */
1500int stop_topology_update(void) 1542int stop_topology_update(void)
1501{ 1543{
1502 vphn_enabled = 0; 1544 int rc = 0;
1503 return del_timer_sync(&topology_timer); 1545
1546 if (prrn_enabled) {
1547 prrn_enabled = 0;
1548 rc = of_reconfig_notifier_unregister(&dt_update_nb);
1549 } else if (vphn_enabled) {
1550 vphn_enabled = 0;
1551 rc = del_timer_sync(&topology_timer);
1552 }
1553
1554 return rc;
1504} 1555}
1505#endif /* CONFIG_PPC_SPLPAR */ 1556#endif /* CONFIG_PPC_SPLPAR */