author		Christoph Lameter <clameter@sgi.com>	2006-01-08 04:01:02 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-01-08 23:12:44 -0500
commit		1a75a6c825c17249ca49f050a872a04ce0997ce3 (patch)
tree		2ca8fc6513a20e5b4bec67686323ce1f5c8e237c
parent		38e35860dbe6197a4b42eb6e8b47da940b7695dd (diff)
[PATCH] Fold numa_maps into mempolicy.c
First discussed at http://marc.theaimsgroup.com/?t=113149255100001&r=1&w=2

- Use check_range() in mempolicy.c to gather the statistics.
- Improve the numa_maps code in general and fix some comments.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
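With the fold, each vma line in /proc/<pid>/numa_maps is built from mpol_to_str()
plus the gathered counters, i.e. "<start> <policy> pages=N mapped=N maxref=N
[anon=N] [N<node>=N ...]". For illustration only, a line in the new format would
look like this (addresses and counts below are made up, not from a real run):

    2000000000 interleave=0-3 pages=512 mapped=480 maxref=2 anon=512 N0=128 N1=128 N2=128 N3=128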
-rw-r--r--	fs/proc/task_mmu.c	127
-rw-r--r--	mm/mempolicy.c	138
2 files changed, 142 insertions(+), 123 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 50bd5a8f0446..0eaad41f4658 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -390,129 +390,12 @@ struct seq_operations proc_pid_smaps_op = {
 };
 
 #ifdef CONFIG_NUMA
-
-struct numa_maps {
-	unsigned long pages;
-	unsigned long anon;
-	unsigned long mapped;
-	unsigned long mapcount_max;
-	unsigned long node[MAX_NUMNODES];
-};
-
-/*
- * Calculate numa node maps for a vma
- */
-static struct numa_maps *get_numa_maps(struct vm_area_struct *vma)
-{
-	int i;
-	struct page *page;
-	unsigned long vaddr;
-	struct numa_maps *md = kmalloc(sizeof(struct numa_maps), GFP_KERNEL);
-
-	if (!md)
-		return NULL;
-	md->pages = 0;
-	md->anon = 0;
-	md->mapped = 0;
-	md->mapcount_max = 0;
-	for_each_node(i)
-		md->node[i] =0;
-
-	for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) {
-		page = follow_page(vma, vaddr, 0);
-		if (page) {
-			int count = page_mapcount(page);
-
-			if (count)
-				md->mapped++;
-			if (count > md->mapcount_max)
-				md->mapcount_max = count;
-			md->pages++;
-			if (PageAnon(page))
-				md->anon++;
-			md->node[page_to_nid(page)]++;
-		}
-		cond_resched();
-	}
-	return md;
-}
-
-static int show_numa_map(struct seq_file *m, void *v)
-{
-	struct task_struct *task = m->private;
-	struct vm_area_struct *vma = v;
-	struct mempolicy *pol;
-	struct numa_maps *md;
-	struct zone **z;
-	int n;
-	int first;
-
-	if (!vma->vm_mm)
-		return 0;
-
-	md = get_numa_maps(vma);
-	if (!md)
-		return 0;
-
-	seq_printf(m, "%08lx", vma->vm_start);
-	pol = get_vma_policy(task, vma, vma->vm_start);
-	/* Print policy */
-	switch (pol->policy) {
-	case MPOL_PREFERRED:
-		seq_printf(m, " prefer=%d", pol->v.preferred_node);
-		break;
-	case MPOL_BIND:
-		seq_printf(m, " bind={");
-		first = 1;
-		for (z = pol->v.zonelist->zones; *z; z++) {
-
-			if (!first)
-				seq_putc(m, ',');
-			else
-				first = 0;
-			seq_printf(m, "%d/%s", (*z)->zone_pgdat->node_id,
-					(*z)->name);
-		}
-		seq_putc(m, '}');
-		break;
-	case MPOL_INTERLEAVE:
-		seq_printf(m, " interleave={");
-		first = 1;
-		for_each_node(n) {
-			if (node_isset(n, pol->v.nodes)) {
-				if (!first)
-					seq_putc(m,',');
-				else
-					first = 0;
-				seq_printf(m, "%d",n);
-			}
-		}
-		seq_putc(m, '}');
-		break;
-	default:
-		seq_printf(m," default");
-		break;
-	}
-	seq_printf(m, " MaxRef=%lu Pages=%lu Mapped=%lu",
-		md->mapcount_max, md->pages, md->mapped);
-	if (md->anon)
-		seq_printf(m," Anon=%lu",md->anon);
-
-	for_each_online_node(n) {
-		if (md->node[n])
-			seq_printf(m, " N%d=%lu", n, md->node[n]);
-	}
-	seq_putc(m, '\n');
-	kfree(md);
-	if (m->count < m->size) /* vma is copied successfully */
-		m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
-	return 0;
-}
+extern int show_numa_map(struct seq_file *m, void *v);
 
 struct seq_operations proc_pid_numa_maps_op = {
-	.start	= m_start,
-	.next	= m_next,
-	.stop	= m_stop,
-	.show	= show_numa_map
+        .start  = m_start,
+        .next   = m_next,
+        .stop   = m_stop,
+        .show   = show_numa_map
 };
 #endif
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 270e9a39ec15..44b9d69900bc 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -84,6 +84,8 @@
 #include <linux/compat.h>
 #include <linux/mempolicy.h>
 #include <linux/swap.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
 
 #include <asm/tlbflush.h>
 #include <asm/uaccess.h>
@@ -91,6 +93,7 @@
 /* Internal flags */
 #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
+#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)		/* Gather statistics */
 
 static kmem_cache_t *policy_cache;
 static kmem_cache_t *sn_cache;
@@ -228,6 +231,8 @@ static void migrate_page_add(struct vm_area_struct *vma,
 	}
 }
 
+static void gather_stats(struct page *, void *);
+
 /* Scan through pages checking if pages follow certain conditions. */
 static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long addr, unsigned long end,
@@ -252,7 +257,9 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
 			continue;
 
-		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
+		if (flags & MPOL_MF_STATS)
+			gather_stats(page, private);
+		else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
 			migrate_page_add(vma, page, private, flags);
 		else
 			break;
@@ -1460,3 +1467,132 @@ void numa_policy_rebind(const nodemask_t *old, const nodemask_t *new)
 {
 	rebind_policy(current->mempolicy, old, new);
 }
+
+/*
+ * Display pages allocated per node and memory policy via /proc.
+ */
+
+static const char *policy_types[] = { "default", "prefer", "bind",
+				      "interleave" };
+
+/*
+ * Convert a mempolicy into a string.
+ * Returns the number of characters in buffer (if positive)
+ * or an error (negative)
+ */
+static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
+{
+	char *p = buffer;
+	int l;
+	nodemask_t nodes;
+	int mode = pol ? pol->policy : MPOL_DEFAULT;
+
+	switch (mode) {
+	case MPOL_DEFAULT:
+		nodes_clear(nodes);
+		break;
+
+	case MPOL_PREFERRED:
+		nodes_clear(nodes);
+		node_set(pol->v.preferred_node, nodes);
+		break;
+
+	case MPOL_BIND:
+		get_zonemask(pol, &nodes);
+		break;
+
+	case MPOL_INTERLEAVE:
+		nodes = pol->v.nodes;
+		break;
+
+	default:
+		BUG();
+		return -EFAULT;
+	}
+
+	l = strlen(policy_types[mode]);
+	if (buffer + maxlen < p + l + 1)
+		return -ENOSPC;
+
+	strcpy(p, policy_types[mode]);
+	p += l;
+
+	if (!nodes_empty(nodes)) {
+		if (buffer + maxlen < p + 2)
+			return -ENOSPC;
+		*p++ = '=';
+		p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
+	}
+	return p - buffer;
+}
+
+struct numa_maps {
+	unsigned long pages;
+	unsigned long anon;
+	unsigned long mapped;
+	unsigned long mapcount_max;
+	unsigned long node[MAX_NUMNODES];
+};
+
+static void gather_stats(struct page *page, void *private)
+{
+	struct numa_maps *md = private;
+	int count = page_mapcount(page);
+
+	if (count)
+		md->mapped++;
+
+	if (count > md->mapcount_max)
+		md->mapcount_max = count;
+
+	md->pages++;
+
+	if (PageAnon(page))
+		md->anon++;
+
+	md->node[page_to_nid(page)]++;
+	cond_resched();
+}
+
+int show_numa_map(struct seq_file *m, void *v)
+{
+	struct task_struct *task = m->private;
+	struct vm_area_struct *vma = v;
+	struct numa_maps *md;
+	int n;
+	char buffer[50];
+
+	if (!vma->vm_mm)
+		return 0;
+
+	md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
+	if (!md)
+		return 0;
+
+	check_pgd_range(vma, vma->vm_start, vma->vm_end,
+			&node_online_map, MPOL_MF_STATS, md);
+
+	if (md->pages) {
+		mpol_to_str(buffer, sizeof(buffer),
+			    get_vma_policy(task, vma, vma->vm_start));
+
+		seq_printf(m, "%08lx %s pages=%lu mapped=%lu maxref=%lu",
+			   vma->vm_start, buffer, md->pages,
+			   md->mapped, md->mapcount_max);
+
+		if (md->anon)
+			seq_printf(m," anon=%lu",md->anon);
+
+		for_each_online_node(n)
+			if (md->node[n])
+				seq_printf(m, " N%d=%lu", n, md->node[n]);
+
+		seq_putc(m, '\n');
+	}
+	kfree(md);
+
+	if (m->count < m->size)
+		m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
+	return 0;
+}
+
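Since show_numa_map() feeds an ordinary seq_file, the result is consumed like any
other /proc file. A minimal userspace reader, as a sketch (not part of this patch;
it only assumes the /proc/<pid>/numa_maps path that the kernel side above exports):

#include <stdio.h>

int main(void)
{
	char line[1024];
	FILE *f = fopen("/proc/self/numa_maps", "r");

	if (!f) {
		/* Fails when CONFIG_NUMA is off or the kernel predates this patch */
		perror("/proc/self/numa_maps");
		return 1;
	}
	/* Each line: "<start> <policy> pages=N mapped=N maxref=N [anon=N] [N<node>=N ...]" */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}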