diff options
author | Christoph Lameter <clameter@sgi.com> | 2006-01-08 04:01:02 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-01-08 23:12:44 -0500 |
commit | 1a75a6c825c17249ca49f050a872a04ce0997ce3 (patch) | |
tree | 2ca8fc6513a20e5b4bec67686323ce1f5c8e237c /mm/mempolicy.c | |
parent | 38e35860dbe6197a4b42eb6e8b47da940b7695dd (diff) |
[PATCH] Fold numa_maps into mempolicy.c
First discussed at http://marc.theaimsgroup.com/?t=113149255100001&r=1&w=2
- Use the check_range() in mempolicy.c to gather statistics.
- Improve the numa_maps code in general and fix some comments.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/mempolicy.c')
-rw-r--r-- | mm/mempolicy.c | 138 |
1 files changed, 137 insertions, 1 deletions
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 270e9a39ec15..44b9d69900bc 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
@@ -84,6 +84,8 @@ | |||
84 | #include <linux/compat.h> | 84 | #include <linux/compat.h> |
85 | #include <linux/mempolicy.h> | 85 | #include <linux/mempolicy.h> |
86 | #include <linux/swap.h> | 86 | #include <linux/swap.h> |
87 | #include <linux/seq_file.h> | ||
88 | #include <linux/proc_fs.h> | ||
87 | 89 | ||
88 | #include <asm/tlbflush.h> | 90 | #include <asm/tlbflush.h> |
89 | #include <asm/uaccess.h> | 91 | #include <asm/uaccess.h> |
@@ -91,6 +93,7 @@ | |||
91 | /* Internal flags */ | 93 | /* Internal flags */ |
92 | #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */ | 94 | #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */ |
93 | #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */ | 95 | #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */ |
96 | #define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2) /* Gather statistics */ | ||
94 | 97 | ||
95 | static kmem_cache_t *policy_cache; | 98 | static kmem_cache_t *policy_cache; |
96 | static kmem_cache_t *sn_cache; | 99 | static kmem_cache_t *sn_cache; |
@@ -228,6 +231,8 @@ static void migrate_page_add(struct vm_area_struct *vma, | |||
228 | } | 231 | } |
229 | } | 232 | } |
230 | 233 | ||
234 | static void gather_stats(struct page *, void *); | ||
235 | |||
231 | /* Scan through pages checking if pages follow certain conditions. */ | 236 | /* Scan through pages checking if pages follow certain conditions. */ |
232 | static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd, | 237 | static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd, |
233 | unsigned long addr, unsigned long end, | 238 | unsigned long addr, unsigned long end, |
@@ -252,7 +257,9 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd, | |||
252 | if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT)) | 257 | if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT)) |
253 | continue; | 258 | continue; |
254 | 259 | ||
255 | if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) | 260 | if (flags & MPOL_MF_STATS) |
261 | gather_stats(page, private); | ||
262 | else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) | ||
256 | migrate_page_add(vma, page, private, flags); | 263 | migrate_page_add(vma, page, private, flags); |
257 | else | 264 | else |
258 | break; | 265 | break; |
@@ -1460,3 +1467,132 @@ void numa_policy_rebind(const nodemask_t *old, const nodemask_t *new) | |||
1460 | { | 1467 | { |
1461 | rebind_policy(current->mempolicy, old, new); | 1468 | rebind_policy(current->mempolicy, old, new); |
1462 | } | 1469 | } |
1470 | |||
1471 | /* | ||
1472 | * Display pages allocated per node and memory policy via /proc. | ||
1473 | */ | ||
1474 | |||
1475 | static const char *policy_types[] = { "default", "prefer", "bind", | ||
1476 | "interleave" }; | ||
1477 | |||
1478 | /* | ||
1479 | * Convert a mempolicy into a string. | ||
1480 | * Returns the number of characters in buffer (if positive) | ||
1481 | * or an error (negative) | ||
1482 | */ | ||
1483 | static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) | ||
1484 | { | ||
1485 | char *p = buffer; | ||
1486 | int l; | ||
1487 | nodemask_t nodes; | ||
1488 | int mode = pol ? pol->policy : MPOL_DEFAULT; | ||
1489 | |||
1490 | switch (mode) { | ||
1491 | case MPOL_DEFAULT: | ||
1492 | nodes_clear(nodes); | ||
1493 | break; | ||
1494 | |||
1495 | case MPOL_PREFERRED: | ||
1496 | nodes_clear(nodes); | ||
1497 | node_set(pol->v.preferred_node, nodes); | ||
1498 | break; | ||
1499 | |||
1500 | case MPOL_BIND: | ||
1501 | get_zonemask(pol, &nodes); | ||
1502 | break; | ||
1503 | |||
1504 | case MPOL_INTERLEAVE: | ||
1505 | nodes = pol->v.nodes; | ||
1506 | break; | ||
1507 | |||
1508 | default: | ||
1509 | BUG(); | ||
1510 | return -EFAULT; | ||
1511 | } | ||
1512 | |||
1513 | l = strlen(policy_types[mode]); | ||
1514 | if (buffer + maxlen < p + l + 1) | ||
1515 | return -ENOSPC; | ||
1516 | |||
1517 | strcpy(p, policy_types[mode]); | ||
1518 | p += l; | ||
1519 | |||
1520 | if (!nodes_empty(nodes)) { | ||
1521 | if (buffer + maxlen < p + 2) | ||
1522 | return -ENOSPC; | ||
1523 | *p++ = '='; | ||
1524 | p += nodelist_scnprintf(p, buffer + maxlen - p, nodes); | ||
1525 | } | ||
1526 | return p - buffer; | ||
1527 | } | ||
1528 | |||
1529 | struct numa_maps { | ||
1530 | unsigned long pages; | ||
1531 | unsigned long anon; | ||
1532 | unsigned long mapped; | ||
1533 | unsigned long mapcount_max; | ||
1534 | unsigned long node[MAX_NUMNODES]; | ||
1535 | }; | ||
1536 | |||
1537 | static void gather_stats(struct page *page, void *private) | ||
1538 | { | ||
1539 | struct numa_maps *md = private; | ||
1540 | int count = page_mapcount(page); | ||
1541 | |||
1542 | if (count) | ||
1543 | md->mapped++; | ||
1544 | |||
1545 | if (count > md->mapcount_max) | ||
1546 | md->mapcount_max = count; | ||
1547 | |||
1548 | md->pages++; | ||
1549 | |||
1550 | if (PageAnon(page)) | ||
1551 | md->anon++; | ||
1552 | |||
1553 | md->node[page_to_nid(page)]++; | ||
1554 | cond_resched(); | ||
1555 | } | ||
1556 | |||
1557 | int show_numa_map(struct seq_file *m, void *v) | ||
1558 | { | ||
1559 | struct task_struct *task = m->private; | ||
1560 | struct vm_area_struct *vma = v; | ||
1561 | struct numa_maps *md; | ||
1562 | int n; | ||
1563 | char buffer[50]; | ||
1564 | |||
1565 | if (!vma->vm_mm) | ||
1566 | return 0; | ||
1567 | |||
1568 | md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL); | ||
1569 | if (!md) | ||
1570 | return 0; | ||
1571 | |||
1572 | check_pgd_range(vma, vma->vm_start, vma->vm_end, | ||
1573 | &node_online_map, MPOL_MF_STATS, md); | ||
1574 | |||
1575 | if (md->pages) { | ||
1576 | mpol_to_str(buffer, sizeof(buffer), | ||
1577 | get_vma_policy(task, vma, vma->vm_start)); | ||
1578 | |||
1579 | seq_printf(m, "%08lx %s pages=%lu mapped=%lu maxref=%lu", | ||
1580 | vma->vm_start, buffer, md->pages, | ||
1581 | md->mapped, md->mapcount_max); | ||
1582 | |||
1583 | if (md->anon) | ||
1584 | seq_printf(m," anon=%lu",md->anon); | ||
1585 | |||
1586 | for_each_online_node(n) | ||
1587 | if (md->node[n]) | ||
1588 | seq_printf(m, " N%d=%lu", n, md->node[n]); | ||
1589 | |||
1590 | seq_putc(m, '\n'); | ||
1591 | } | ||
1592 | kfree(md); | ||
1593 | |||
1594 | if (m->count < m->size) | ||
1595 | m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0; | ||
1596 | return 0; | ||
1597 | } | ||
1598 | |||