Diffstat (limited to 'arch/tile/mm/pgtable.c')
 arch/tile/mm/pgtable.c | 46 +++++-----------------------------------
 1 file changed, 5 insertions(+), 41 deletions(-)
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 289e729bbd76..28c23140c947 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -46,7 +46,7 @@ void show_mem(void)
 {
 	struct zone *zone;
 
-	printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu"
+	pr_err("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu"
 	       " free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu"
 	       " pagecache:%lu swap:%lu\n",
 	       (global_page_state(NR_ACTIVE_ANON) +
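
Note: the bare printk() had no log level, so this summary went out at the
default console loglevel. pr_err() pins it to KERN_ERR, which suits a report
usually triggered from out-of-memory handling. Sketch of the macro, simplified
from the kernel headers of this era (pr_fmt() defaults to a pass-through):

	#define pr_err(fmt, ...) \
		printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
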
@@ -71,7 +71,6 @@ void show_mem(void)
 		if (!populated_zone(zone))
 			continue;
 
-		printk("Node %d %7s: ", zone_to_nid(zone), zone->name);
 		spin_lock_irqsave(&zone->lock, flags);
 		for (order = 0; order < MAX_ORDER; order++) {
 			int nr = zone->free_area[order].nr_free;
@@ -80,7 +79,8 @@ void show_mem(void)
 				largest_order = order;
 		}
 		spin_unlock_irqrestore(&zone->lock, flags);
-		printk("%lukB (largest %luKb)\n",
+		pr_err("Node %d %7s: %lukB (largest %luKb)\n",
+		       zone_to_nid(zone), zone->name,
 		       K(total), largest_order ? K(1UL) << largest_order : 0);
 	}
 }
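
Note: the "Node %d %7s: " prefix moves into the same pr_err() as the totals,
so each per-zone line is now emitted by a single call. Besides gaining an
explicit log level, this avoids the split-line hazard; a sketch with
hypothetical output:

	/*
	 * Two half-line printks can interleave with another CPU:
	 *
	 *   printk("Node 0   Normal: ");           <- CPU 0
	 *   printk("unrelated message\n");         <- CPU 1 slips in
	 *   printk("12345kB (largest 4096Kb)\n");  <- CPU 0, orphaned
	 *
	 * Emitting the whole line from one call keeps it intact.
	 */
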
@@ -123,42 +123,6 @@ static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
 	local_flush_tlb_page(NULL, vaddr, PAGE_SIZE);
 }
 
-/*
- * Associate a huge virtual page frame with a given physical page frame
- * and protection flags for that frame.  pfn is for the base of the page,
- * vaddr is what the page gets mapped to - both must be properly aligned.
- * The pmd must already be instantiated.
- */
-void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
-{
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-
-	if (vaddr & (PMD_SIZE-1)) {	/* vaddr is misaligned */
-		printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
-		return; /* BUG(); */
-	}
-	if (pfn & (PTRS_PER_PTE-1)) {	/* pfn is misaligned */
-		printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
-		return; /* BUG(); */
-	}
-	pgd = swapper_pg_dir + pgd_index(vaddr);
-	if (pgd_none(*pgd)) {
-		printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
-		return; /* BUG(); */
-	}
-	pud = pud_offset(pgd, vaddr);
-	pmd = pmd_offset(pud, vaddr);
-	set_pmd(pmd, ptfn_pmd(HV_PFN_TO_PTFN(pfn), flags));
-	/*
-	 * It's enough to flush this one mapping.
-	 * We flush both small and huge TSBs to be sure.
-	 */
-	local_flush_tlb_page(NULL, vaddr, HPAGE_SIZE);
-	local_flush_tlb_pages(NULL, vaddr, PAGE_SIZE, HPAGE_SIZE);
-}
-
 void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
 {
 	unsigned long address = __fix_to_virt(idx);
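
Note: set_pmd_pfn() is removed outright rather than having its
printk(KERN_WARNING ...) calls converted like the rest of this patch, which
suggests it simply had no remaining callers; nothing is added to replace its
huge-page mapping and TLB-flush logic.
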
@@ -257,7 +221,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	int flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO|__GFP_COMP;
+	gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO|__GFP_COMP;
 	struct page *p;
 
 #ifdef CONFIG_HIGHPTE
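
Note: GFP masks fit in an int, so this compiled either way; the point of
gfp_t is static checking. It is a sparse "bitwise" type, approximately:

	/* approximate definition, from include/linux/types.h */
	typedef unsigned __bitwise gfp_t;

so under "make C=1" sparse warns whenever a gfp_t mixes with plain integers,
a check the old int declaration silently defeated.
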
@@ -550,7 +514,7 @@ void iounmap(volatile void __iomem *addr_in)
 	read_unlock(&vmlist_lock);
 
 	if (!p) {
-		printk("iounmap: bad address %p\n", addr);
+		pr_err("iounmap: bad address %p\n", addr);
 		dump_stack();
 		return;
 	}
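
Note: same conversion as in show_mem(): a level-less printk() becomes
pr_err(). A bad address passed to iounmap() is a caller bug, so KERN_ERR
matches the dump_stack() that follows and survives consoles configured to
drop default-level messages.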