author     David Howells <dhowells@redhat.com>    2008-02-08 07:19:31 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>    2008-02-08 12:22:30 -0500
commit     b920de1b77b72ca9432ac3f97edb26541e65e5dd (patch)
tree       40fa9be1470e929c47927dea7eddf184c0204229 /include/asm-mn10300/pgtable.h
parent     ef3d534754f31fed9c3b976fee1ece1b3bc38282 (diff)
mn10300: add the MN10300/AM33 architecture to the kernel
Add architecture support for the MN10300/AM33 CPUs produced by MEI to the
kernel.

This patch also adds board support for the ASB2303 with the ASB2308 daughter
board, and the ASB2305.  The only processor supported is the MN103E010, which
is an AM33v2 core plus on-chip devices.

[akpm@linux-foundation.org: nuke cvs control strings]
Signed-off-by: Masakazu Urade <urade.masakazu@jp.panasonic.com>
Signed-off-by: Koichi Yasutake <yasutake.koichi@jp.panasonic.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/asm-mn10300/pgtable.h')
-rw-r--r--  include/asm-mn10300/pgtable.h  489
1 file changed, 489 insertions, 0 deletions
diff --git a/include/asm-mn10300/pgtable.h b/include/asm-mn10300/pgtable.h
new file mode 100644
index 000000000000..375c4941deda
--- /dev/null
+++ b/include/asm-mn10300/pgtable.h
@@ -0,0 +1,489 @@
1/* MN10300 Page table manipulators and constants
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 *
11 *
12 * The Linux memory management assumes a three-level page table setup.  As on
13 * the i386, we use that but "fold" the mid level into the top-level page
14 * table, so that we physically have the same two-level page table that the
15 * i386 mmu expects.
16 *
17 * This file contains the functions and defines necessary to modify and use
18 * this i386-style page table tree for the purposes of the MN10300 TLB handler
19 * functions.
20 */
21#ifndef _ASM_PGTABLE_H
22#define _ASM_PGTABLE_H
23
24#include <asm/cpu-regs.h>
25
26#ifndef __ASSEMBLY__
27#include <asm/processor.h>
28#include <asm/cache.h>
29#include <linux/threads.h>
30
31#include <asm/bitops.h>
32
33#include <linux/slab.h>
34#include <linux/list.h>
35#include <linux/spinlock.h>
36
37/*
38 * ZERO_PAGE is a global shared page that is always zero: used
39 * for zero-mapped memory areas etc..
40 */
41#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
42extern unsigned long empty_zero_page[1024];
43extern spinlock_t pgd_lock;
44extern struct page *pgd_list;
45
46extern void pmd_ctor(void *, struct kmem_cache *, unsigned long);
47extern void pgtable_cache_init(void);
48extern void paging_init(void);
49
50#endif /* !__ASSEMBLY__ */
51
52/*
53 * The Linux mn10300 paging architecture implements only the traditional
54 * 2-level page tables
55 */
56#define PGDIR_SHIFT 22
57#define PTRS_PER_PGD 1024
58#define PTRS_PER_PUD 1 /* we don't really have any PUD physically */
59#define PTRS_PER_PMD 1 /* we don't really have any PMD physically */
60#define PTRS_PER_PTE 1024
61
62#define PGD_SIZE PAGE_SIZE
63#define PMD_SIZE (1UL << PMD_SHIFT)
64#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
65#define PGDIR_MASK (~(PGDIR_SIZE - 1))
66
67#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
68#define FIRST_USER_ADDRESS 0
69
70#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
71#define KERNEL_PGD_PTRS (PTRS_PER_PGD - USER_PGD_PTRS)
72
73#define TWOLEVEL_PGDIR_SHIFT 22
74#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
75#define BOOT_KERNEL_PGD_PTRS (1024 - BOOT_USER_PGD_PTRS)
76
77#ifndef __ASSEMBLY__
78extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
79#endif
80
81/*
82 * Unfortunately, due to the way the MMU works on the MN10300, the vmalloc VM
83 * area has to be in the lower half of the virtual address range (the upper
84 * half is not translated through the TLB).
85 *
86 * So in this case, the vmalloc area goes at the bottom of the address map
87 * (leaving a hole at the very bottom to catch addressing errors), and
88 * userspace starts immediately above.
89 *
90 * The vmalloc() routines also leave a hole of 4kB between each vmalloced
91 * area to catch addressing errors.
92 */
93#define VMALLOC_OFFSET (8 * 1024 * 1024)
94#define VMALLOC_START (0x70000000)
95#define VMALLOC_END (0x7C000000)
96
97#ifndef __ASSEMBLY__
98extern pte_t kernel_vmalloc_ptes[(VMALLOC_END - VMALLOC_START) / PAGE_SIZE];
99#endif
100
101/* IPTEL/DPTEL bit assignments */
102#define _PAGE_BIT_VALID xPTEL_V_BIT
103#define _PAGE_BIT_ACCESSED xPTEL_UNUSED1_BIT /* mustn't be loaded into IPTEL/DPTEL */
104#define _PAGE_BIT_NX xPTEL_UNUSED2_BIT /* mustn't be loaded into IPTEL/DPTEL */
105#define _PAGE_BIT_CACHE xPTEL_C_BIT
106#define _PAGE_BIT_PRESENT xPTEL_PV_BIT
107#define _PAGE_BIT_DIRTY xPTEL_D_BIT
108#define _PAGE_BIT_GLOBAL xPTEL_G_BIT
109
110#define _PAGE_VALID xPTEL_V
111#define _PAGE_ACCESSED xPTEL_UNUSED1
112#define _PAGE_NX xPTEL_UNUSED2 /* no-execute bit */
113#define _PAGE_CACHE xPTEL_C
114#define _PAGE_PRESENT xPTEL_PV
115#define _PAGE_DIRTY xPTEL_D
116#define _PAGE_PROT xPTEL_PR
117#define _PAGE_PROT_RKNU xPTEL_PR_ROK
118#define _PAGE_PROT_WKNU xPTEL_PR_RWK
119#define _PAGE_PROT_RKRU xPTEL_PR_ROK_ROU
120#define _PAGE_PROT_WKRU xPTEL_PR_RWK_ROU
121#define _PAGE_PROT_WKWU xPTEL_PR_RWK_RWU
122#define _PAGE_GLOBAL xPTEL_G
123#define _PAGE_PSE xPTEL_PS_4Mb /* 4MB page */
124
125#define _PAGE_FILE xPTEL_UNUSED1_BIT /* set:pagecache unset:swap */
126
127#define __PAGE_PROT_UWAUX 0x040
128#define __PAGE_PROT_USER 0x080
129#define __PAGE_PROT_WRITE 0x100
130
131#define _PAGE_PRESENTV (_PAGE_PRESENT|_PAGE_VALID)
132#define _PAGE_PROTNONE 0x000 /* If not present */
133
134#ifndef __ASSEMBLY__
135
136#define VMALLOC_VMADDR(x) ((unsigned long)(x))
137
138#define _PAGE_TABLE (_PAGE_PRESENTV | _PAGE_PROT_WKNU | _PAGE_ACCESSED | _PAGE_DIRTY)
139#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
140
141#define __PAGE_NONE (_PAGE_PRESENTV | _PAGE_PROT_RKNU | _PAGE_ACCESSED | _PAGE_CACHE)
142#define __PAGE_SHARED (_PAGE_PRESENTV | _PAGE_PROT_WKWU | _PAGE_ACCESSED | _PAGE_CACHE)
143#define __PAGE_COPY (_PAGE_PRESENTV | _PAGE_PROT_RKRU | _PAGE_ACCESSED | _PAGE_CACHE)
144#define __PAGE_READONLY (_PAGE_PRESENTV | _PAGE_PROT_RKRU | _PAGE_ACCESSED | _PAGE_CACHE)
145
146#define PAGE_NONE __pgprot(__PAGE_NONE | _PAGE_NX)
147#define PAGE_SHARED_NOEXEC __pgprot(__PAGE_SHARED | _PAGE_NX)
148#define PAGE_COPY_NOEXEC __pgprot(__PAGE_COPY | _PAGE_NX)
149#define PAGE_READONLY_NOEXEC __pgprot(__PAGE_READONLY | _PAGE_NX)
150#define PAGE_SHARED_EXEC __pgprot(__PAGE_SHARED)
151#define PAGE_COPY_EXEC __pgprot(__PAGE_COPY)
152#define PAGE_READONLY_EXEC __pgprot(__PAGE_READONLY)
153#define PAGE_COPY PAGE_COPY_NOEXEC
154#define PAGE_READONLY PAGE_READONLY_NOEXEC
155#define PAGE_SHARED PAGE_SHARED_EXEC
156
157#define __PAGE_KERNEL_BASE (_PAGE_PRESENTV | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
158
159#define __PAGE_KERNEL (__PAGE_KERNEL_BASE | _PAGE_PROT_WKNU | _PAGE_CACHE | _PAGE_NX)
160#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL_BASE | _PAGE_PROT_WKNU | _PAGE_NX)
161#define __PAGE_KERNEL_EXEC (__PAGE_KERNEL & ~_PAGE_NX)
162#define __PAGE_KERNEL_RO (__PAGE_KERNEL_BASE | _PAGE_PROT_RKNU | _PAGE_CACHE | _PAGE_NX)
163#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
164#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
165
166#define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
167#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
168#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
169#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
170#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
171#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
172
173/*
174 * Whilst the MN10300 can do page protection for execute (given separate data
175 * and insn TLBs), we are not supporting it at the moment. Write permission,
176 * however, always implies read permission (but not execute permission).
177 */
178#define __P000 PAGE_NONE
179#define __P001 PAGE_READONLY_NOEXEC
180#define __P010 PAGE_COPY_NOEXEC
181#define __P011 PAGE_COPY_NOEXEC
182#define __P100 PAGE_READONLY_EXEC
183#define __P101 PAGE_READONLY_EXEC
184#define __P110 PAGE_COPY_EXEC
185#define __P111 PAGE_COPY_EXEC
186
187#define __S000 PAGE_NONE
188#define __S001 PAGE_READONLY_NOEXEC
189#define __S010 PAGE_SHARED_NOEXEC
190#define __S011 PAGE_SHARED_NOEXEC
191#define __S100 PAGE_READONLY_EXEC
192#define __S101 PAGE_READONLY_EXEC
193#define __S110 PAGE_SHARED_EXEC
194#define __S111 PAGE_SHARED_EXEC
195
196/*
197 * Define this to warn about kernel memory accesses that are
198 * done without a 'verify_area(VERIFY_WRITE,..)'
199 */
200#undef TEST_VERIFY_AREA
201
202#define pte_present(x) (pte_val(x) & _PAGE_VALID)
203#define pte_clear(mm, addr, xp) \
204do { \
205 set_pte_at((mm), (addr), (xp), __pte(0)); \
206} while (0)
207
208#define pmd_none(x) (!pmd_val(x))
209#define pmd_present(x) (!pmd_none(x))
210#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
211#define pmd_bad(x) 0
212
213
214#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT))
215
216#ifndef __ASSEMBLY__
217
218/*
219 * The following only work if pte_present() is true.
220 * Undefined behaviour if not..
221 */
222static inline int pte_user(pte_t pte) { return pte_val(pte) & __PAGE_PROT_USER; }
223static inline int pte_read(pte_t pte) { return pte_val(pte) & __PAGE_PROT_USER; }
224static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
225static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
226static inline int pte_write(pte_t pte) { return pte_val(pte) & __PAGE_PROT_WRITE; }
227
228/*
229 * The following only works if pte_present() is not true.
230 */
231static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
232
233static inline pte_t pte_rdprotect(pte_t pte)
234{
235 pte_val(pte) &= ~(__PAGE_PROT_USER|__PAGE_PROT_UWAUX); return pte;
236}
237static inline pte_t pte_exprotect(pte_t pte)
238{
239 pte_val(pte) |= _PAGE_NX; return pte;
240}
241
242static inline pte_t pte_wrprotect(pte_t pte)
243{
244 pte_val(pte) &= ~(__PAGE_PROT_WRITE|__PAGE_PROT_UWAUX); return pte;
245}
246
247static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
248static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
249static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
250static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
251static inline pte_t pte_mkexec(pte_t pte) { pte_val(pte) &= ~_PAGE_NX; return pte; }
252
253static inline pte_t pte_mkread(pte_t pte)
254{
255 pte_val(pte) |= __PAGE_PROT_USER;
256 if (pte_write(pte))
257 pte_val(pte) |= __PAGE_PROT_UWAUX;
258 return pte;
259}
260static inline pte_t pte_mkwrite(pte_t pte)
261{
262 pte_val(pte) |= __PAGE_PROT_WRITE;
263 if (pte_val(pte) & __PAGE_PROT_USER)
264 pte_val(pte) |= __PAGE_PROT_UWAUX;
265 return pte;
266}
267
268#define pte_ERROR(e) \
269 printk(KERN_ERR "%s:%d: bad pte %08lx.\n", \
270 __FILE__, __LINE__, pte_val(e))
271#define pgd_ERROR(e) \
272 printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
273 __FILE__, __LINE__, pgd_val(e))
274
275/*
276 * The "pgd_xxx()" functions here are trivial for a folded two-level
277 * setup: the pgd is never bad, and a pmd always exists (as it's folded
278 * into the pgd entry)
279 */
280#define pgd_clear(xp) do { } while (0)
281
282/*
283 * Certain architectures need to do special things when PTEs
284 * within a page table are directly modified. Thus, the following
285 * hook is made available.
286 */
287#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
288#define set_pte_at(mm, addr, ptep, pteval) set_pte((ptep), (pteval))
289#define set_pte_atomic(pteptr, pteval) set_pte((pteptr), (pteval))
290
291/*
292 * (pmds are folded into pgds so this doesn't actually get called,
293 * but the define is needed for a generic inline function.)
294 */
295#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
296
297#define ptep_get_and_clear(mm, addr, ptep) \
298 __pte(xchg(&(ptep)->pte, 0))
299#define pte_same(a, b) (pte_val(a) == pte_val(b))
300#define pte_page(x) pfn_to_page(pte_pfn(x))
301#define pte_none(x) (!pte_val(x))
302#define pte_pfn(x) ((unsigned long) (pte_val(x) >> PAGE_SHIFT))
303#define __pfn_addr(pfn) ((pfn) << PAGE_SHIFT)
304#define pfn_pte(pfn, prot) __pte(__pfn_addr(pfn) | pgprot_val(prot))
305#define pfn_pmd(pfn, prot) __pmd(__pfn_addr(pfn) | pgprot_val(prot))
306
307/*
308 * All present user pages are user-executable:
309 */
310static inline int pte_exec(pte_t pte)
311{
312 return pte_user(pte);
313}
314
315/*
316 * All present pages are kernel-executable:
317 */
318static inline int pte_exec_kernel(pte_t pte)
319{
320 return 1;
321}
322
323/*
324 * Bits 0 and 1 are taken; split up the 29 bits of offset
325 * into this range:
326 */
327#define PTE_FILE_MAX_BITS 29
328
329#define pte_to_pgoff(pte) (pte_val(pte) >> 2)
330#define pgoff_to_pte(off) __pte((off) << 2 | _PAGE_FILE)
331
332/* Encode and de-code a swap entry */
333#define __swp_type(x) (((x).val >> 2) & 0x3f)
334#define __swp_offset(x) ((x).val >> 8)
335#define __swp_entry(type, offset) \
336 ((swp_entry_t) { ((type) << 2) | ((offset) << 8) })
337#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
338#define __swp_entry_to_pte(x) __pte((x).val)
339
340static inline
341int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr,
342 pte_t *ptep)
343{
344 if (!pte_dirty(*ptep))
345 return 0;
346 return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte);
347}
348
349static inline
350int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
351 pte_t *ptep)
352{
353 if (!pte_young(*ptep))
354 return 0;
355 return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte);
356}
357
358static inline
359void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
360{
361 pte_val(*ptep) &= ~(__PAGE_PROT_WRITE|__PAGE_PROT_UWAUX);
362}
363
364static inline void ptep_mkdirty(pte_t *ptep)
365{
366 set_bit(_PAGE_BIT_DIRTY, &ptep->pte);
367}
368
369/*
370 * Macro to mark a page protection value as "uncacheable". On processors which
371 * do not support it, this is a no-op.
372 */
373#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_CACHE)
374
375
376/*
377 * Conversion functions: convert a page and protection to a page entry,
378 * and a page entry and page directory to the page they refer to.
379 */
380
381#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
382#define mk_pte_huge(entry) \
383 ((entry).pte |= _PAGE_PRESENT | _PAGE_PSE | _PAGE_VALID)
384
385static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
386{
387 pte_val(pte) &= _PAGE_CHG_MASK;
388 pte_val(pte) |= pgprot_val(newprot);
389 return pte;
390}
391
392#define page_pte(page) page_pte_prot((page), __pgprot(0))
393
394#define pmd_page_kernel(pmd) \
395 ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
396
397#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
398
399#define pmd_large(pmd) \
400 ((pmd_val(pmd) & (_PAGE_PSE | _PAGE_PRESENT)) == \
401 (_PAGE_PSE | _PAGE_PRESENT))
402
403/*
404 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
405 *
406 * this macro returns the index of the entry in the pgd page which would
407 * control the given virtual address
408 */
409#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
410
411/*
412 * pgd_offset() returns a (pgd_t *)
413 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
414 */
415#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
416
417/*
418 * a shortcut which implies the use of the kernel's pgd, instead
419 * of a process's
420 */
421#define pgd_offset_k(address) pgd_offset(&init_mm, address)
422
423/*
424 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
425 *
426 * this macro returns the index of the entry in the pmd page which would
427 * control the given virtual address
428 */
429#define pmd_index(address) \
430 (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
431
432/*
433 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
434 *
435 * this macro returns the index of the entry in the pte page which would
436 * control the given virtual address
437 */
438#define pte_index(address) \
439 (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
440
441#define pte_offset_kernel(dir, address) \
442 ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address))
443
444/*
445 * Make a given kernel text page executable/non-executable.
446 * Returns the previous executability setting of that page (which
447 * is used to restore the previous state). Used by the SMP bootup code.
448 * NOTE: this is an __init function for security reasons.
449 */
450static inline int set_kernel_exec(unsigned long vaddr, int enable)
451{
452 return 0;
453}
454
455#define pte_offset_map(dir, address) \
456 ((pte_t *) page_address(pmd_page(*(dir))) + pte_index(address))
457#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
458#define pte_unmap(pte) do {} while (0)
459#define pte_unmap_nested(pte) do {} while (0)
460
461/*
462 * The MN10300 has external MMU info in the form of a TLB: it is filled from the
463 * kernel page tables, which contain the necessary information, by tlb-mn10300.S
464 */
465extern void update_mmu_cache(struct vm_area_struct *vma,
466 unsigned long address, pte_t pte);
467
468#endif /* !__ASSEMBLY__ */
469
470#define kern_addr_valid(addr) (1)
471
472#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
473 remap_pfn_range((vma), (vaddr), (pfn), (size), (prot))
474
475#define MK_IOSPACE_PFN(space, pfn) (pfn)
476#define GET_IOSPACE(pfn) 0
477#define GET_PFN(pfn) (pfn)
478
479#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
480#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
481#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
482#define __HAVE_ARCH_PTEP_SET_WRPROTECT
483#define __HAVE_ARCH_PTEP_MKDIRTY
484#define __HAVE_ARCH_PTE_SAME
485#include <asm-generic/pgtable.h>
486
487#endif /* !__ASSEMBLY__ */
488
489#endif /* _ASM_PGTABLE_H */
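
For illustration only (not part of the patch): a minimal sketch of how the macros defined in this header combine to walk a kernel virtual address down to its page frame number under the folded two-level layout. The function name is made up for the example, and the (pmd_t *) cast stands in for the folded pud/pmd levels (PTRS_PER_PUD == PTRS_PER_PMD == 1).

/* Sketch only: resolve a kernel virtual address to a pfn. */
static unsigned long example_kvaddr_to_pfn(unsigned long vaddr)
{
        pgd_t *pgd = pgd_offset_k(vaddr);       /* index init_mm's pgd (swapper_pg_dir) */
        pmd_t *pmd = (pmd_t *) pgd;             /* pud/pmd are folded into the pgd */
        pte_t *pte;

        if (pmd_none(*pmd))
                return 0;                       /* no pte page mapped here */

        pte = pte_offset_kernel(pmd, vaddr);    /* index into the pte page */
        if (!pte_present(*pte))
                return 0;                       /* _PAGE_VALID not set */

        return pte_pfn(*pte);                   /* physical page frame number */
}

A walk of a user address would differ only in starting from pgd_offset(mm, vaddr) rather than pgd_offset_k().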