aboutsummaryrefslogtreecommitdiffstats
path: root/fs/jffs2/os-linux.h
Commit message (Expand)AuthorAge
* convert remaining ->clear_inode() to ->evict_inode()Al Viro2010-08-09
* drop unused dentry argument to ->fsyncChristoph Hellwig2010-05-27
* jffs2: Rename jffs2_erase_pending_trigger() to jffs2_dirty_trigger()Joakim Tjernlund2010-05-19
* switch jffs2 to inode->i_aclAl Viro2009-06-24
* jffs2: move jffs2_write_super to super.cChristoph Hellwig2009-06-11
* [JFFS2] Use .unlocked_ioctlStoyan Gaydarov2008-07-11
* [JFFS2] Invert last argument of jffs2_gc_fetch_inode(), make it boolean.David Woodhouse2008-05-01
* iget: stop JFFS2 from using iget() and read_inode()David Howells2008-02-07
* [JFFS2] Tidy up fix for ACL/permissions problem.KaiGai Kohei2007-10-20
* [JFFS2] Fix ACL vs. mode handling.David Woodhouse2007-08-22
* Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torval...David Woodhouse2007-04-27
|\
| * JFFS2: add UBI supportArtem Bityutskiy2007-04-27
* | [JFFS2] Tidy up licensing/copyright boilerplate.David Woodhouse2007-04-25
|/
* [PATCH] mark struct inode_operations const 2Arjan van de Ven2007-02-12
* [PATCH] mark address_space_operations constChristoph Hellwig2006-06-28
* [PATCH] VFS: Permit filesystem to perform statfs with a known root dentryDavid Howells2006-06-23
* [JFFS2] Switch to using an array of jffs2_raw_node_refs instead of a list.David Woodhouse2006-05-26
* [MTD] Introduce MTD_BIT_WRITEABLEJoern Engel2006-05-22
* [MTD] Merge STMicro NOR_ECC code with Intel Sibley codeJoern Engel2006-05-22
* Merge git://git.infradead.org/jffs2-xattr-2.6David Woodhouse2006-05-20
|\
| * [JFFS2][XATTR] XATTR support on JFFS2 (version. 5)KaiGai Kohei2006-05-13
* | [JFFS2] Support new device nodesDavid Woodhouse2006-05-18
|/
* [PATCH] Make most file operations structs in fs/ constArjan van de Ven2006-03-28
* [JFFS2] Clean up trailing white spacesThomas Gleixner2005-11-07
* [JFFS2] Fix dataflash supportArtem B. Bityutskiy2005-11-06
* [JFFS2] Debug code simplification, update TODOArtem B. Bityutskiy2005-11-06
* [JFFS2] Add erase block summary support (mount time improvement)Ferenc Havasi2005-11-06
* [JFFS2] Teach JFFS2 about Sibley flashNicolas Pitre2005-11-06
* [JFFS2] Use f->target instead of f->dents for symlink targetArtem B. Bityutskiy2005-11-06
* [JFFS2] Avoid compiler warnings when JFFS2_FS_WRITEBUFFER=nTodd Poynor2005-07-12
* [JFFS2] Remove compatibility cruft for ancient kernelsDavid Woodhouse2005-07-06
* [JFFS2] Fix NOR only compileArtem B. Bityuckiy2005-05-23
* [JFFS2] Use a single config option for write buffer supportAndrew Victor2005-05-23
* [JFFS2] Add support for JFFS2-on-Dataflash devices.Andrew Victor2005-05-23
* [JFFS2] Core changes required to support JFFS2-on-Dataflash devices.Andrew Victor2005-05-23
* Linux-2.6.12-rc2v2.6.12-rc2Linus Torvalds2005-04-16
pc">#define vma_policy(vma) ((vma)->vm_policy) #define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol)) static inline void mpol_get(struct mempolicy *pol) { if (pol) atomic_inc(&pol->refcnt); } extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b); static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b) { if (a == b) return true; return __mpol_equal(a, b); } /* * Tree of shared policies for a shared memory region. * Maintain the policies in a pseudo mm that contains vmas. The vmas * carry the policy. As a special twist the pseudo mm is indexed in pages, not * bytes, so that we can work with shared memory segments bigger than * unsigned long. */ struct sp_node { struct rb_node nd; unsigned long start, end; struct mempolicy *policy; }; struct shared_policy { struct rb_root root; spinlock_t lock; }; void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol); int mpol_set_shared_policy(struct shared_policy *info, struct vm_area_struct *vma, struct mempolicy *new); void mpol_free_shared_policy(struct shared_policy *p); struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx); struct mempolicy *get_vma_policy(struct task_struct *tsk, struct vm_area_struct *vma, unsigned long addr); extern void numa_default_policy(void); extern void numa_policy_init(void); extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new, enum mpol_rebind_step step); extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new); extern void mpol_fix_fork_child_flag(struct task_struct *p); extern struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol, nodemask_t **nodemask); extern bool init_nodemask_of_mempolicy(nodemask_t *mask); extern bool mempolicy_nodemask_intersects(struct task_struct *tsk, const nodemask_t *mask); extern unsigned slab_node(void); extern enum zone_type policy_zone; static inline void 
check_highest_zone(enum zone_type k) { if (k > policy_zone && k != ZONE_MOVABLE) policy_zone = k; } int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, const nodemask_t *to, int flags); #ifdef CONFIG_TMPFS extern int mpol_parse_str(char *str, struct mempolicy **mpol); #endif extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol); /* Check if a vma is migratable */ static inline int vma_migratable(struct vm_area_struct *vma) { if (vma->vm_flags & (VM_IO | VM_HUGETLB | VM_PFNMAP)) return 0; /* * Migration allocates pages in the highest zone. If we cannot * do so then migration (at least from node to node) is not * possible. */ if (vma->vm_file && gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) < policy_zone) return 0; return 1; } extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long); #else struct mempolicy {}; static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b) { return true; } static inline void mpol_put(struct mempolicy *p) { } static inline void mpol_cond_put(struct mempolicy *pol) { } static inline void mpol_get(struct mempolicy *pol) { } static inline struct mempolicy *mpol_dup(struct mempolicy *old) { return NULL; } struct shared_policy {}; static inline int mpol_set_shared_policy(struct shared_policy *info, struct vm_area_struct *vma, struct mempolicy *new) { return -EINVAL; } static inline void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) { } static inline void mpol_free_shared_policy(struct shared_policy *p) { } static inline struct mempolicy * mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) { return NULL; } #define vma_policy(vma) NULL #define vma_set_policy(vma, pol) do {} while(0) static inline void numa_policy_init(void) { } static inline void numa_default_policy(void) { } static inline void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new, enum mpol_rebind_step step) { } static inline void 
mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new) { } static inline void mpol_fix_fork_child_flag(struct task_struct *p) { } static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol, nodemask_t **nodemask) { *mpol = NULL; *nodemask = NULL; return node_zonelist(0, gfp_flags); } static inline bool init_nodemask_of_mempolicy(nodemask_t *m) { return false; } static inline bool mempolicy_nodemask_intersects(struct task_struct *tsk, const nodemask_t *mask) { return false; } static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, const nodemask_t *to, int flags) { return 0; } static inline void check_highest_zone(int k) { } #ifdef CONFIG_TMPFS static inline int mpol_parse_str(char *str, struct mempolicy **mpol) { return 1; /* error */ } #endif static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) { return 0; } static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long address) { return -1; /* no node preference */ } #endif /* CONFIG_NUMA */ #endif