diff options
| author | Trond Myklebust <Trond.Myklebust@netapp.com> | 2006-06-20 20:46:21 -0400 |
|---|---|---|
| committer | Trond Myklebust <Trond.Myklebust@netapp.com> | 2006-06-20 20:46:21 -0400 |
| commit | 70ac4385a13f78bc478f26d317511893741b05bd (patch) | |
| tree | dafc7f3018295fc4ee00339889e4f35d5b9d7743 /fs | |
| parent | d59bf96cdde5b874a57bfd1425faa45da915d0b7 (diff) | |
| parent | 077e98945db7e54a9865b5f29a1f02f531eca414 (diff) | |
Merge branch 'master' of /home/trondmy/kernel/linux-2.6/
Conflicts:
include/linux/nfs_fs.h
Fixed up conflict with kernel header updates.
Diffstat (limited to 'fs')
47 files changed, 5422 insertions, 2102 deletions
diff --git a/fs/Kconfig b/fs/Kconfig index f9b5842c8d2d..20f9b557732d 100644 --- a/fs/Kconfig +++ b/fs/Kconfig | |||
| @@ -393,18 +393,30 @@ config INOTIFY | |||
| 393 | bool "Inotify file change notification support" | 393 | bool "Inotify file change notification support" |
| 394 | default y | 394 | default y |
| 395 | ---help--- | 395 | ---help--- |
| 396 | Say Y here to enable inotify support and the associated system | 396 | Say Y here to enable inotify support. Inotify is a file change |
| 397 | calls. Inotify is a file change notification system and a | 397 | notification system and a replacement for dnotify. Inotify fixes |
| 398 | replacement for dnotify. Inotify fixes numerous shortcomings in | 398 | numerous shortcomings in dnotify and introduces several new features |
| 399 | dnotify and introduces several new features. It allows monitoring | 399 | including multiple file events, one-shot support, and unmount |
| 400 | of both files and directories via a single open fd. Other features | ||
| 401 | include multiple file events, one-shot support, and unmount | ||
| 402 | notification. | 400 | notification. |
| 403 | 401 | ||
| 404 | For more information, see Documentation/filesystems/inotify.txt | 402 | For more information, see Documentation/filesystems/inotify.txt |
| 405 | 403 | ||
| 406 | If unsure, say Y. | 404 | If unsure, say Y. |
| 407 | 405 | ||
| 406 | config INOTIFY_USER | ||
| 407 | bool "Inotify support for userspace" | ||
| 408 | depends on INOTIFY | ||
| 409 | default y | ||
| 410 | ---help--- | ||
| 411 | Say Y here to enable inotify support for userspace, including the | ||
| 412 | associated system calls. Inotify allows monitoring of both files and | ||
| 413 | directories via a single open fd. Events are read from the file | ||
| 414 | descriptor, which is also select()- and poll()-able. | ||
| 415 | |||
| 416 | For more information, see Documentation/filesystems/inotify.txt | ||
| 417 | |||
| 418 | If unsure, say Y. | ||
| 419 | |||
| 408 | config QUOTA | 420 | config QUOTA |
| 409 | bool "Quota support" | 421 | bool "Quota support" |
| 410 | help | 422 | help |
| @@ -1101,6 +1113,44 @@ config JFFS2_SUMMARY | |||
| 1101 | 1113 | ||
| 1102 | If unsure, say 'N'. | 1114 | If unsure, say 'N'. |
| 1103 | 1115 | ||
| 1116 | config JFFS2_FS_XATTR | ||
| 1117 | bool "JFFS2 XATTR support (EXPERIMENTAL)" | ||
| 1118 | depends on JFFS2_FS && EXPERIMENTAL && !JFFS2_FS_WRITEBUFFER | ||
| 1119 | default n | ||
| 1120 | help | ||
| 1121 | Extended attributes are name:value pairs associated with inodes by | ||
| 1122 | the kernel or by users (see the attr(5) manual page, or visit | ||
| 1123 | <http://acl.bestbits.at/> for details). | ||
| 1124 | |||
| 1125 | If unsure, say N. | ||
| 1126 | |||
| 1127 | config JFFS2_FS_POSIX_ACL | ||
| 1128 | bool "JFFS2 POSIX Access Control Lists" | ||
| 1129 | depends on JFFS2_FS_XATTR | ||
| 1130 | default y | ||
| 1131 | select FS_POSIX_ACL | ||
| 1132 | help | ||
| 1133 | Posix Access Control Lists (ACLs) support permissions for users and | ||
| 1134 | groups beyond the owner/group/world scheme. | ||
| 1135 | |||
| 1136 | To learn more about Access Control Lists, visit the Posix ACLs for | ||
| 1137 | Linux website <http://acl.bestbits.at/>. | ||
| 1138 | |||
| 1139 | If you don't know what Access Control Lists are, say N | ||
| 1140 | |||
| 1141 | config JFFS2_FS_SECURITY | ||
| 1142 | bool "JFFS2 Security Labels" | ||
| 1143 | depends on JFFS2_FS_XATTR | ||
| 1144 | default y | ||
| 1145 | help | ||
| 1146 | Security labels support alternative access control models | ||
| 1147 | implemented by security modules like SELinux. This option | ||
| 1148 | enables an extended attribute handler for file security | ||
| 1149 | labels in the jffs2 filesystem. | ||
| 1150 | |||
| 1151 | If you are not using a security module that requires using | ||
| 1152 | extended attributes for file security labels, say N. | ||
| 1153 | |||
| 1104 | config JFFS2_COMPRESSION_OPTIONS | 1154 | config JFFS2_COMPRESSION_OPTIONS |
| 1105 | bool "Advanced compression options for JFFS2" | 1155 | bool "Advanced compression options for JFFS2" |
| 1106 | depends on JFFS2_FS | 1156 | depends on JFFS2_FS |
diff --git a/fs/Makefile b/fs/Makefile index 078d3d1191a5..d0ea6bfccf29 100644 --- a/fs/Makefile +++ b/fs/Makefile | |||
| @@ -13,6 +13,7 @@ obj-y := open.o read_write.o file_table.o buffer.o bio.o super.o \ | |||
| 13 | ioprio.o pnode.o drop_caches.o splice.o sync.o | 13 | ioprio.o pnode.o drop_caches.o splice.o sync.o |
| 14 | 14 | ||
| 15 | obj-$(CONFIG_INOTIFY) += inotify.o | 15 | obj-$(CONFIG_INOTIFY) += inotify.o |
| 16 | obj-$(CONFIG_INOTIFY_USER) += inotify_user.o | ||
| 16 | obj-$(CONFIG_EPOLL) += eventpoll.o | 17 | obj-$(CONFIG_EPOLL) += eventpoll.o |
| 17 | obj-$(CONFIG_COMPAT) += compat.o compat_ioctl.o | 18 | obj-$(CONFIG_COMPAT) += compat.o compat_ioctl.o |
| 18 | 19 | ||
diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 1b4491cdd115..2695337d4d64 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c | |||
| @@ -337,20 +337,20 @@ static inline int ep_cmp_ffd(struct epoll_filefd *p1, | |||
| 337 | /* Special initialization for the rb-tree node to detect linkage */ | 337 | /* Special initialization for the rb-tree node to detect linkage */ |
| 338 | static inline void ep_rb_initnode(struct rb_node *n) | 338 | static inline void ep_rb_initnode(struct rb_node *n) |
| 339 | { | 339 | { |
| 340 | n->rb_parent = n; | 340 | rb_set_parent(n, n); |
| 341 | } | 341 | } |
| 342 | 342 | ||
| 343 | /* Removes a node from the rb-tree and marks it for a fast is-linked check */ | 343 | /* Removes a node from the rb-tree and marks it for a fast is-linked check */ |
| 344 | static inline void ep_rb_erase(struct rb_node *n, struct rb_root *r) | 344 | static inline void ep_rb_erase(struct rb_node *n, struct rb_root *r) |
| 345 | { | 345 | { |
| 346 | rb_erase(n, r); | 346 | rb_erase(n, r); |
| 347 | n->rb_parent = n; | 347 | rb_set_parent(n, n); |
| 348 | } | 348 | } |
| 349 | 349 | ||
| 350 | /* Fast check to verify that the item is linked to the main rb-tree */ | 350 | /* Fast check to verify that the item is linked to the main rb-tree */ |
| 351 | static inline int ep_rb_linked(struct rb_node *n) | 351 | static inline int ep_rb_linked(struct rb_node *n) |
| 352 | { | 352 | { |
| 353 | return n->rb_parent != n; | 353 | return rb_parent(n) != n; |
| 354 | } | 354 | } |
| 355 | 355 | ||
| 356 | /* | 356 | /* |
| @@ -49,6 +49,7 @@ | |||
| 49 | #include <linux/rmap.h> | 49 | #include <linux/rmap.h> |
| 50 | #include <linux/acct.h> | 50 | #include <linux/acct.h> |
| 51 | #include <linux/cn_proc.h> | 51 | #include <linux/cn_proc.h> |
| 52 | #include <linux/audit.h> | ||
| 52 | 53 | ||
| 53 | #include <asm/uaccess.h> | 54 | #include <asm/uaccess.h> |
| 54 | #include <asm/mmu_context.h> | 55 | #include <asm/mmu_context.h> |
| @@ -1085,6 +1086,11 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) | |||
| 1085 | /* kernel module loader fixup */ | 1086 | /* kernel module loader fixup */ |
| 1086 | /* so we don't try to load run modprobe in kernel space. */ | 1087 | /* so we don't try to load run modprobe in kernel space. */ |
| 1087 | set_fs(USER_DS); | 1088 | set_fs(USER_DS); |
| 1089 | |||
| 1090 | retval = audit_bprm(bprm); | ||
| 1091 | if (retval) | ||
| 1092 | return retval; | ||
| 1093 | |||
| 1088 | retval = -ENOENT; | 1094 | retval = -ENOENT; |
| 1089 | for (try=0; try<2; try++) { | 1095 | for (try=0; try<2; try++) { |
| 1090 | read_lock(&binfmt_lock); | 1096 | read_lock(&binfmt_lock); |
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c index f37528ed222e..fbb0d4ed07d4 100644 --- a/fs/ext3/dir.c +++ b/fs/ext3/dir.c | |||
| @@ -284,7 +284,7 @@ static void free_rb_tree_fname(struct rb_root *root) | |||
| 284 | * beginning of the loop and try to free the parent | 284 | * beginning of the loop and try to free the parent |
| 285 | * node. | 285 | * node. |
| 286 | */ | 286 | */ |
| 287 | parent = n->rb_parent; | 287 | parent = rb_parent(n); |
| 288 | fname = rb_entry(n, struct fname, rb_hash); | 288 | fname = rb_entry(n, struct fname, rb_hash); |
| 289 | while (fname) { | 289 | while (fname) { |
| 290 | struct fname * old = fname; | 290 | struct fname * old = fname; |
diff --git a/fs/inotify.c b/fs/inotify.c index 732ec4bd5774..723836a1f718 100644 --- a/fs/inotify.c +++ b/fs/inotify.c | |||
| @@ -5,7 +5,10 @@ | |||
| 5 | * John McCutchan <ttb@tentacle.dhs.org> | 5 | * John McCutchan <ttb@tentacle.dhs.org> |
| 6 | * Robert Love <rml@novell.com> | 6 | * Robert Love <rml@novell.com> |
| 7 | * | 7 | * |
| 8 | * Kernel API added by: Amy Griffis <amy.griffis@hp.com> | ||
| 9 | * | ||
| 8 | * Copyright (C) 2005 John McCutchan | 10 | * Copyright (C) 2005 John McCutchan |
| 11 | * Copyright 2006 Hewlett-Packard Development Company, L.P. | ||
| 9 | * | 12 | * |
| 10 | * This program is free software; you can redistribute it and/or modify it | 13 | * This program is free software; you can redistribute it and/or modify it |
| 11 | * under the terms of the GNU General Public License as published by the | 14 | * under the terms of the GNU General Public License as published by the |
| @@ -20,35 +23,17 @@ | |||
| 20 | 23 | ||
| 21 | #include <linux/module.h> | 24 | #include <linux/module.h> |
| 22 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
| 23 | #include <linux/sched.h> | ||
| 24 | #include <linux/spinlock.h> | 26 | #include <linux/spinlock.h> |
| 25 | #include <linux/idr.h> | 27 | #include <linux/idr.h> |
| 26 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
| 27 | #include <linux/fs.h> | 29 | #include <linux/fs.h> |
| 28 | #include <linux/file.h> | ||
| 29 | #include <linux/mount.h> | ||
| 30 | #include <linux/namei.h> | ||
| 31 | #include <linux/poll.h> | ||
| 32 | #include <linux/init.h> | 30 | #include <linux/init.h> |
| 33 | #include <linux/list.h> | 31 | #include <linux/list.h> |
| 34 | #include <linux/writeback.h> | 32 | #include <linux/writeback.h> |
| 35 | #include <linux/inotify.h> | 33 | #include <linux/inotify.h> |
| 36 | #include <linux/syscalls.h> | ||
| 37 | |||
| 38 | #include <asm/ioctls.h> | ||
| 39 | 34 | ||
| 40 | static atomic_t inotify_cookie; | 35 | static atomic_t inotify_cookie; |
| 41 | 36 | ||
| 42 | static kmem_cache_t *watch_cachep __read_mostly; | ||
| 43 | static kmem_cache_t *event_cachep __read_mostly; | ||
| 44 | |||
| 45 | static struct vfsmount *inotify_mnt __read_mostly; | ||
| 46 | |||
| 47 | /* these are configurable via /proc/sys/fs/inotify/ */ | ||
| 48 | int inotify_max_user_instances __read_mostly; | ||
| 49 | int inotify_max_user_watches __read_mostly; | ||
| 50 | int inotify_max_queued_events __read_mostly; | ||
| 51 | |||
| 52 | /* | 37 | /* |
| 53 | * Lock ordering: | 38 | * Lock ordering: |
| 54 | * | 39 | * |
| @@ -56,327 +41,108 @@ int inotify_max_queued_events __read_mostly; | |||
| 56 | * iprune_mutex (synchronize shrink_icache_memory()) | 41 | * iprune_mutex (synchronize shrink_icache_memory()) |
| 57 | * inode_lock (protects the super_block->s_inodes list) | 42 | * inode_lock (protects the super_block->s_inodes list) |
| 58 | * inode->inotify_mutex (protects inode->inotify_watches and watches->i_list) | 43 | * inode->inotify_mutex (protects inode->inotify_watches and watches->i_list) |
| 59 | * inotify_dev->mutex (protects inotify_device and watches->d_list) | 44 | * inotify_handle->mutex (protects inotify_handle and watches->h_list) |
| 45 | * | ||
| 46 | * The inode->inotify_mutex and inotify_handle->mutex and held during execution | ||
| 47 | * of a caller's event handler. Thus, the caller must not hold any locks | ||
| 48 | * taken in their event handler while calling any of the published inotify | ||
| 49 | * interfaces. | ||
| 60 | */ | 50 | */ |
| 61 | 51 | ||
| 62 | /* | 52 | /* |
| 63 | * Lifetimes of the three main data structures--inotify_device, inode, and | 53 | * Lifetimes of the three main data structures--inotify_handle, inode, and |
| 64 | * inotify_watch--are managed by reference count. | 54 | * inotify_watch--are managed by reference count. |
| 65 | * | 55 | * |
| 66 | * inotify_device: Lifetime is from inotify_init() until release. Additional | 56 | * inotify_handle: Lifetime is from inotify_init() to inotify_destroy(). |
| 67 | * references can bump the count via get_inotify_dev() and drop the count via | 57 | * Additional references can bump the count via get_inotify_handle() and drop |
| 68 | * put_inotify_dev(). | 58 | * the count via put_inotify_handle(). |
| 69 | * | 59 | * |
| 70 | * inotify_watch: Lifetime is from create_watch() to destory_watch(). | 60 | * inotify_watch: for inotify's purposes, lifetime is from inotify_add_watch() |
| 71 | * Additional references can bump the count via get_inotify_watch() and drop | 61 | * to remove_watch_no_event(). Additional references can bump the count via |
| 72 | * the count via put_inotify_watch(). | 62 | * get_inotify_watch() and drop the count via put_inotify_watch(). The caller |
| 63 | * is reponsible for the final put after receiving IN_IGNORED, or when using | ||
| 64 | * IN_ONESHOT after receiving the first event. Inotify does the final put if | ||
| 65 | * inotify_destroy() is called. | ||
| 73 | * | 66 | * |
| 74 | * inode: Pinned so long as the inode is associated with a watch, from | 67 | * inode: Pinned so long as the inode is associated with a watch, from |
| 75 | * create_watch() to put_inotify_watch(). | 68 | * inotify_add_watch() to the final put_inotify_watch(). |
| 76 | */ | 69 | */ |
| 77 | 70 | ||
| 78 | /* | 71 | /* |
| 79 | * struct inotify_device - represents an inotify instance | 72 | * struct inotify_handle - represents an inotify instance |
| 80 | * | 73 | * |
| 81 | * This structure is protected by the mutex 'mutex'. | 74 | * This structure is protected by the mutex 'mutex'. |
| 82 | */ | 75 | */ |
| 83 | struct inotify_device { | 76 | struct inotify_handle { |
| 84 | wait_queue_head_t wq; /* wait queue for i/o */ | ||
| 85 | struct idr idr; /* idr mapping wd -> watch */ | 77 | struct idr idr; /* idr mapping wd -> watch */ |
| 86 | struct mutex mutex; /* protects this bad boy */ | 78 | struct mutex mutex; /* protects this bad boy */ |
| 87 | struct list_head events; /* list of queued events */ | ||
| 88 | struct list_head watches; /* list of watches */ | 79 | struct list_head watches; /* list of watches */ |
| 89 | atomic_t count; /* reference count */ | 80 | atomic_t count; /* reference count */ |
| 90 | struct user_struct *user; /* user who opened this dev */ | ||
| 91 | unsigned int queue_size; /* size of the queue (bytes) */ | ||
| 92 | unsigned int event_count; /* number of pending events */ | ||
| 93 | unsigned int max_events; /* maximum number of events */ | ||
| 94 | u32 last_wd; /* the last wd allocated */ | 81 | u32 last_wd; /* the last wd allocated */ |
| 82 | const struct inotify_operations *in_ops; /* inotify caller operations */ | ||
| 95 | }; | 83 | }; |
| 96 | 84 | ||
| 97 | /* | 85 | static inline void get_inotify_handle(struct inotify_handle *ih) |
| 98 | * struct inotify_kernel_event - An inotify event, originating from a watch and | ||
| 99 | * queued for user-space. A list of these is attached to each instance of the | ||
| 100 | * device. In read(), this list is walked and all events that can fit in the | ||
| 101 | * buffer are returned. | ||
| 102 | * | ||
| 103 | * Protected by dev->mutex of the device in which we are queued. | ||
| 104 | */ | ||
| 105 | struct inotify_kernel_event { | ||
| 106 | struct inotify_event event; /* the user-space event */ | ||
| 107 | struct list_head list; /* entry in inotify_device's list */ | ||
| 108 | char *name; /* filename, if any */ | ||
| 109 | }; | ||
| 110 | |||
| 111 | /* | ||
| 112 | * struct inotify_watch - represents a watch request on a specific inode | ||
| 113 | * | ||
| 114 | * d_list is protected by dev->mutex of the associated watch->dev. | ||
| 115 | * i_list and mask are protected by inode->inotify_mutex of the associated inode. | ||
| 116 | * dev, inode, and wd are never written to once the watch is created. | ||
| 117 | */ | ||
| 118 | struct inotify_watch { | ||
| 119 | struct list_head d_list; /* entry in inotify_device's list */ | ||
| 120 | struct list_head i_list; /* entry in inode's list */ | ||
| 121 | atomic_t count; /* reference count */ | ||
| 122 | struct inotify_device *dev; /* associated device */ | ||
| 123 | struct inode *inode; /* associated inode */ | ||
| 124 | s32 wd; /* watch descriptor */ | ||
| 125 | u32 mask; /* event mask for this watch */ | ||
| 126 | }; | ||
| 127 | |||
| 128 | #ifdef CONFIG_SYSCTL | ||
| 129 | |||
| 130 | #include <linux/sysctl.h> | ||
| 131 | |||
| 132 | static int zero; | ||
| 133 | |||
| 134 | ctl_table inotify_table[] = { | ||
| 135 | { | ||
| 136 | .ctl_name = INOTIFY_MAX_USER_INSTANCES, | ||
| 137 | .procname = "max_user_instances", | ||
| 138 | .data = &inotify_max_user_instances, | ||
| 139 | .maxlen = sizeof(int), | ||
| 140 | .mode = 0644, | ||
| 141 | .proc_handler = &proc_dointvec_minmax, | ||
| 142 | .strategy = &sysctl_intvec, | ||
| 143 | .extra1 = &zero, | ||
| 144 | }, | ||
| 145 | { | ||
| 146 | .ctl_name = INOTIFY_MAX_USER_WATCHES, | ||
| 147 | .procname = "max_user_watches", | ||
| 148 | .data = &inotify_max_user_watches, | ||
| 149 | .maxlen = sizeof(int), | ||
| 150 | .mode = 0644, | ||
| 151 | .proc_handler = &proc_dointvec_minmax, | ||
| 152 | .strategy = &sysctl_intvec, | ||
| 153 | .extra1 = &zero, | ||
| 154 | }, | ||
| 155 | { | ||
| 156 | .ctl_name = INOTIFY_MAX_QUEUED_EVENTS, | ||
| 157 | .procname = "max_queued_events", | ||
| 158 | .data = &inotify_max_queued_events, | ||
| 159 | .maxlen = sizeof(int), | ||
| 160 | .mode = 0644, | ||
| 161 | .proc_handler = &proc_dointvec_minmax, | ||
| 162 | .strategy = &sysctl_intvec, | ||
| 163 | .extra1 = &zero | ||
| 164 | }, | ||
| 165 | { .ctl_name = 0 } | ||
| 166 | }; | ||
| 167 | #endif /* CONFIG_SYSCTL */ | ||
| 168 | |||
| 169 | static inline void get_inotify_dev(struct inotify_device *dev) | ||
| 170 | { | 86 | { |
| 171 | atomic_inc(&dev->count); | 87 | atomic_inc(&ih->count); |
| 172 | } | 88 | } |
| 173 | 89 | ||
| 174 | static inline void put_inotify_dev(struct inotify_device *dev) | 90 | static inline void put_inotify_handle(struct inotify_handle *ih) |
| 175 | { | 91 | { |
| 176 | if (atomic_dec_and_test(&dev->count)) { | 92 | if (atomic_dec_and_test(&ih->count)) { |
| 177 | atomic_dec(&dev->user->inotify_devs); | 93 | idr_destroy(&ih->idr); |
| 178 | free_uid(dev->user); | 94 | kfree(ih); |
| 179 | idr_destroy(&dev->idr); | ||
| 180 | kfree(dev); | ||
| 181 | } | 95 | } |
| 182 | } | 96 | } |
| 183 | 97 | ||
| 184 | static inline void get_inotify_watch(struct inotify_watch *watch) | 98 | /** |
| 99 | * get_inotify_watch - grab a reference to an inotify_watch | ||
| 100 | * @watch: watch to grab | ||
| 101 | */ | ||
| 102 | void get_inotify_watch(struct inotify_watch *watch) | ||
| 185 | { | 103 | { |
| 186 | atomic_inc(&watch->count); | 104 | atomic_inc(&watch->count); |
| 187 | } | 105 | } |
| 106 | EXPORT_SYMBOL_GPL(get_inotify_watch); | ||
| 188 | 107 | ||
| 189 | /* | 108 | /** |
| 190 | * put_inotify_watch - decrements the ref count on a given watch. cleans up | 109 | * put_inotify_watch - decrements the ref count on a given watch. cleans up |
| 191 | * the watch and its references if the count reaches zero. | 110 | * watch references if the count reaches zero. inotify_watch is freed by |
| 111 | * inotify callers via the destroy_watch() op. | ||
| 112 | * @watch: watch to release | ||
| 192 | */ | 113 | */ |
| 193 | static inline void put_inotify_watch(struct inotify_watch *watch) | 114 | void put_inotify_watch(struct inotify_watch *watch) |
| 194 | { | 115 | { |
| 195 | if (atomic_dec_and_test(&watch->count)) { | 116 | if (atomic_dec_and_test(&watch->count)) { |
| 196 | put_inotify_dev(watch->dev); | 117 | struct inotify_handle *ih = watch->ih; |
| 197 | iput(watch->inode); | ||
| 198 | kmem_cache_free(watch_cachep, watch); | ||
| 199 | } | ||
| 200 | } | ||
| 201 | |||
| 202 | /* | ||
| 203 | * kernel_event - create a new kernel event with the given parameters | ||
| 204 | * | ||
| 205 | * This function can sleep. | ||
| 206 | */ | ||
| 207 | static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie, | ||
| 208 | const char *name) | ||
| 209 | { | ||
| 210 | struct inotify_kernel_event *kevent; | ||
| 211 | |||
| 212 | kevent = kmem_cache_alloc(event_cachep, GFP_KERNEL); | ||
| 213 | if (unlikely(!kevent)) | ||
| 214 | return NULL; | ||
| 215 | |||
| 216 | /* we hand this out to user-space, so zero it just in case */ | ||
| 217 | memset(&kevent->event, 0, sizeof(struct inotify_event)); | ||
| 218 | |||
| 219 | kevent->event.wd = wd; | ||
| 220 | kevent->event.mask = mask; | ||
| 221 | kevent->event.cookie = cookie; | ||
| 222 | |||
| 223 | INIT_LIST_HEAD(&kevent->list); | ||
| 224 | |||
| 225 | if (name) { | ||
| 226 | size_t len, rem, event_size = sizeof(struct inotify_event); | ||
| 227 | |||
| 228 | /* | ||
| 229 | * We need to pad the filename so as to properly align an | ||
| 230 | * array of inotify_event structures. Because the structure is | ||
| 231 | * small and the common case is a small filename, we just round | ||
| 232 | * up to the next multiple of the structure's sizeof. This is | ||
| 233 | * simple and safe for all architectures. | ||
| 234 | */ | ||
| 235 | len = strlen(name) + 1; | ||
| 236 | rem = event_size - len; | ||
| 237 | if (len > event_size) { | ||
| 238 | rem = event_size - (len % event_size); | ||
| 239 | if (len % event_size == 0) | ||
| 240 | rem = 0; | ||
| 241 | } | ||
| 242 | |||
| 243 | kevent->name = kmalloc(len + rem, GFP_KERNEL); | ||
| 244 | if (unlikely(!kevent->name)) { | ||
| 245 | kmem_cache_free(event_cachep, kevent); | ||
| 246 | return NULL; | ||
| 247 | } | ||
| 248 | memcpy(kevent->name, name, len); | ||
| 249 | if (rem) | ||
| 250 | memset(kevent->name + len, 0, rem); | ||
| 251 | kevent->event.len = len + rem; | ||
| 252 | } else { | ||
| 253 | kevent->event.len = 0; | ||
| 254 | kevent->name = NULL; | ||
| 255 | } | ||
| 256 | |||
| 257 | return kevent; | ||
| 258 | } | ||
| 259 | |||
| 260 | /* | ||
| 261 | * inotify_dev_get_event - return the next event in the given dev's queue | ||
| 262 | * | ||
| 263 | * Caller must hold dev->mutex. | ||
| 264 | */ | ||
| 265 | static inline struct inotify_kernel_event * | ||
| 266 | inotify_dev_get_event(struct inotify_device *dev) | ||
| 267 | { | ||
| 268 | return list_entry(dev->events.next, struct inotify_kernel_event, list); | ||
| 269 | } | ||
| 270 | |||
| 271 | /* | ||
| 272 | * inotify_dev_queue_event - add a new event to the given device | ||
| 273 | * | ||
| 274 | * Caller must hold dev->mutex. Can sleep (calls kernel_event()). | ||
| 275 | */ | ||
| 276 | static void inotify_dev_queue_event(struct inotify_device *dev, | ||
| 277 | struct inotify_watch *watch, u32 mask, | ||
| 278 | u32 cookie, const char *name) | ||
| 279 | { | ||
| 280 | struct inotify_kernel_event *kevent, *last; | ||
| 281 | |||
| 282 | /* coalescing: drop this event if it is a dupe of the previous */ | ||
| 283 | last = inotify_dev_get_event(dev); | ||
| 284 | if (last && last->event.mask == mask && last->event.wd == watch->wd && | ||
| 285 | last->event.cookie == cookie) { | ||
| 286 | const char *lastname = last->name; | ||
| 287 | |||
| 288 | if (!name && !lastname) | ||
| 289 | return; | ||
| 290 | if (name && lastname && !strcmp(lastname, name)) | ||
| 291 | return; | ||
| 292 | } | ||
| 293 | |||
| 294 | /* the queue overflowed and we already sent the Q_OVERFLOW event */ | ||
| 295 | if (unlikely(dev->event_count > dev->max_events)) | ||
| 296 | return; | ||
| 297 | |||
| 298 | /* if the queue overflows, we need to notify user space */ | ||
| 299 | if (unlikely(dev->event_count == dev->max_events)) | ||
| 300 | kevent = kernel_event(-1, IN_Q_OVERFLOW, cookie, NULL); | ||
| 301 | else | ||
| 302 | kevent = kernel_event(watch->wd, mask, cookie, name); | ||
| 303 | |||
| 304 | if (unlikely(!kevent)) | ||
| 305 | return; | ||
| 306 | |||
| 307 | /* queue the event and wake up anyone waiting */ | ||
| 308 | dev->event_count++; | ||
| 309 | dev->queue_size += sizeof(struct inotify_event) + kevent->event.len; | ||
| 310 | list_add_tail(&kevent->list, &dev->events); | ||
| 311 | wake_up_interruptible(&dev->wq); | ||
| 312 | } | ||
| 313 | |||
| 314 | /* | ||
| 315 | * remove_kevent - cleans up and ultimately frees the given kevent | ||
| 316 | * | ||
| 317 | * Caller must hold dev->mutex. | ||
| 318 | */ | ||
| 319 | static void remove_kevent(struct inotify_device *dev, | ||
| 320 | struct inotify_kernel_event *kevent) | ||
| 321 | { | ||
| 322 | list_del(&kevent->list); | ||
| 323 | |||
| 324 | dev->event_count--; | ||
| 325 | dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len; | ||
| 326 | |||
| 327 | kfree(kevent->name); | ||
| 328 | kmem_cache_free(event_cachep, kevent); | ||
| 329 | } | ||
| 330 | 118 | ||
| 331 | /* | 119 | iput(watch->inode); |
| 332 | * inotify_dev_event_dequeue - destroy an event on the given device | 120 | ih->in_ops->destroy_watch(watch); |
| 333 | * | 121 | put_inotify_handle(ih); |
| 334 | * Caller must hold dev->mutex. | ||
| 335 | */ | ||
| 336 | static void inotify_dev_event_dequeue(struct inotify_device *dev) | ||
| 337 | { | ||
| 338 | if (!list_empty(&dev->events)) { | ||
| 339 | struct inotify_kernel_event *kevent; | ||
| 340 | kevent = inotify_dev_get_event(dev); | ||
| 341 | remove_kevent(dev, kevent); | ||
| 342 | } | 122 | } |
| 343 | } | 123 | } |
| 124 | EXPORT_SYMBOL_GPL(put_inotify_watch); | ||
| 344 | 125 | ||
| 345 | /* | 126 | /* |
| 346 | * inotify_dev_get_wd - returns the next WD for use by the given dev | 127 | * inotify_handle_get_wd - returns the next WD for use by the given handle |
| 347 | * | 128 | * |
| 348 | * Callers must hold dev->mutex. This function can sleep. | 129 | * Callers must hold ih->mutex. This function can sleep. |
| 349 | */ | 130 | */ |
| 350 | static int inotify_dev_get_wd(struct inotify_device *dev, | 131 | static int inotify_handle_get_wd(struct inotify_handle *ih, |
| 351 | struct inotify_watch *watch) | 132 | struct inotify_watch *watch) |
| 352 | { | 133 | { |
| 353 | int ret; | 134 | int ret; |
| 354 | 135 | ||
| 355 | do { | 136 | do { |
| 356 | if (unlikely(!idr_pre_get(&dev->idr, GFP_KERNEL))) | 137 | if (unlikely(!idr_pre_get(&ih->idr, GFP_KERNEL))) |
| 357 | return -ENOSPC; | 138 | return -ENOSPC; |
| 358 | ret = idr_get_new_above(&dev->idr, watch, dev->last_wd+1, &watch->wd); | 139 | ret = idr_get_new_above(&ih->idr, watch, ih->last_wd+1, &watch->wd); |
| 359 | } while (ret == -EAGAIN); | 140 | } while (ret == -EAGAIN); |
| 360 | 141 | ||
| 361 | return ret; | 142 | if (likely(!ret)) |
| 362 | } | 143 | ih->last_wd = watch->wd; |
| 363 | 144 | ||
| 364 | /* | 145 | return ret; |
| 365 | * find_inode - resolve a user-given path to a specific inode and return a nd | ||
| 366 | */ | ||
| 367 | static int find_inode(const char __user *dirname, struct nameidata *nd, | ||
| 368 | unsigned flags) | ||
| 369 | { | ||
| 370 | int error; | ||
| 371 | |||
| 372 | error = __user_walk(dirname, flags, nd); | ||
| 373 | if (error) | ||
| 374 | return error; | ||
| 375 | /* you can only watch an inode if you have read permissions on it */ | ||
| 376 | error = vfs_permission(nd, MAY_READ); | ||
| 377 | if (error) | ||
| 378 | path_release(nd); | ||
| 379 | return error; | ||
| 380 | } | 146 | } |
| 381 | 147 | ||
| 382 | /* | 148 | /* |
| @@ -422,67 +188,18 @@ static void set_dentry_child_flags(struct inode *inode, int watched) | |||
| 422 | } | 188 | } |
| 423 | 189 | ||
| 424 | /* | 190 | /* |
| 425 | * create_watch - creates a watch on the given device. | 191 | * inotify_find_handle - find the watch associated with the given inode and |
| 426 | * | 192 | * handle |
| 427 | * Callers must hold dev->mutex. Calls inotify_dev_get_wd() so may sleep. | ||
| 428 | * Both 'dev' and 'inode' (by way of nameidata) need to be pinned. | ||
| 429 | */ | ||
| 430 | static struct inotify_watch *create_watch(struct inotify_device *dev, | ||
| 431 | u32 mask, struct inode *inode) | ||
| 432 | { | ||
| 433 | struct inotify_watch *watch; | ||
| 434 | int ret; | ||
| 435 | |||
| 436 | if (atomic_read(&dev->user->inotify_watches) >= | ||
| 437 | inotify_max_user_watches) | ||
| 438 | return ERR_PTR(-ENOSPC); | ||
| 439 | |||
| 440 | watch = kmem_cache_alloc(watch_cachep, GFP_KERNEL); | ||
| 441 | if (unlikely(!watch)) | ||
| 442 | return ERR_PTR(-ENOMEM); | ||
| 443 | |||
| 444 | ret = inotify_dev_get_wd(dev, watch); | ||
| 445 | if (unlikely(ret)) { | ||
| 446 | kmem_cache_free(watch_cachep, watch); | ||
| 447 | return ERR_PTR(ret); | ||
| 448 | } | ||
| 449 | |||
| 450 | dev->last_wd = watch->wd; | ||
| 451 | watch->mask = mask; | ||
| 452 | atomic_set(&watch->count, 0); | ||
| 453 | INIT_LIST_HEAD(&watch->d_list); | ||
| 454 | INIT_LIST_HEAD(&watch->i_list); | ||
| 455 | |||
| 456 | /* save a reference to device and bump the count to make it official */ | ||
| 457 | get_inotify_dev(dev); | ||
| 458 | watch->dev = dev; | ||
| 459 | |||
| 460 | /* | ||
| 461 | * Save a reference to the inode and bump the ref count to make it | ||
| 462 | * official. We hold a reference to nameidata, which makes this safe. | ||
| 463 | */ | ||
| 464 | watch->inode = igrab(inode); | ||
| 465 | |||
| 466 | /* bump our own count, corresponding to our entry in dev->watches */ | ||
| 467 | get_inotify_watch(watch); | ||
| 468 | |||
| 469 | atomic_inc(&dev->user->inotify_watches); | ||
| 470 | |||
| 471 | return watch; | ||
| 472 | } | ||
| 473 | |||
| 474 | /* | ||
| 475 | * inotify_find_dev - find the watch associated with the given inode and dev | ||
| 476 | * | 193 | * |
| 477 | * Callers must hold inode->inotify_mutex. | 194 | * Callers must hold inode->inotify_mutex. |
| 478 | */ | 195 | */ |
| 479 | static struct inotify_watch *inode_find_dev(struct inode *inode, | 196 | static struct inotify_watch *inode_find_handle(struct inode *inode, |
| 480 | struct inotify_device *dev) | 197 | struct inotify_handle *ih) |
| 481 | { | 198 | { |
| 482 | struct inotify_watch *watch; | 199 | struct inotify_watch *watch; |
| 483 | 200 | ||
| 484 | list_for_each_entry(watch, &inode->inotify_watches, i_list) { | 201 | list_for_each_entry(watch, &inode->inotify_watches, i_list) { |
| 485 | if (watch->dev == dev) | 202 | if (watch->ih == ih) |
| 486 | return watch; | 203 | return watch; |
| 487 | } | 204 | } |
| 488 | 205 | ||
| @@ -490,40 +207,40 @@ static struct inotify_watch *inode_find_dev(struct inode *inode, | |||
| 490 | } | 207 | } |
| 491 | 208 | ||
| 492 | /* | 209 | /* |
| 493 | * remove_watch_no_event - remove_watch() without the IN_IGNORED event. | 210 | * remove_watch_no_event - remove watch without the IN_IGNORED event. |
| 211 | * | ||
| 212 | * Callers must hold both inode->inotify_mutex and ih->mutex. | ||
| 494 | */ | 213 | */ |
| 495 | static void remove_watch_no_event(struct inotify_watch *watch, | 214 | static void remove_watch_no_event(struct inotify_watch *watch, |
| 496 | struct inotify_device *dev) | 215 | struct inotify_handle *ih) |
| 497 | { | 216 | { |
| 498 | list_del(&watch->i_list); | 217 | list_del(&watch->i_list); |
| 499 | list_del(&watch->d_list); | 218 | list_del(&watch->h_list); |
| 500 | 219 | ||
| 501 | if (!inotify_inode_watched(watch->inode)) | 220 | if (!inotify_inode_watched(watch->inode)) |
| 502 | set_dentry_child_flags(watch->inode, 0); | 221 | set_dentry_child_flags(watch->inode, 0); |
| 503 | 222 | ||
| 504 | atomic_dec(&dev->user->inotify_watches); | 223 | idr_remove(&ih->idr, watch->wd); |
| 505 | idr_remove(&dev->idr, watch->wd); | ||
| 506 | put_inotify_watch(watch); | ||
| 507 | } | 224 | } |
| 508 | 225 | ||
| 509 | /* | 226 | /** |
| 510 | * remove_watch - Remove a watch from both the device and the inode. Sends | 227 | * inotify_remove_watch_locked - Remove a watch from both the handle and the |
| 511 | * the IN_IGNORED event to the given device signifying that the inode is no | 228 | * inode. Sends the IN_IGNORED event signifying that the inode is no longer |
| 512 | * longer watched. | 229 | * watched. May be invoked from a caller's event handler. |
| 513 | * | 230 | * @ih: inotify handle associated with watch |
| 514 | * Callers must hold both inode->inotify_mutex and dev->mutex. We drop a | 231 | * @watch: watch to remove |
| 515 | * reference to the inode before returning. | ||
| 516 | * | 232 | * |
| 517 | * The inode is not iput() so as to remain atomic. If the inode needs to be | 233 | * Callers must hold both inode->inotify_mutex and ih->mutex. |
| 518 | * iput(), the call returns one. Otherwise, it returns zero. | ||
| 519 | */ | 234 | */ |
| 520 | static void remove_watch(struct inotify_watch *watch,struct inotify_device *dev) | 235 | void inotify_remove_watch_locked(struct inotify_handle *ih, |
| 236 | struct inotify_watch *watch) | ||
| 521 | { | 237 | { |
| 522 | inotify_dev_queue_event(dev, watch, IN_IGNORED, 0, NULL); | 238 | remove_watch_no_event(watch, ih); |
| 523 | remove_watch_no_event(watch, dev); | 239 | ih->in_ops->handle_event(watch, watch->wd, IN_IGNORED, 0, NULL, NULL); |
| 524 | } | 240 | } |
| 241 | EXPORT_SYMBOL_GPL(inotify_remove_watch_locked); | ||
| 525 | 242 | ||
| 526 | /* Kernel API */ | 243 | /* Kernel API for producing events */ |
| 527 | 244 | ||
| 528 | /* | 245 | /* |
| 529 | * inotify_d_instantiate - instantiate dcache entry for inode | 246 | * inotify_d_instantiate - instantiate dcache entry for inode |
| @@ -563,9 +280,10 @@ void inotify_d_move(struct dentry *entry) | |||
| 563 | * @mask: event mask describing this event | 280 | * @mask: event mask describing this event |
| 564 | * @cookie: cookie for synchronization, or zero | 281 | * @cookie: cookie for synchronization, or zero |
| 565 | * @name: filename, if any | 282 | * @name: filename, if any |
| 283 | * @n_inode: inode associated with name | ||
| 566 | */ | 284 | */ |
| 567 | void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie, | 285 | void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie, |
| 568 | const char *name) | 286 | const char *name, struct inode *n_inode) |
| 569 | { | 287 | { |
| 570 | struct inotify_watch *watch, *next; | 288 | struct inotify_watch *watch, *next; |
| 571 | 289 | ||
| @@ -576,14 +294,13 @@ void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie, | |||
| 576 | list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { | 294 | list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { |
| 577 | u32 watch_mask = watch->mask; | 295 | u32 watch_mask = watch->mask; |
| 578 | if (watch_mask & mask) { | 296 | if (watch_mask & mask) { |
| 579 | struct inotify_device *dev = watch->dev; | 297 | struct inotify_handle *ih= watch->ih; |
| 580 | get_inotify_watch(watch); | 298 | mutex_lock(&ih->mutex); |
| 581 | mutex_lock(&dev->mutex); | ||
| 582 | inotify_dev_queue_event(dev, watch, mask, cookie, name); | ||
| 583 | if (watch_mask & IN_ONESHOT) | 299 | if (watch_mask & IN_ONESHOT) |
| 584 | remove_watch_no_event(watch, dev); | 300 | remove_watch_no_event(watch, ih); |
| 585 | mutex_unlock(&dev->mutex); | 301 | ih->in_ops->handle_event(watch, watch->wd, mask, cookie, |
| 586 | put_inotify_watch(watch); | 302 | name, n_inode); |
| 303 | mutex_unlock(&ih->mutex); | ||
| 587 | } | 304 | } |
| 588 | } | 305 | } |
| 589 | mutex_unlock(&inode->inotify_mutex); | 306 | mutex_unlock(&inode->inotify_mutex); |
| @@ -613,7 +330,8 @@ void inotify_dentry_parent_queue_event(struct dentry *dentry, u32 mask, | |||
| 613 | if (inotify_inode_watched(inode)) { | 330 | if (inotify_inode_watched(inode)) { |
| 614 | dget(parent); | 331 | dget(parent); |
| 615 | spin_unlock(&dentry->d_lock); | 332 | spin_unlock(&dentry->d_lock); |
| 616 | inotify_inode_queue_event(inode, mask, cookie, name); | 333 | inotify_inode_queue_event(inode, mask, cookie, name, |
| 334 | dentry->d_inode); | ||
| 617 | dput(parent); | 335 | dput(parent); |
| 618 | } else | 336 | } else |
| 619 | spin_unlock(&dentry->d_lock); | 337 | spin_unlock(&dentry->d_lock); |
| @@ -665,7 +383,7 @@ void inotify_unmount_inodes(struct list_head *list) | |||
| 665 | 383 | ||
| 666 | need_iput_tmp = need_iput; | 384 | need_iput_tmp = need_iput; |
| 667 | need_iput = NULL; | 385 | need_iput = NULL; |
| 668 | /* In case the remove_watch() drops a reference. */ | 386 | /* In case inotify_remove_watch_locked() drops a reference. */ |
| 669 | if (inode != need_iput_tmp) | 387 | if (inode != need_iput_tmp) |
| 670 | __iget(inode); | 388 | __iget(inode); |
| 671 | else | 389 | else |
| @@ -694,11 +412,12 @@ void inotify_unmount_inodes(struct list_head *list) | |||
| 694 | mutex_lock(&inode->inotify_mutex); | 412 | mutex_lock(&inode->inotify_mutex); |
| 695 | watches = &inode->inotify_watches; | 413 | watches = &inode->inotify_watches; |
| 696 | list_for_each_entry_safe(watch, next_w, watches, i_list) { | 414 | list_for_each_entry_safe(watch, next_w, watches, i_list) { |
| 697 | struct inotify_device *dev = watch->dev; | 415 | struct inotify_handle *ih= watch->ih; |
| 698 | mutex_lock(&dev->mutex); | 416 | mutex_lock(&ih->mutex); |
| 699 | inotify_dev_queue_event(dev, watch, IN_UNMOUNT,0,NULL); | 417 | ih->in_ops->handle_event(watch, watch->wd, IN_UNMOUNT, 0, |
| 700 | remove_watch(watch, dev); | 418 | NULL, NULL); |
| 701 | mutex_unlock(&dev->mutex); | 419 | inotify_remove_watch_locked(ih, watch); |
| 420 | mutex_unlock(&ih->mutex); | ||
| 702 | } | 421 | } |
| 703 | mutex_unlock(&inode->inotify_mutex); | 422 | mutex_unlock(&inode->inotify_mutex); |
| 704 | iput(inode); | 423 | iput(inode); |
| @@ -718,432 +437,292 @@ void inotify_inode_is_dead(struct inode *inode) | |||
| 718 | 437 | ||
| 719 | mutex_lock(&inode->inotify_mutex); | 438 | mutex_lock(&inode->inotify_mutex); |
| 720 | list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { | 439 | list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { |
| 721 | struct inotify_device *dev = watch->dev; | 440 | struct inotify_handle *ih = watch->ih; |
| 722 | mutex_lock(&dev->mutex); | 441 | mutex_lock(&ih->mutex); |
| 723 | remove_watch(watch, dev); | 442 | inotify_remove_watch_locked(ih, watch); |
| 724 | mutex_unlock(&dev->mutex); | 443 | mutex_unlock(&ih->mutex); |
| 725 | } | 444 | } |
| 726 | mutex_unlock(&inode->inotify_mutex); | 445 | mutex_unlock(&inode->inotify_mutex); |
| 727 | } | 446 | } |
| 728 | EXPORT_SYMBOL_GPL(inotify_inode_is_dead); | 447 | EXPORT_SYMBOL_GPL(inotify_inode_is_dead); |
| 729 | 448 | ||
| 730 | /* Device Interface */ | 449 | /* Kernel Consumer API */ |
| 731 | 450 | ||
| 732 | static unsigned int inotify_poll(struct file *file, poll_table *wait) | 451 | /** |
| 452 | * inotify_init - allocate and initialize an inotify instance | ||
| 453 | * @ops: caller's inotify operations | ||
| 454 | */ | ||
| 455 | struct inotify_handle *inotify_init(const struct inotify_operations *ops) | ||
| 733 | { | 456 | { |
| 734 | struct inotify_device *dev = file->private_data; | 457 | struct inotify_handle *ih; |
| 735 | int ret = 0; | ||
| 736 | 458 | ||
| 737 | poll_wait(file, &dev->wq, wait); | 459 | ih = kmalloc(sizeof(struct inotify_handle), GFP_KERNEL); |
| 738 | mutex_lock(&dev->mutex); | 460 | if (unlikely(!ih)) |
| 739 | if (!list_empty(&dev->events)) | 461 | return ERR_PTR(-ENOMEM); |
| 740 | ret = POLLIN | POLLRDNORM; | ||
| 741 | mutex_unlock(&dev->mutex); | ||
| 742 | 462 | ||
| 743 | return ret; | 463 | idr_init(&ih->idr); |
| 464 | INIT_LIST_HEAD(&ih->watches); | ||
| 465 | mutex_init(&ih->mutex); | ||
| 466 | ih->last_wd = 0; | ||
| 467 | ih->in_ops = ops; | ||
| 468 | atomic_set(&ih->count, 0); | ||
| 469 | get_inotify_handle(ih); | ||
| 470 | |||
| 471 | return ih; | ||
| 744 | } | 472 | } |
| 473 | EXPORT_SYMBOL_GPL(inotify_init); | ||
| 745 | 474 | ||
| 746 | static ssize_t inotify_read(struct file *file, char __user *buf, | 475 | /** |
| 747 | size_t count, loff_t *pos) | 476 | * inotify_init_watch - initialize an inotify watch |
| 477 | * @watch: watch to initialize | ||
| 478 | */ | ||
| 479 | void inotify_init_watch(struct inotify_watch *watch) | ||
| 748 | { | 480 | { |
| 749 | size_t event_size = sizeof (struct inotify_event); | 481 | INIT_LIST_HEAD(&watch->h_list); |
| 750 | struct inotify_device *dev; | 482 | INIT_LIST_HEAD(&watch->i_list); |
| 751 | char __user *start; | 483 | atomic_set(&watch->count, 0); |
| 752 | int ret; | 484 | get_inotify_watch(watch); /* initial get */ |
| 753 | DEFINE_WAIT(wait); | ||
| 754 | |||
| 755 | start = buf; | ||
| 756 | dev = file->private_data; | ||
| 757 | |||
| 758 | while (1) { | ||
| 759 | int events; | ||
| 760 | |||
| 761 | prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE); | ||
| 762 | |||
| 763 | mutex_lock(&dev->mutex); | ||
| 764 | events = !list_empty(&dev->events); | ||
| 765 | mutex_unlock(&dev->mutex); | ||
| 766 | if (events) { | ||
| 767 | ret = 0; | ||
| 768 | break; | ||
| 769 | } | ||
| 770 | |||
| 771 | if (file->f_flags & O_NONBLOCK) { | ||
| 772 | ret = -EAGAIN; | ||
| 773 | break; | ||
| 774 | } | ||
| 775 | |||
| 776 | if (signal_pending(current)) { | ||
| 777 | ret = -EINTR; | ||
| 778 | break; | ||
| 779 | } | ||
| 780 | |||
| 781 | schedule(); | ||
| 782 | } | ||
| 783 | |||
| 784 | finish_wait(&dev->wq, &wait); | ||
| 785 | if (ret) | ||
| 786 | return ret; | ||
| 787 | |||
| 788 | mutex_lock(&dev->mutex); | ||
| 789 | while (1) { | ||
| 790 | struct inotify_kernel_event *kevent; | ||
| 791 | |||
| 792 | ret = buf - start; | ||
| 793 | if (list_empty(&dev->events)) | ||
| 794 | break; | ||
| 795 | |||
| 796 | kevent = inotify_dev_get_event(dev); | ||
| 797 | if (event_size + kevent->event.len > count) | ||
| 798 | break; | ||
| 799 | |||
| 800 | if (copy_to_user(buf, &kevent->event, event_size)) { | ||
| 801 | ret = -EFAULT; | ||
| 802 | break; | ||
| 803 | } | ||
| 804 | buf += event_size; | ||
| 805 | count -= event_size; | ||
| 806 | |||
| 807 | if (kevent->name) { | ||
| 808 | if (copy_to_user(buf, kevent->name, kevent->event.len)){ | ||
| 809 | ret = -EFAULT; | ||
| 810 | break; | ||
| 811 | } | ||
| 812 | buf += kevent->event.len; | ||
| 813 | count -= kevent->event.len; | ||
| 814 | } | ||
| 815 | |||
| 816 | remove_kevent(dev, kevent); | ||
| 817 | } | ||
| 818 | mutex_unlock(&dev->mutex); | ||
| 819 | |||
| 820 | return ret; | ||
| 821 | } | 485 | } |
| 486 | EXPORT_SYMBOL_GPL(inotify_init_watch); | ||
| 822 | 487 | ||
| 823 | static int inotify_release(struct inode *ignored, struct file *file) | 488 | /** |
| 489 | * inotify_destroy - clean up and destroy an inotify instance | ||
| 490 | * @ih: inotify handle | ||
| 491 | */ | ||
| 492 | void inotify_destroy(struct inotify_handle *ih) | ||
| 824 | { | 493 | { |
| 825 | struct inotify_device *dev = file->private_data; | ||
| 826 | |||
| 827 | /* | 494 | /* |
| 828 | * Destroy all of the watches on this device. Unfortunately, not very | 495 | * Destroy all of the watches for this handle. Unfortunately, not very |
| 829 | * pretty. We cannot do a simple iteration over the list, because we | 496 | * pretty. We cannot do a simple iteration over the list, because we |
| 830 | * do not know the inode until we iterate to the watch. But we need to | 497 | * do not know the inode until we iterate to the watch. But we need to |
| 831 | * hold inode->inotify_mutex before dev->mutex. The following works. | 498 | * hold inode->inotify_mutex before ih->mutex. The following works. |
| 832 | */ | 499 | */ |
| 833 | while (1) { | 500 | while (1) { |
| 834 | struct inotify_watch *watch; | 501 | struct inotify_watch *watch; |
| 835 | struct list_head *watches; | 502 | struct list_head *watches; |
| 836 | struct inode *inode; | 503 | struct inode *inode; |
| 837 | 504 | ||
| 838 | mutex_lock(&dev->mutex); | 505 | mutex_lock(&ih->mutex); |
| 839 | watches = &dev->watches; | 506 | watches = &ih->watches; |
| 840 | if (list_empty(watches)) { | 507 | if (list_empty(watches)) { |
| 841 | mutex_unlock(&dev->mutex); | 508 | mutex_unlock(&ih->mutex); |
| 842 | break; | 509 | break; |
| 843 | } | 510 | } |
| 844 | watch = list_entry(watches->next, struct inotify_watch, d_list); | 511 | watch = list_entry(watches->next, struct inotify_watch, h_list); |
| 845 | get_inotify_watch(watch); | 512 | get_inotify_watch(watch); |
| 846 | mutex_unlock(&dev->mutex); | 513 | mutex_unlock(&ih->mutex); |
| 847 | 514 | ||
| 848 | inode = watch->inode; | 515 | inode = watch->inode; |
| 849 | mutex_lock(&inode->inotify_mutex); | 516 | mutex_lock(&inode->inotify_mutex); |
| 850 | mutex_lock(&dev->mutex); | 517 | mutex_lock(&ih->mutex); |
| 851 | 518 | ||
| 852 | /* make sure we didn't race with another list removal */ | 519 | /* make sure we didn't race with another list removal */ |
| 853 | if (likely(idr_find(&dev->idr, watch->wd))) | 520 | if (likely(idr_find(&ih->idr, watch->wd))) { |
| 854 | remove_watch_no_event(watch, dev); | 521 | remove_watch_no_event(watch, ih); |
| 522 | put_inotify_watch(watch); | ||
| 523 | } | ||
| 855 | 524 | ||
| 856 | mutex_unlock(&dev->mutex); | 525 | mutex_unlock(&ih->mutex); |
| 857 | mutex_unlock(&inode->inotify_mutex); | 526 | mutex_unlock(&inode->inotify_mutex); |
| 858 | put_inotify_watch(watch); | 527 | put_inotify_watch(watch); |
| 859 | } | 528 | } |
| 860 | 529 | ||
| 861 | /* destroy all of the events on this device */ | 530 | /* free this handle: the put matching the get in inotify_init() */ |
| 862 | mutex_lock(&dev->mutex); | 531 | put_inotify_handle(ih); |
| 863 | while (!list_empty(&dev->events)) | ||
| 864 | inotify_dev_event_dequeue(dev); | ||
| 865 | mutex_unlock(&dev->mutex); | ||
| 866 | |||
| 867 | /* free this device: the put matching the get in inotify_init() */ | ||
| 868 | put_inotify_dev(dev); | ||
| 869 | |||
| 870 | return 0; | ||
| 871 | } | 532 | } |
| 533 | EXPORT_SYMBOL_GPL(inotify_destroy); | ||
| 872 | 534 | ||
| 873 | /* | 535 | /** |
| 874 | * inotify_ignore - remove a given wd from this inotify instance. | 536 | * inotify_find_watch - find an existing watch for an (ih,inode) pair |
| 537 | * @ih: inotify handle | ||
| 538 | * @inode: inode to watch | ||
| 539 | * @watchp: pointer to existing inotify_watch | ||
| 875 | * | 540 | * |
| 876 | * Can sleep. | 541 | * Caller must pin given inode (via nameidata). |
| 877 | */ | 542 | */ |
| 878 | static int inotify_ignore(struct inotify_device *dev, s32 wd) | 543 | s32 inotify_find_watch(struct inotify_handle *ih, struct inode *inode, |
| 544 | struct inotify_watch **watchp) | ||
| 879 | { | 545 | { |
| 880 | struct inotify_watch *watch; | 546 | struct inotify_watch *old; |
| 881 | struct inode *inode; | 547 | int ret = -ENOENT; |
| 882 | |||
| 883 | mutex_lock(&dev->mutex); | ||
| 884 | watch = idr_find(&dev->idr, wd); | ||
| 885 | if (unlikely(!watch)) { | ||
| 886 | mutex_unlock(&dev->mutex); | ||
| 887 | return -EINVAL; | ||
| 888 | } | ||
| 889 | get_inotify_watch(watch); | ||
| 890 | inode = watch->inode; | ||
| 891 | mutex_unlock(&dev->mutex); | ||
| 892 | 548 | ||
| 893 | mutex_lock(&inode->inotify_mutex); | 549 | mutex_lock(&inode->inotify_mutex); |
| 894 | mutex_lock(&dev->mutex); | 550 | mutex_lock(&ih->mutex); |
| 895 | 551 | ||
| 896 | /* make sure that we did not race */ | 552 | old = inode_find_handle(inode, ih); |
| 897 | if (likely(idr_find(&dev->idr, wd) == watch)) | 553 | if (unlikely(old)) { |
| 898 | remove_watch(watch, dev); | 554 | get_inotify_watch(old); /* caller must put watch */ |
| 555 | *watchp = old; | ||
| 556 | ret = old->wd; | ||
| 557 | } | ||
| 899 | 558 | ||
| 900 | mutex_unlock(&dev->mutex); | 559 | mutex_unlock(&ih->mutex); |
| 901 | mutex_unlock(&inode->inotify_mutex); | 560 | mutex_unlock(&inode->inotify_mutex); |
| 902 | put_inotify_watch(watch); | ||
| 903 | 561 | ||
| 904 | return 0; | 562 | return ret; |
| 905 | } | 563 | } |
| 564 | EXPORT_SYMBOL_GPL(inotify_find_watch); | ||
| 906 | 565 | ||
| 907 | static long inotify_ioctl(struct file *file, unsigned int cmd, | 566 | /** |
| 908 | unsigned long arg) | 567 | * inotify_find_update_watch - find and update the mask of an existing watch |
| 568 | * @ih: inotify handle | ||
| 569 | * @inode: inode's watch to update | ||
| 570 | * @mask: mask of events to watch | ||
| 571 | * | ||
| 572 | * Caller must pin given inode (via nameidata). | ||
| 573 | */ | ||
| 574 | s32 inotify_find_update_watch(struct inotify_handle *ih, struct inode *inode, | ||
| 575 | u32 mask) | ||
| 909 | { | 576 | { |
| 910 | struct inotify_device *dev; | 577 | struct inotify_watch *old; |
| 911 | void __user *p; | 578 | int mask_add = 0; |
| 912 | int ret = -ENOTTY; | 579 | int ret; |
| 913 | |||
| 914 | dev = file->private_data; | ||
| 915 | p = (void __user *) arg; | ||
| 916 | |||
| 917 | switch (cmd) { | ||
| 918 | case FIONREAD: | ||
| 919 | ret = put_user(dev->queue_size, (int __user *) p); | ||
| 920 | break; | ||
| 921 | } | ||
| 922 | |||
| 923 | return ret; | ||
| 924 | } | ||
| 925 | 580 | ||
| 926 | static const struct file_operations inotify_fops = { | 581 | if (mask & IN_MASK_ADD) |
| 927 | .poll = inotify_poll, | 582 | mask_add = 1; |
| 928 | .read = inotify_read, | ||
| 929 | .release = inotify_release, | ||
| 930 | .unlocked_ioctl = inotify_ioctl, | ||
| 931 | .compat_ioctl = inotify_ioctl, | ||
| 932 | }; | ||
| 933 | 583 | ||
| 934 | asmlinkage long sys_inotify_init(void) | 584 | /* don't allow invalid bits: we don't want flags set */ |
| 935 | { | 585 | mask &= IN_ALL_EVENTS | IN_ONESHOT; |
| 936 | struct inotify_device *dev; | 586 | if (unlikely(!mask)) |
| 937 | struct user_struct *user; | 587 | return -EINVAL; |
| 938 | struct file *filp; | ||
| 939 | int fd, ret; | ||
| 940 | |||
| 941 | fd = get_unused_fd(); | ||
| 942 | if (fd < 0) | ||
| 943 | return fd; | ||
| 944 | |||
| 945 | filp = get_empty_filp(); | ||
| 946 | if (!filp) { | ||
| 947 | ret = -ENFILE; | ||
| 948 | goto out_put_fd; | ||
| 949 | } | ||
| 950 | 588 | ||
| 951 | user = get_uid(current->user); | 589 | mutex_lock(&inode->inotify_mutex); |
| 952 | if (unlikely(atomic_read(&user->inotify_devs) >= | 590 | mutex_lock(&ih->mutex); |
| 953 | inotify_max_user_instances)) { | ||
| 954 | ret = -EMFILE; | ||
| 955 | goto out_free_uid; | ||
| 956 | } | ||
| 957 | 591 | ||
| 958 | dev = kmalloc(sizeof(struct inotify_device), GFP_KERNEL); | 592 | /* |
| 959 | if (unlikely(!dev)) { | 593 | * Handle the case of re-adding a watch on an (inode,ih) pair that we |
| 960 | ret = -ENOMEM; | 594 | * are already watching. We just update the mask and return its wd. |
| 961 | goto out_free_uid; | 595 | */ |
| 596 | old = inode_find_handle(inode, ih); | ||
| 597 | if (unlikely(!old)) { | ||
| 598 | ret = -ENOENT; | ||
| 599 | goto out; | ||
| 962 | } | 600 | } |
| 963 | 601 | ||
| 964 | filp->f_op = &inotify_fops; | 602 | if (mask_add) |
| 965 | filp->f_vfsmnt = mntget(inotify_mnt); | 603 | old->mask |= mask; |
| 966 | filp->f_dentry = dget(inotify_mnt->mnt_root); | 604 | else |
| 967 | filp->f_mapping = filp->f_dentry->d_inode->i_mapping; | 605 | old->mask = mask; |
| 968 | filp->f_mode = FMODE_READ; | 606 | ret = old->wd; |
| 969 | filp->f_flags = O_RDONLY; | 607 | out: |
| 970 | filp->private_data = dev; | 608 | mutex_unlock(&ih->mutex); |
| 971 | 609 | mutex_unlock(&inode->inotify_mutex); | |
| 972 | idr_init(&dev->idr); | ||
| 973 | INIT_LIST_HEAD(&dev->events); | ||
| 974 | INIT_LIST_HEAD(&dev->watches); | ||
| 975 | init_waitqueue_head(&dev->wq); | ||
| 976 | mutex_init(&dev->mutex); | ||
| 977 | dev->event_count = 0; | ||
| 978 | dev->queue_size = 0; | ||
| 979 | dev->max_events = inotify_max_queued_events; | ||
| 980 | dev->user = user; | ||
| 981 | dev->last_wd = 0; | ||
| 982 | atomic_set(&dev->count, 0); | ||
| 983 | |||
| 984 | get_inotify_dev(dev); | ||
| 985 | atomic_inc(&user->inotify_devs); | ||
| 986 | fd_install(fd, filp); | ||
| 987 | |||
| 988 | return fd; | ||
| 989 | out_free_uid: | ||
| 990 | free_uid(user); | ||
| 991 | put_filp(filp); | ||
| 992 | out_put_fd: | ||
| 993 | put_unused_fd(fd); | ||
| 994 | return ret; | 610 | return ret; |
| 995 | } | 611 | } |
| 612 | EXPORT_SYMBOL_GPL(inotify_find_update_watch); | ||
| 996 | 613 | ||
| 997 | asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask) | 614 | /** |
| 615 | * inotify_add_watch - add a watch to an inotify instance | ||
| 616 | * @ih: inotify handle | ||
| 617 | * @watch: caller allocated watch structure | ||
| 618 | * @inode: inode to watch | ||
| 619 | * @mask: mask of events to watch | ||
| 620 | * | ||
| 621 | * Caller must pin given inode (via nameidata). | ||
| 622 | * Caller must ensure it only calls inotify_add_watch() once per watch. | ||
| 623 | * Calls inotify_handle_get_wd() so may sleep. | ||
| 624 | */ | ||
| 625 | s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch, | ||
| 626 | struct inode *inode, u32 mask) | ||
| 998 | { | 627 | { |
| 999 | struct inotify_watch *watch, *old; | 628 | int ret = 0; |
| 1000 | struct inode *inode; | ||
| 1001 | struct inotify_device *dev; | ||
| 1002 | struct nameidata nd; | ||
| 1003 | struct file *filp; | ||
| 1004 | int ret, fput_needed; | ||
| 1005 | int mask_add = 0; | ||
| 1006 | unsigned flags = 0; | ||
| 1007 | |||
| 1008 | filp = fget_light(fd, &fput_needed); | ||
| 1009 | if (unlikely(!filp)) | ||
| 1010 | return -EBADF; | ||
| 1011 | |||
| 1012 | /* verify that this is indeed an inotify instance */ | ||
| 1013 | if (unlikely(filp->f_op != &inotify_fops)) { | ||
| 1014 | ret = -EINVAL; | ||
| 1015 | goto fput_and_out; | ||
| 1016 | } | ||
| 1017 | |||
| 1018 | if (!(mask & IN_DONT_FOLLOW)) | ||
| 1019 | flags |= LOOKUP_FOLLOW; | ||
| 1020 | if (mask & IN_ONLYDIR) | ||
| 1021 | flags |= LOOKUP_DIRECTORY; | ||
| 1022 | |||
| 1023 | ret = find_inode(path, &nd, flags); | ||
| 1024 | if (unlikely(ret)) | ||
| 1025 | goto fput_and_out; | ||
| 1026 | 629 | ||
| 1027 | /* inode held in place by reference to nd; dev by fget on fd */ | 630 | /* don't allow invalid bits: we don't want flags set */ |
| 1028 | inode = nd.dentry->d_inode; | 631 | mask &= IN_ALL_EVENTS | IN_ONESHOT; |
| 1029 | dev = filp->private_data; | 632 | if (unlikely(!mask)) |
| 633 | return -EINVAL; | ||
| 634 | watch->mask = mask; | ||
| 1030 | 635 | ||
| 1031 | mutex_lock(&inode->inotify_mutex); | 636 | mutex_lock(&inode->inotify_mutex); |
| 1032 | mutex_lock(&dev->mutex); | 637 | mutex_lock(&ih->mutex); |
| 1033 | |||
| 1034 | if (mask & IN_MASK_ADD) | ||
| 1035 | mask_add = 1; | ||
| 1036 | 638 | ||
| 1037 | /* don't let user-space set invalid bits: we don't want flags set */ | 639 | /* Initialize a new watch */ |
| 1038 | mask &= IN_ALL_EVENTS | IN_ONESHOT; | 640 | ret = inotify_handle_get_wd(ih, watch); |
| 1039 | if (unlikely(!mask)) { | 641 | if (unlikely(ret)) |
| 1040 | ret = -EINVAL; | ||
| 1041 | goto out; | 642 | goto out; |
| 1042 | } | 643 | ret = watch->wd; |
| 644 | |||
| 645 | /* save a reference to handle and bump the count to make it official */ | ||
| 646 | get_inotify_handle(ih); | ||
| 647 | watch->ih = ih; | ||
| 1043 | 648 | ||
| 1044 | /* | 649 | /* |
| 1045 | * Handle the case of re-adding a watch on an (inode,dev) pair that we | 650 | * Save a reference to the inode and bump the ref count to make it |
| 1046 | * are already watching. We just update the mask and return its wd. | 651 | * official. We hold a reference to nameidata, which makes this safe. |
| 1047 | */ | 652 | */ |
| 1048 | old = inode_find_dev(inode, dev); | 653 | watch->inode = igrab(inode); |
| 1049 | if (unlikely(old)) { | ||
| 1050 | if (mask_add) | ||
| 1051 | old->mask |= mask; | ||
| 1052 | else | ||
| 1053 | old->mask = mask; | ||
| 1054 | ret = old->wd; | ||
| 1055 | goto out; | ||
| 1056 | } | ||
| 1057 | |||
| 1058 | watch = create_watch(dev, mask, inode); | ||
| 1059 | if (unlikely(IS_ERR(watch))) { | ||
| 1060 | ret = PTR_ERR(watch); | ||
| 1061 | goto out; | ||
| 1062 | } | ||
| 1063 | 654 | ||
| 1064 | if (!inotify_inode_watched(inode)) | 655 | if (!inotify_inode_watched(inode)) |
| 1065 | set_dentry_child_flags(inode, 1); | 656 | set_dentry_child_flags(inode, 1); |
| 1066 | 657 | ||
| 1067 | /* Add the watch to the device's and the inode's list */ | 658 | /* Add the watch to the handle's and the inode's list */ |
| 1068 | list_add(&watch->d_list, &dev->watches); | 659 | list_add(&watch->h_list, &ih->watches); |
| 1069 | list_add(&watch->i_list, &inode->inotify_watches); | 660 | list_add(&watch->i_list, &inode->inotify_watches); |
| 1070 | ret = watch->wd; | ||
| 1071 | out: | 661 | out: |
| 1072 | mutex_unlock(&dev->mutex); | 662 | mutex_unlock(&ih->mutex); |
| 1073 | mutex_unlock(&inode->inotify_mutex); | 663 | mutex_unlock(&inode->inotify_mutex); |
| 1074 | path_release(&nd); | ||
| 1075 | fput_and_out: | ||
| 1076 | fput_light(filp, fput_needed); | ||
| 1077 | return ret; | 664 | return ret; |
| 1078 | } | 665 | } |
| 666 | EXPORT_SYMBOL_GPL(inotify_add_watch); | ||
| 1079 | 667 | ||
| 1080 | asmlinkage long sys_inotify_rm_watch(int fd, u32 wd) | 668 | /** |
| 669 | * inotify_rm_wd - remove a watch from an inotify instance | ||
| 670 | * @ih: inotify handle | ||
| 671 | * @wd: watch descriptor to remove | ||
| 672 | * | ||
| 673 | * Can sleep. | ||
| 674 | */ | ||
| 675 | int inotify_rm_wd(struct inotify_handle *ih, u32 wd) | ||
| 1081 | { | 676 | { |
| 1082 | struct file *filp; | 677 | struct inotify_watch *watch; |
| 1083 | struct inotify_device *dev; | 678 | struct inode *inode; |
| 1084 | int ret, fput_needed; | ||
| 1085 | |||
| 1086 | filp = fget_light(fd, &fput_needed); | ||
| 1087 | if (unlikely(!filp)) | ||
| 1088 | return -EBADF; | ||
| 1089 | 679 | ||
| 1090 | /* verify that this is indeed an inotify instance */ | 680 | mutex_lock(&ih->mutex); |
| 1091 | if (unlikely(filp->f_op != &inotify_fops)) { | 681 | watch = idr_find(&ih->idr, wd); |
| 1092 | ret = -EINVAL; | 682 | if (unlikely(!watch)) { |
| 1093 | goto out; | 683 | mutex_unlock(&ih->mutex); |
| 684 | return -EINVAL; | ||
| 1094 | } | 685 | } |
| 686 | get_inotify_watch(watch); | ||
| 687 | inode = watch->inode; | ||
| 688 | mutex_unlock(&ih->mutex); | ||
| 1095 | 689 | ||
| 1096 | dev = filp->private_data; | 690 | mutex_lock(&inode->inotify_mutex); |
| 1097 | ret = inotify_ignore(dev, wd); | 691 | mutex_lock(&ih->mutex); |
| 1098 | 692 | ||
| 1099 | out: | 693 | /* make sure that we did not race */ |
| 1100 | fput_light(filp, fput_needed); | 694 | if (likely(idr_find(&ih->idr, wd) == watch)) |
| 1101 | return ret; | 695 | inotify_remove_watch_locked(ih, watch); |
| 696 | |||
| 697 | mutex_unlock(&ih->mutex); | ||
| 698 | mutex_unlock(&inode->inotify_mutex); | ||
| 699 | put_inotify_watch(watch); | ||
| 700 | |||
| 701 | return 0; | ||
| 1102 | } | 702 | } |
| 703 | EXPORT_SYMBOL_GPL(inotify_rm_wd); | ||
| 1103 | 704 | ||
| 1104 | static struct super_block * | 705 | /** |
| 1105 | inotify_get_sb(struct file_system_type *fs_type, int flags, | 706 | * inotify_rm_watch - remove a watch from an inotify instance |
| 1106 | const char *dev_name, void *data) | 707 | * @ih: inotify handle |
| 708 | * @watch: watch to remove | ||
| 709 | * | ||
| 710 | * Can sleep. | ||
| 711 | */ | ||
| 712 | int inotify_rm_watch(struct inotify_handle *ih, | ||
| 713 | struct inotify_watch *watch) | ||
| 1107 | { | 714 | { |
| 1108 | return get_sb_pseudo(fs_type, "inotify", NULL, 0xBAD1DEA); | 715 | return inotify_rm_wd(ih, watch->wd); |
| 1109 | } | 716 | } |
| 1110 | 717 | EXPORT_SYMBOL_GPL(inotify_rm_watch); | |
| 1111 | static struct file_system_type inotify_fs_type = { | ||
| 1112 | .name = "inotifyfs", | ||
| 1113 | .get_sb = inotify_get_sb, | ||
| 1114 | .kill_sb = kill_anon_super, | ||
| 1115 | }; | ||
| 1116 | 718 | ||
| 1117 | /* | 719 | /* |
| 1118 | * inotify_setup - Our initialization function. Note that we cannnot return | 720 | * inotify_setup - core initialization function |
| 1119 | * error because we have compiled-in VFS hooks. So an (unlikely) failure here | ||
| 1120 | * must result in panic(). | ||
| 1121 | */ | 721 | */ |
| 1122 | static int __init inotify_setup(void) | 722 | static int __init inotify_setup(void) |
| 1123 | { | 723 | { |
| 1124 | int ret; | ||
| 1125 | |||
| 1126 | ret = register_filesystem(&inotify_fs_type); | ||
| 1127 | if (unlikely(ret)) | ||
| 1128 | panic("inotify: register_filesystem returned %d!\n", ret); | ||
| 1129 | |||
| 1130 | inotify_mnt = kern_mount(&inotify_fs_type); | ||
| 1131 | if (IS_ERR(inotify_mnt)) | ||
| 1132 | panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt)); | ||
| 1133 | |||
| 1134 | inotify_max_queued_events = 16384; | ||
| 1135 | inotify_max_user_instances = 128; | ||
| 1136 | inotify_max_user_watches = 8192; | ||
| 1137 | |||
| 1138 | atomic_set(&inotify_cookie, 0); | 724 | atomic_set(&inotify_cookie, 0); |
| 1139 | 725 | ||
| 1140 | watch_cachep = kmem_cache_create("inotify_watch_cache", | ||
| 1141 | sizeof(struct inotify_watch), | ||
| 1142 | 0, SLAB_PANIC, NULL, NULL); | ||
| 1143 | event_cachep = kmem_cache_create("inotify_event_cache", | ||
| 1144 | sizeof(struct inotify_kernel_event), | ||
| 1145 | 0, SLAB_PANIC, NULL, NULL); | ||
| 1146 | |||
| 1147 | return 0; | 726 | return 0; |
| 1148 | } | 727 | } |
| 1149 | 728 | ||
diff --git a/fs/inotify_user.c b/fs/inotify_user.c new file mode 100644 index 000000000000..9e9931e2badd --- /dev/null +++ b/fs/inotify_user.c | |||
| @@ -0,0 +1,719 @@ | |||
| 1 | /* | ||
| 2 | * fs/inotify_user.c - inotify support for userspace | ||
| 3 | * | ||
| 4 | * Authors: | ||
| 5 | * John McCutchan <ttb@tentacle.dhs.org> | ||
| 6 | * Robert Love <rml@novell.com> | ||
| 7 | * | ||
| 8 | * Copyright (C) 2005 John McCutchan | ||
| 9 | * Copyright 2006 Hewlett-Packard Development Company, L.P. | ||
| 10 | * | ||
| 11 | * This program is free software; you can redistribute it and/or modify it | ||
| 12 | * under the terms of the GNU General Public License as published by the | ||
| 13 | * Free Software Foundation; either version 2, or (at your option) any | ||
| 14 | * later version. | ||
| 15 | * | ||
| 16 | * This program is distributed in the hope that it will be useful, but | ||
| 17 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 19 | * General Public License for more details. | ||
| 20 | */ | ||
| 21 | |||
| 22 | #include <linux/kernel.h> | ||
| 23 | #include <linux/sched.h> | ||
| 24 | #include <linux/slab.h> | ||
| 25 | #include <linux/fs.h> | ||
| 26 | #include <linux/file.h> | ||
| 27 | #include <linux/mount.h> | ||
| 28 | #include <linux/namei.h> | ||
| 29 | #include <linux/poll.h> | ||
| 30 | #include <linux/init.h> | ||
| 31 | #include <linux/list.h> | ||
| 32 | #include <linux/inotify.h> | ||
| 33 | #include <linux/syscalls.h> | ||
| 34 | |||
| 35 | #include <asm/ioctls.h> | ||
| 36 | |||
| 37 | static kmem_cache_t *watch_cachep __read_mostly; | ||
| 38 | static kmem_cache_t *event_cachep __read_mostly; | ||
| 39 | |||
| 40 | static struct vfsmount *inotify_mnt __read_mostly; | ||
| 41 | |||
| 42 | /* these are configurable via /proc/sys/fs/inotify/ */ | ||
| 43 | int inotify_max_user_instances __read_mostly; | ||
| 44 | int inotify_max_user_watches __read_mostly; | ||
| 45 | int inotify_max_queued_events __read_mostly; | ||
| 46 | |||
| 47 | /* | ||
| 48 | * Lock ordering: | ||
| 49 | * | ||
| 50 | * inotify_dev->up_mutex (ensures we don't re-add the same watch) | ||
| 51 | * inode->inotify_mutex (protects inode's watch list) | ||
| 52 | * inotify_handle->mutex (protects inotify_handle's watch list) | ||
| 53 | * inotify_dev->ev_mutex (protects device's event queue) | ||
| 54 | */ | ||
| 55 | |||
| 56 | /* | ||
| 57 | * Lifetimes of the main data structures: | ||
| 58 | * | ||
| 59 | * inotify_device: Lifetime is managed by reference count, from | ||
| 60 | * sys_inotify_init() until release. Additional references can bump the count | ||
| 61 | * via get_inotify_dev() and drop the count via put_inotify_dev(). | ||
| 62 | * | ||
| 63 | * inotify_user_watch: Lifetime is from create_watch() to the receipt of an | ||
| 64 | * IN_IGNORED event from inotify, or when using IN_ONESHOT, to receipt of the | ||
| 65 | * first event, or to inotify_destroy(). | ||
| 66 | */ | ||
| 67 | |||
/*
 * struct inotify_device - represents an inotify instance
 *
 * Reference-counted via 'count'.  The event queue is protected by
 * 'ev_mutex'; watch addition/update is serialized by 'up_mutex'.
 * (There is no field named 'mutex'.)
 */
struct inotify_device {
	wait_queue_head_t 	wq;		/* wait queue for i/o */
	struct mutex		ev_mutex;	/* protects event queue */
	struct mutex		up_mutex;	/* synchronizes watch updates */
	struct list_head 	events;		/* list of queued events */
	atomic_t		count;		/* reference count */
	struct user_struct	*user;		/* user who opened this dev */
	struct inotify_handle	*ih;		/* inotify handle */
	unsigned int		queue_size;	/* size of the queue (bytes) */
	unsigned int		event_count;	/* number of pending events */
	unsigned int		max_events;	/* maximum number of events */
};
| 85 | |||
/*
 * struct inotify_kernel_event - An inotify event, originating from a watch and
 * queued for user-space.  A list of these is attached to each instance of the
 * device.  In read(), this list is walked and all events that can fit in the
 * buffer are returned.
 *
 * Protected by dev->ev_mutex of the device in which we are queued.
 */
struct inotify_kernel_event {
	struct inotify_event	event;	/* the user-space event */
	struct list_head        list;	/* entry in inotify_device's list */
	char			*name;	/* filename, if any; NUL-padded to event.len bytes */
};
| 99 | |||
/*
 * struct inotify_user_watch - our version of an inotify_watch, we add
 * a reference to the associated inotify_device.
 */
struct inotify_user_watch {
	struct inotify_device	*dev;	/* associated device; pinned via get_inotify_dev() */
	struct inotify_watch	wdata;	/* inotify watch data */
};
| 108 | |||
#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

/* lower bound shared by all three tunables below */
static int zero;

/* /proc/sys/fs/inotify/ tunables: instance, watch and queue limits */
ctl_table inotify_table[] = {
	{
		.ctl_name	= INOTIFY_MAX_USER_INSTANCES,
		.procname	= "max_user_instances",
		.data		= &inotify_max_user_instances,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_USER_WATCHES,
		.procname	= "max_user_watches",
		.data		= &inotify_max_user_watches,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_QUEUED_EVENTS,
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero
	},
	{ .ctl_name = 0 }	/* sentinel */
};
#endif /* CONFIG_SYSCTL */
| 149 | |||
/*
 * get_inotify_dev - take a reference on an inotify_device.
 *
 * Paired with put_inotify_dev(), which tears the device down when the
 * count drops to zero.
 */
static inline void get_inotify_dev(struct inotify_device *dev)
{
	atomic_inc(&dev->count);
}
| 154 | |||
| 155 | static inline void put_inotify_dev(struct inotify_device *dev) | ||
| 156 | { | ||
| 157 | if (atomic_dec_and_test(&dev->count)) { | ||
| 158 | atomic_dec(&dev->user->inotify_devs); | ||
| 159 | free_uid(dev->user); | ||
| 160 | kfree(dev); | ||
| 161 | } | ||
| 162 | } | ||
| 163 | |||
| 164 | /* | ||
| 165 | * free_inotify_user_watch - cleans up the watch and its references | ||
| 166 | */ | ||
| 167 | static void free_inotify_user_watch(struct inotify_watch *w) | ||
| 168 | { | ||
| 169 | struct inotify_user_watch *watch; | ||
| 170 | struct inotify_device *dev; | ||
| 171 | |||
| 172 | watch = container_of(w, struct inotify_user_watch, wdata); | ||
| 173 | dev = watch->dev; | ||
| 174 | |||
| 175 | atomic_dec(&dev->user->inotify_watches); | ||
| 176 | put_inotify_dev(dev); | ||
| 177 | kmem_cache_free(watch_cachep, watch); | ||
| 178 | } | ||
| 179 | |||
| 180 | /* | ||
| 181 | * kernel_event - create a new kernel event with the given parameters | ||
| 182 | * | ||
| 183 | * This function can sleep. | ||
| 184 | */ | ||
| 185 | static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie, | ||
| 186 | const char *name) | ||
| 187 | { | ||
| 188 | struct inotify_kernel_event *kevent; | ||
| 189 | |||
| 190 | kevent = kmem_cache_alloc(event_cachep, GFP_KERNEL); | ||
| 191 | if (unlikely(!kevent)) | ||
| 192 | return NULL; | ||
| 193 | |||
| 194 | /* we hand this out to user-space, so zero it just in case */ | ||
| 195 | memset(&kevent->event, 0, sizeof(struct inotify_event)); | ||
| 196 | |||
| 197 | kevent->event.wd = wd; | ||
| 198 | kevent->event.mask = mask; | ||
| 199 | kevent->event.cookie = cookie; | ||
| 200 | |||
| 201 | INIT_LIST_HEAD(&kevent->list); | ||
| 202 | |||
| 203 | if (name) { | ||
| 204 | size_t len, rem, event_size = sizeof(struct inotify_event); | ||
| 205 | |||
| 206 | /* | ||
| 207 | * We need to pad the filename so as to properly align an | ||
| 208 | * array of inotify_event structures. Because the structure is | ||
| 209 | * small and the common case is a small filename, we just round | ||
| 210 | * up to the next multiple of the structure's sizeof. This is | ||
| 211 | * simple and safe for all architectures. | ||
| 212 | */ | ||
| 213 | len = strlen(name) + 1; | ||
| 214 | rem = event_size - len; | ||
| 215 | if (len > event_size) { | ||
| 216 | rem = event_size - (len % event_size); | ||
| 217 | if (len % event_size == 0) | ||
| 218 | rem = 0; | ||
| 219 | } | ||
| 220 | |||
| 221 | kevent->name = kmalloc(len + rem, GFP_KERNEL); | ||
| 222 | if (unlikely(!kevent->name)) { | ||
| 223 | kmem_cache_free(event_cachep, kevent); | ||
| 224 | return NULL; | ||
| 225 | } | ||
| 226 | memcpy(kevent->name, name, len); | ||
| 227 | if (rem) | ||
| 228 | memset(kevent->name + len, 0, rem); | ||
| 229 | kevent->event.len = len + rem; | ||
| 230 | } else { | ||
| 231 | kevent->event.len = 0; | ||
| 232 | kevent->name = NULL; | ||
| 233 | } | ||
| 234 | |||
| 235 | return kevent; | ||
| 236 | } | ||
| 237 | |||
/*
 * inotify_dev_get_event - return the next event in the given dev's queue
 *
 * Caller must hold dev->ev_mutex.
 *
 * NOTE(review): this blindly takes list_entry() of dev->events.next; if
 * the queue is empty the result is the list head reinterpreted as an
 * event, not a valid inotify_kernel_event.  Callers MUST check
 * list_empty(&dev->events) before using the returned pointer.
 */
static inline struct inotify_kernel_event *
inotify_dev_get_event(struct inotify_device *dev)
{
	return list_entry(dev->events.next, struct inotify_kernel_event, list);
}
| 248 | |||
| 249 | /* | ||
| 250 | * inotify_dev_queue_event - event handler registered with core inotify, adds | ||
| 251 | * a new event to the given device | ||
| 252 | * | ||
| 253 | * Can sleep (calls kernel_event()). | ||
| 254 | */ | ||
| 255 | static void inotify_dev_queue_event(struct inotify_watch *w, u32 wd, u32 mask, | ||
| 256 | u32 cookie, const char *name, | ||
| 257 | struct inode *ignored) | ||
| 258 | { | ||
| 259 | struct inotify_user_watch *watch; | ||
| 260 | struct inotify_device *dev; | ||
| 261 | struct inotify_kernel_event *kevent, *last; | ||
| 262 | |||
| 263 | watch = container_of(w, struct inotify_user_watch, wdata); | ||
| 264 | dev = watch->dev; | ||
| 265 | |||
| 266 | mutex_lock(&dev->ev_mutex); | ||
| 267 | |||
| 268 | /* we can safely put the watch as we don't reference it while | ||
| 269 | * generating the event | ||
| 270 | */ | ||
| 271 | if (mask & IN_IGNORED || mask & IN_ONESHOT) | ||
| 272 | put_inotify_watch(w); /* final put */ | ||
| 273 | |||
| 274 | /* coalescing: drop this event if it is a dupe of the previous */ | ||
| 275 | last = inotify_dev_get_event(dev); | ||
| 276 | if (last && last->event.mask == mask && last->event.wd == wd && | ||
| 277 | last->event.cookie == cookie) { | ||
| 278 | const char *lastname = last->name; | ||
| 279 | |||
| 280 | if (!name && !lastname) | ||
| 281 | goto out; | ||
| 282 | if (name && lastname && !strcmp(lastname, name)) | ||
| 283 | goto out; | ||
| 284 | } | ||
| 285 | |||
| 286 | /* the queue overflowed and we already sent the Q_OVERFLOW event */ | ||
| 287 | if (unlikely(dev->event_count > dev->max_events)) | ||
| 288 | goto out; | ||
| 289 | |||
| 290 | /* if the queue overflows, we need to notify user space */ | ||
| 291 | if (unlikely(dev->event_count == dev->max_events)) | ||
| 292 | kevent = kernel_event(-1, IN_Q_OVERFLOW, cookie, NULL); | ||
| 293 | else | ||
| 294 | kevent = kernel_event(wd, mask, cookie, name); | ||
| 295 | |||
| 296 | if (unlikely(!kevent)) | ||
| 297 | goto out; | ||
| 298 | |||
| 299 | /* queue the event and wake up anyone waiting */ | ||
| 300 | dev->event_count++; | ||
| 301 | dev->queue_size += sizeof(struct inotify_event) + kevent->event.len; | ||
| 302 | list_add_tail(&kevent->list, &dev->events); | ||
| 303 | wake_up_interruptible(&dev->wq); | ||
| 304 | |||
| 305 | out: | ||
| 306 | mutex_unlock(&dev->ev_mutex); | ||
| 307 | } | ||
| 308 | |||
| 309 | /* | ||
| 310 | * remove_kevent - cleans up and ultimately frees the given kevent | ||
| 311 | * | ||
| 312 | * Caller must hold dev->ev_mutex. | ||
| 313 | */ | ||
| 314 | static void remove_kevent(struct inotify_device *dev, | ||
| 315 | struct inotify_kernel_event *kevent) | ||
| 316 | { | ||
| 317 | list_del(&kevent->list); | ||
| 318 | |||
| 319 | dev->event_count--; | ||
| 320 | dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len; | ||
| 321 | |||
| 322 | kfree(kevent->name); | ||
| 323 | kmem_cache_free(event_cachep, kevent); | ||
| 324 | } | ||
| 325 | |||
| 326 | /* | ||
| 327 | * inotify_dev_event_dequeue - destroy an event on the given device | ||
| 328 | * | ||
| 329 | * Caller must hold dev->ev_mutex. | ||
| 330 | */ | ||
| 331 | static void inotify_dev_event_dequeue(struct inotify_device *dev) | ||
| 332 | { | ||
| 333 | if (!list_empty(&dev->events)) { | ||
| 334 | struct inotify_kernel_event *kevent; | ||
| 335 | kevent = inotify_dev_get_event(dev); | ||
| 336 | remove_kevent(dev, kevent); | ||
| 337 | } | ||
| 338 | } | ||
| 339 | |||
| 340 | /* | ||
| 341 | * find_inode - resolve a user-given path to a specific inode and return a nd | ||
| 342 | */ | ||
| 343 | static int find_inode(const char __user *dirname, struct nameidata *nd, | ||
| 344 | unsigned flags) | ||
| 345 | { | ||
| 346 | int error; | ||
| 347 | |||
| 348 | error = __user_walk(dirname, flags, nd); | ||
| 349 | if (error) | ||
| 350 | return error; | ||
| 351 | /* you can only watch an inode if you have read permissions on it */ | ||
| 352 | error = vfs_permission(nd, MAY_READ); | ||
| 353 | if (error) | ||
| 354 | path_release(nd); | ||
| 355 | return error; | ||
| 356 | } | ||
| 357 | |||
| 358 | /* | ||
| 359 | * create_watch - creates a watch on the given device. | ||
| 360 | * | ||
| 361 | * Callers must hold dev->up_mutex. | ||
| 362 | */ | ||
| 363 | static int create_watch(struct inotify_device *dev, struct inode *inode, | ||
| 364 | u32 mask) | ||
| 365 | { | ||
| 366 | struct inotify_user_watch *watch; | ||
| 367 | int ret; | ||
| 368 | |||
| 369 | if (atomic_read(&dev->user->inotify_watches) >= | ||
| 370 | inotify_max_user_watches) | ||
| 371 | return -ENOSPC; | ||
| 372 | |||
| 373 | watch = kmem_cache_alloc(watch_cachep, GFP_KERNEL); | ||
| 374 | if (unlikely(!watch)) | ||
| 375 | return -ENOMEM; | ||
| 376 | |||
| 377 | /* save a reference to device and bump the count to make it official */ | ||
| 378 | get_inotify_dev(dev); | ||
| 379 | watch->dev = dev; | ||
| 380 | |||
| 381 | atomic_inc(&dev->user->inotify_watches); | ||
| 382 | |||
| 383 | inotify_init_watch(&watch->wdata); | ||
| 384 | ret = inotify_add_watch(dev->ih, &watch->wdata, inode, mask); | ||
| 385 | if (ret < 0) | ||
| 386 | free_inotify_user_watch(&watch->wdata); | ||
| 387 | |||
| 388 | return ret; | ||
| 389 | } | ||
| 390 | |||
| 391 | /* Device Interface */ | ||
| 392 | |||
| 393 | static unsigned int inotify_poll(struct file *file, poll_table *wait) | ||
| 394 | { | ||
| 395 | struct inotify_device *dev = file->private_data; | ||
| 396 | int ret = 0; | ||
| 397 | |||
| 398 | poll_wait(file, &dev->wq, wait); | ||
| 399 | mutex_lock(&dev->ev_mutex); | ||
| 400 | if (!list_empty(&dev->events)) | ||
| 401 | ret = POLLIN | POLLRDNORM; | ||
| 402 | mutex_unlock(&dev->ev_mutex); | ||
| 403 | |||
| 404 | return ret; | ||
| 405 | } | ||
| 406 | |||
/*
 * inotify_read - blocking read of queued events into a user buffer.
 *
 * Returns the number of bytes copied, -EAGAIN for a non-blocking fd
 * with no events queued, -EINTR if interrupted while waiting, or
 * -EFAULT on a bad user buffer.  An event is only dequeued after both
 * its fixed-size header and its padded name have been copied; an event
 * that does not fit in the remaining buffer space stays queued for the
 * next read().
 */
static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	size_t event_size = sizeof (struct inotify_event);
	struct inotify_device *dev;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	dev = file->private_data;

	/* sleep until at least one event is queued, or we must bail out */
	while (1) {
		int events;

		prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&dev->ev_mutex);
		events = !list_empty(&dev->events);
		mutex_unlock(&dev->ev_mutex);
		if (events) {
			ret = 0;
			break;
		}

		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		schedule();
	}

	finish_wait(&dev->wq, &wait);
	if (ret)
		return ret;

	/* copy out as many whole events as fit into the user buffer */
	mutex_lock(&dev->ev_mutex);
	while (1) {
		struct inotify_kernel_event *kevent;

		ret = buf - start;	/* bytes delivered so far */
		if (list_empty(&dev->events))
			break;

		kevent = inotify_dev_get_event(dev);
		/* stop before an event that would not fit entirely */
		if (event_size + kevent->event.len > count)
			break;

		if (copy_to_user(buf, &kevent->event, event_size)) {
			ret = -EFAULT;
			break;
		}
		buf += event_size;
		count -= event_size;

		if (kevent->name) {
			/* name buffer is NUL-padded to event.len bytes */
			if (copy_to_user(buf, kevent->name, kevent->event.len)){
				ret = -EFAULT;
				break;
			}
			buf += kevent->event.len;
			count -= kevent->event.len;
		}

		remove_kevent(dev, kevent);
	}
	mutex_unlock(&dev->ev_mutex);

	return ret;
}
| 483 | |||
| 484 | static int inotify_release(struct inode *ignored, struct file *file) | ||
| 485 | { | ||
| 486 | struct inotify_device *dev = file->private_data; | ||
| 487 | |||
| 488 | inotify_destroy(dev->ih); | ||
| 489 | |||
| 490 | /* destroy all of the events on this device */ | ||
| 491 | mutex_lock(&dev->ev_mutex); | ||
| 492 | while (!list_empty(&dev->events)) | ||
| 493 | inotify_dev_event_dequeue(dev); | ||
| 494 | mutex_unlock(&dev->ev_mutex); | ||
| 495 | |||
| 496 | /* free this device: the put matching the get in inotify_init() */ | ||
| 497 | put_inotify_dev(dev); | ||
| 498 | |||
| 499 | return 0; | ||
| 500 | } | ||
| 501 | |||
| 502 | static long inotify_ioctl(struct file *file, unsigned int cmd, | ||
| 503 | unsigned long arg) | ||
| 504 | { | ||
| 505 | struct inotify_device *dev; | ||
| 506 | void __user *p; | ||
| 507 | int ret = -ENOTTY; | ||
| 508 | |||
| 509 | dev = file->private_data; | ||
| 510 | p = (void __user *) arg; | ||
| 511 | |||
| 512 | switch (cmd) { | ||
| 513 | case FIONREAD: | ||
| 514 | ret = put_user(dev->queue_size, (int __user *) p); | ||
| 515 | break; | ||
| 516 | } | ||
| 517 | |||
| 518 | return ret; | ||
| 519 | } | ||
| 520 | |||
/* file operations backing an inotify instance fd */
static const struct file_operations inotify_fops = {
	.poll           = inotify_poll,
	.read           = inotify_read,
	.release        = inotify_release,
	.unlocked_ioctl = inotify_ioctl,
	.compat_ioctl   = inotify_ioctl,
};

/* callbacks registered with the core inotify layer */
static const struct inotify_operations inotify_user_ops = {
	.handle_event	= inotify_dev_queue_event,
	.destroy_watch	= free_inotify_user_watch,
};
| 533 | |||
/*
 * sys_inotify_init - create a new inotify instance and return an fd for it.
 *
 * Allocates a device, backs the fd with an anonymous inode on the
 * internal inotify mount, and charges the instance against the
 * caller's per-user limit.  Returns the new fd or a negative errno.
 * The error-unwind order below mirrors the acquisition order exactly.
 */
asmlinkage long sys_inotify_init(void)
{
	struct inotify_device *dev;
	struct inotify_handle *ih;
	struct user_struct *user;
	struct file *filp;
	int fd, ret;

	fd = get_unused_fd();
	if (fd < 0)
		return fd;

	filp = get_empty_filp();
	if (!filp) {
		ret = -ENFILE;
		goto out_put_fd;
	}

	/* charge this instance to the caller and enforce the limit */
	user = get_uid(current->user);
	if (unlikely(atomic_read(&user->inotify_devs) >=
			inotify_max_user_instances)) {
		ret = -EMFILE;
		goto out_free_uid;
	}

	dev = kmalloc(sizeof(struct inotify_device), GFP_KERNEL);
	if (unlikely(!dev)) {
		ret = -ENOMEM;
		goto out_free_uid;
	}

	ih = inotify_init(&inotify_user_ops);
	if (unlikely(IS_ERR(ih))) {
		ret = PTR_ERR(ih);
		goto out_free_dev;
	}
	dev->ih = ih;

	/* back the fd with the pseudo-filesystem's anonymous inode */
	filp->f_op = &inotify_fops;
	filp->f_vfsmnt = mntget(inotify_mnt);
	filp->f_dentry = dget(inotify_mnt->mnt_root);
	filp->f_mapping = filp->f_dentry->d_inode->i_mapping;
	filp->f_mode = FMODE_READ;
	filp->f_flags = O_RDONLY;
	filp->private_data = dev;

	INIT_LIST_HEAD(&dev->events);
	init_waitqueue_head(&dev->wq);
	mutex_init(&dev->ev_mutex);
	mutex_init(&dev->up_mutex);
	dev->event_count = 0;
	dev->queue_size = 0;
	dev->max_events = inotify_max_queued_events;
	dev->user = user;
	atomic_set(&dev->count, 0);

	/* this get is matched by the put in inotify_release() */
	get_inotify_dev(dev);
	atomic_inc(&user->inotify_devs);
	fd_install(fd, filp);

	return fd;
out_free_dev:
	kfree(dev);
out_free_uid:
	free_uid(user);
	put_filp(filp);
out_put_fd:
	put_unused_fd(fd);
	return ret;
}
| 604 | |||
| 605 | asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask) | ||
| 606 | { | ||
| 607 | struct inode *inode; | ||
| 608 | struct inotify_device *dev; | ||
| 609 | struct nameidata nd; | ||
| 610 | struct file *filp; | ||
| 611 | int ret, fput_needed; | ||
| 612 | unsigned flags = 0; | ||
| 613 | |||
| 614 | filp = fget_light(fd, &fput_needed); | ||
| 615 | if (unlikely(!filp)) | ||
| 616 | return -EBADF; | ||
| 617 | |||
| 618 | /* verify that this is indeed an inotify instance */ | ||
| 619 | if (unlikely(filp->f_op != &inotify_fops)) { | ||
| 620 | ret = -EINVAL; | ||
| 621 | goto fput_and_out; | ||
| 622 | } | ||
| 623 | |||
| 624 | if (!(mask & IN_DONT_FOLLOW)) | ||
| 625 | flags |= LOOKUP_FOLLOW; | ||
| 626 | if (mask & IN_ONLYDIR) | ||
| 627 | flags |= LOOKUP_DIRECTORY; | ||
| 628 | |||
| 629 | ret = find_inode(path, &nd, flags); | ||
| 630 | if (unlikely(ret)) | ||
| 631 | goto fput_and_out; | ||
| 632 | |||
| 633 | /* inode held in place by reference to nd; dev by fget on fd */ | ||
| 634 | inode = nd.dentry->d_inode; | ||
| 635 | dev = filp->private_data; | ||
| 636 | |||
| 637 | mutex_lock(&dev->up_mutex); | ||
| 638 | ret = inotify_find_update_watch(dev->ih, inode, mask); | ||
| 639 | if (ret == -ENOENT) | ||
| 640 | ret = create_watch(dev, inode, mask); | ||
| 641 | mutex_unlock(&dev->up_mutex); | ||
| 642 | |||
| 643 | path_release(&nd); | ||
| 644 | fput_and_out: | ||
| 645 | fput_light(filp, fput_needed); | ||
| 646 | return ret; | ||
| 647 | } | ||
| 648 | |||
| 649 | asmlinkage long sys_inotify_rm_watch(int fd, u32 wd) | ||
| 650 | { | ||
| 651 | struct file *filp; | ||
| 652 | struct inotify_device *dev; | ||
| 653 | int ret, fput_needed; | ||
| 654 | |||
| 655 | filp = fget_light(fd, &fput_needed); | ||
| 656 | if (unlikely(!filp)) | ||
| 657 | return -EBADF; | ||
| 658 | |||
| 659 | /* verify that this is indeed an inotify instance */ | ||
| 660 | if (unlikely(filp->f_op != &inotify_fops)) { | ||
| 661 | ret = -EINVAL; | ||
| 662 | goto out; | ||
| 663 | } | ||
| 664 | |||
| 665 | dev = filp->private_data; | ||
| 666 | |||
| 667 | /* we free our watch data when we get IN_IGNORED */ | ||
| 668 | ret = inotify_rm_wd(dev->ih, wd); | ||
| 669 | |||
| 670 | out: | ||
| 671 | fput_light(filp, fput_needed); | ||
| 672 | return ret; | ||
| 673 | } | ||
| 674 | |||
/*
 * inotify_get_sb - mount callback for the internal pseudo-filesystem
 * whose anonymous inodes back inotify fds.  0xBAD1DEA is the magic
 * number passed to get_sb_pseudo().
 */
static struct super_block *
inotify_get_sb(struct file_system_type *fs_type, int flags,
	       const char *dev_name, void *data)
{
	return get_sb_pseudo(fs_type, "inotify", NULL, 0xBAD1DEA);
}

/* kernel-internal filesystem; never user-mountable */
static struct file_system_type inotify_fs_type = {
    .name           = "inotifyfs",
    .get_sb         = inotify_get_sb,
    .kill_sb        = kill_anon_super,
};
| 687 | |||
| 688 | /* | ||
 * inotify_user_setup - Our initialization function.  Note that we cannot return
| 690 | * error because we have compiled-in VFS hooks. So an (unlikely) failure here | ||
| 691 | * must result in panic(). | ||
| 692 | */ | ||
| 693 | static int __init inotify_user_setup(void) | ||
| 694 | { | ||
| 695 | int ret; | ||
| 696 | |||
| 697 | ret = register_filesystem(&inotify_fs_type); | ||
| 698 | if (unlikely(ret)) | ||
| 699 | panic("inotify: register_filesystem returned %d!\n", ret); | ||
| 700 | |||
| 701 | inotify_mnt = kern_mount(&inotify_fs_type); | ||
| 702 | if (IS_ERR(inotify_mnt)) | ||
| 703 | panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt)); | ||
| 704 | |||
| 705 | inotify_max_queued_events = 16384; | ||
| 706 | inotify_max_user_instances = 128; | ||
| 707 | inotify_max_user_watches = 8192; | ||
| 708 | |||
| 709 | watch_cachep = kmem_cache_create("inotify_watch_cache", | ||
| 710 | sizeof(struct inotify_user_watch), | ||
| 711 | 0, SLAB_PANIC, NULL, NULL); | ||
| 712 | event_cachep = kmem_cache_create("inotify_event_cache", | ||
| 713 | sizeof(struct inotify_kernel_event), | ||
| 714 | 0, SLAB_PANIC, NULL, NULL); | ||
| 715 | |||
| 716 | return 0; | ||
| 717 | } | ||
| 718 | |||
| 719 | module_init(inotify_user_setup); | ||
diff --git a/fs/jffs/intrep.c b/fs/jffs/intrep.c index 0ef207dfaf6f..5371a403130a 100644 --- a/fs/jffs/intrep.c +++ b/fs/jffs/intrep.c | |||
| @@ -247,7 +247,7 @@ flash_safe_read(struct mtd_info *mtd, loff_t from, | |||
| 247 | D3(printk(KERN_NOTICE "flash_safe_read(%p, %08x, %p, %08x)\n", | 247 | D3(printk(KERN_NOTICE "flash_safe_read(%p, %08x, %p, %08x)\n", |
| 248 | mtd, (unsigned int) from, buf, count)); | 248 | mtd, (unsigned int) from, buf, count)); |
| 249 | 249 | ||
| 250 | res = MTD_READ(mtd, from, count, &retlen, buf); | 250 | res = mtd->read(mtd, from, count, &retlen, buf); |
| 251 | if (retlen != count) { | 251 | if (retlen != count) { |
| 252 | panic("Didn't read all bytes in flash_safe_read(). Returned %d\n", res); | 252 | panic("Didn't read all bytes in flash_safe_read(). Returned %d\n", res); |
| 253 | } | 253 | } |
| @@ -262,7 +262,7 @@ flash_read_u32(struct mtd_info *mtd, loff_t from) | |||
| 262 | __u32 ret; | 262 | __u32 ret; |
| 263 | int res; | 263 | int res; |
| 264 | 264 | ||
| 265 | res = MTD_READ(mtd, from, 4, &retlen, (unsigned char *)&ret); | 265 | res = mtd->read(mtd, from, 4, &retlen, (unsigned char *)&ret); |
| 266 | if (retlen != 4) { | 266 | if (retlen != 4) { |
| 267 | printk("Didn't read all bytes in flash_read_u32(). Returned %d\n", res); | 267 | printk("Didn't read all bytes in flash_read_u32(). Returned %d\n", res); |
| 268 | return 0; | 268 | return 0; |
| @@ -282,7 +282,7 @@ flash_safe_write(struct mtd_info *mtd, loff_t to, | |||
| 282 | D3(printk(KERN_NOTICE "flash_safe_write(%p, %08x, %p, %08x)\n", | 282 | D3(printk(KERN_NOTICE "flash_safe_write(%p, %08x, %p, %08x)\n", |
| 283 | mtd, (unsigned int) to, buf, count)); | 283 | mtd, (unsigned int) to, buf, count)); |
| 284 | 284 | ||
| 285 | res = MTD_WRITE(mtd, to, count, &retlen, buf); | 285 | res = mtd->write(mtd, to, count, &retlen, buf); |
| 286 | if (retlen != count) { | 286 | if (retlen != count) { |
| 287 | printk("Didn't write all bytes in flash_safe_write(). Returned %d\n", res); | 287 | printk("Didn't write all bytes in flash_safe_write(). Returned %d\n", res); |
| 288 | } | 288 | } |
| @@ -300,9 +300,9 @@ flash_safe_writev(struct mtd_info *mtd, const struct kvec *vecs, | |||
| 300 | 300 | ||
| 301 | D3(printk(KERN_NOTICE "flash_safe_writev(%p, %08x, %p)\n", | 301 | D3(printk(KERN_NOTICE "flash_safe_writev(%p, %08x, %p)\n", |
| 302 | mtd, (unsigned int) to, vecs)); | 302 | mtd, (unsigned int) to, vecs)); |
| 303 | 303 | ||
| 304 | if (mtd->writev) { | 304 | if (mtd->writev) { |
| 305 | res = MTD_WRITEV(mtd, vecs, iovec_cnt, to, &retlen); | 305 | res = mtd->writev(mtd, vecs, iovec_cnt, to, &retlen); |
| 306 | return res ? res : retlen; | 306 | return res ? res : retlen; |
| 307 | } | 307 | } |
| 308 | /* Not implemented writev. Repeatedly use write - on the not so | 308 | /* Not implemented writev. Repeatedly use write - on the not so |
| @@ -312,7 +312,8 @@ flash_safe_writev(struct mtd_info *mtd, const struct kvec *vecs, | |||
| 312 | retlen=0; | 312 | retlen=0; |
| 313 | 313 | ||
| 314 | for (i=0; !res && i<iovec_cnt; i++) { | 314 | for (i=0; !res && i<iovec_cnt; i++) { |
| 315 | res = MTD_WRITE(mtd, to, vecs[i].iov_len, &retlen_a, vecs[i].iov_base); | 315 | res = mtd->write(mtd, to, vecs[i].iov_len, &retlen_a, |
| 316 | vecs[i].iov_base); | ||
| 316 | if (retlen_a != vecs[i].iov_len) { | 317 | if (retlen_a != vecs[i].iov_len) { |
| 317 | printk("Didn't write all bytes in flash_safe_writev(). Returned %d\n", res); | 318 | printk("Didn't write all bytes in flash_safe_writev(). Returned %d\n", res); |
| 318 | if (i != iovec_cnt-1) | 319 | if (i != iovec_cnt-1) |
| @@ -393,7 +394,7 @@ flash_erase_region(struct mtd_info *mtd, loff_t start, | |||
| 393 | set_current_state(TASK_UNINTERRUPTIBLE); | 394 | set_current_state(TASK_UNINTERRUPTIBLE); |
| 394 | add_wait_queue(&wait_q, &wait); | 395 | add_wait_queue(&wait_q, &wait); |
| 395 | 396 | ||
| 396 | if (MTD_ERASE(mtd, erase) < 0) { | 397 | if (mtd->erase(mtd, erase) < 0) { |
| 397 | set_current_state(TASK_RUNNING); | 398 | set_current_state(TASK_RUNNING); |
| 398 | remove_wait_queue(&wait_q, &wait); | 399 | remove_wait_queue(&wait_q, &wait); |
| 399 | kfree(erase); | 400 | kfree(erase); |
diff --git a/fs/jffs2/Makefile b/fs/jffs2/Makefile index 77dc5561a04e..7f28ee0bd132 100644 --- a/fs/jffs2/Makefile +++ b/fs/jffs2/Makefile | |||
| @@ -12,6 +12,9 @@ jffs2-y += symlink.o build.o erase.o background.o fs.o writev.o | |||
| 12 | jffs2-y += super.o debug.o | 12 | jffs2-y += super.o debug.o |
| 13 | 13 | ||
| 14 | jffs2-$(CONFIG_JFFS2_FS_WRITEBUFFER) += wbuf.o | 14 | jffs2-$(CONFIG_JFFS2_FS_WRITEBUFFER) += wbuf.o |
| 15 | jffs2-$(CONFIG_JFFS2_FS_XATTR) += xattr.o xattr_trusted.o xattr_user.o | ||
| 16 | jffs2-$(CONFIG_JFFS2_FS_SECURITY) += security.o | ||
| 17 | jffs2-$(CONFIG_JFFS2_FS_POSIX_ACL) += acl.o | ||
| 15 | jffs2-$(CONFIG_JFFS2_RUBIN) += compr_rubin.o | 18 | jffs2-$(CONFIG_JFFS2_RUBIN) += compr_rubin.o |
| 16 | jffs2-$(CONFIG_JFFS2_RTIME) += compr_rtime.o | 19 | jffs2-$(CONFIG_JFFS2_RTIME) += compr_rtime.o |
| 17 | jffs2-$(CONFIG_JFFS2_ZLIB) += compr_zlib.o | 20 | jffs2-$(CONFIG_JFFS2_ZLIB) += compr_zlib.o |
diff --git a/fs/jffs2/README.Locking b/fs/jffs2/README.Locking index b7943439b6ec..c8f0bd64e53e 100644 --- a/fs/jffs2/README.Locking +++ b/fs/jffs2/README.Locking | |||
| @@ -150,3 +150,24 @@ the buffer. | |||
| 150 | 150 | ||
| 151 | Ordering constraints: | 151 | Ordering constraints: |
| 152 | Lock wbuf_sem last, after the alloc_sem or and f->sem. | 152 | Lock wbuf_sem last, after the alloc_sem or and f->sem. |
| 153 | |||
| 154 | |||
| 155 | c->xattr_sem | ||
| 156 | ------------ | ||
| 157 | |||
| 158 | This read/write semaphore protects against concurrent access to the | ||
| 159 | xattr related objects which include stuff in superblock and ic->xref. | ||
On read-only paths the read semaphore is sufficient; holding the write
semaphore there would be needless exclusion.  However, you must hold the
write semaphore when updating, creating or deleting any xattr-related object.
| 163 | |||
Once xattr_sem is released, there is no guarantee that those objects still
exist.  Thus, a caller that needs to update such an object while holding
only the read semaphore must often release it, retake the write semaphore,
and retry the whole sequence.
| 167 | For example, do_jffs2_getxattr() holds read-semaphore to scan xref and | ||
| 168 | xdatum at first. But it retries this process with holding write-semaphore | ||
| 169 | after release read-semaphore, if it's necessary to load name/value pair | ||
| 170 | from medium. | ||
| 171 | |||
| 172 | Ordering constraints: | ||
| 173 | Lock xattr_sem last, after the alloc_sem. | ||
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c new file mode 100644 index 000000000000..320dd48b834e --- /dev/null +++ b/fs/jffs2/acl.c | |||
| @@ -0,0 +1,485 @@ | |||
| 1 | /* | ||
| 2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
| 3 | * | ||
| 4 | * Copyright (C) 2006 NEC Corporation | ||
| 5 | * | ||
| 6 | * Created by KaiGai Kohei <kaigai@ak.jp.nec.com> | ||
| 7 | * | ||
| 8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
| 9 | * | ||
| 10 | */ | ||
| 11 | #include <linux/kernel.h> | ||
| 12 | #include <linux/slab.h> | ||
| 13 | #include <linux/fs.h> | ||
| 14 | #include <linux/time.h> | ||
| 15 | #include <linux/crc32.h> | ||
| 16 | #include <linux/jffs2.h> | ||
| 17 | #include <linux/xattr.h> | ||
| 18 | #include <linux/posix_acl_xattr.h> | ||
| 19 | #include <linux/mtd/mtd.h> | ||
| 20 | #include "nodelist.h" | ||
| 21 | |||
| 22 | static size_t jffs2_acl_size(int count) | ||
| 23 | { | ||
| 24 | if (count <= 4) { | ||
| 25 | return sizeof(struct jffs2_acl_header) | ||
| 26 | + count * sizeof(struct jffs2_acl_entry_short); | ||
| 27 | } else { | ||
| 28 | return sizeof(struct jffs2_acl_header) | ||
| 29 | + 4 * sizeof(struct jffs2_acl_entry_short) | ||
| 30 | + (count - 4) * sizeof(struct jffs2_acl_entry); | ||
| 31 | } | ||
| 32 | } | ||
| 33 | |||
| 34 | static int jffs2_acl_count(size_t size) | ||
| 35 | { | ||
| 36 | size_t s; | ||
| 37 | |||
| 38 | size -= sizeof(struct jffs2_acl_header); | ||
| 39 | s = size - 4 * sizeof(struct jffs2_acl_entry_short); | ||
| 40 | if (s < 0) { | ||
| 41 | if (size % sizeof(struct jffs2_acl_entry_short)) | ||
| 42 | return -1; | ||
| 43 | return size / sizeof(struct jffs2_acl_entry_short); | ||
| 44 | } else { | ||
| 45 | if (s % sizeof(struct jffs2_acl_entry)) | ||
| 46 | return -1; | ||
| 47 | return s / sizeof(struct jffs2_acl_entry) + 4; | ||
| 48 | } | ||
| 49 | } | ||
| 50 | |||
| 51 | static struct posix_acl *jffs2_acl_from_medium(void *value, size_t size) | ||
| 52 | { | ||
| 53 | void *end = value + size; | ||
| 54 | struct jffs2_acl_header *header = value; | ||
| 55 | struct jffs2_acl_entry *entry; | ||
| 56 | struct posix_acl *acl; | ||
| 57 | uint32_t ver; | ||
| 58 | int i, count; | ||
| 59 | |||
| 60 | if (!value) | ||
| 61 | return NULL; | ||
| 62 | if (size < sizeof(struct jffs2_acl_header)) | ||
| 63 | return ERR_PTR(-EINVAL); | ||
| 64 | ver = je32_to_cpu(header->a_version); | ||
| 65 | if (ver != JFFS2_ACL_VERSION) { | ||
| 66 | JFFS2_WARNING("Invalid ACL version. (=%u)\n", ver); | ||
| 67 | return ERR_PTR(-EINVAL); | ||
| 68 | } | ||
| 69 | |||
| 70 | value += sizeof(struct jffs2_acl_header); | ||
| 71 | count = jffs2_acl_count(size); | ||
| 72 | if (count < 0) | ||
| 73 | return ERR_PTR(-EINVAL); | ||
| 74 | if (count == 0) | ||
| 75 | return NULL; | ||
| 76 | |||
| 77 | acl = posix_acl_alloc(count, GFP_KERNEL); | ||
| 78 | if (!acl) | ||
| 79 | return ERR_PTR(-ENOMEM); | ||
| 80 | |||
| 81 | for (i=0; i < count; i++) { | ||
| 82 | entry = value; | ||
| 83 | if (value + sizeof(struct jffs2_acl_entry_short) > end) | ||
| 84 | goto fail; | ||
| 85 | acl->a_entries[i].e_tag = je16_to_cpu(entry->e_tag); | ||
| 86 | acl->a_entries[i].e_perm = je16_to_cpu(entry->e_perm); | ||
| 87 | switch (acl->a_entries[i].e_tag) { | ||
| 88 | case ACL_USER_OBJ: | ||
| 89 | case ACL_GROUP_OBJ: | ||
| 90 | case ACL_MASK: | ||
| 91 | case ACL_OTHER: | ||
| 92 | value += sizeof(struct jffs2_acl_entry_short); | ||
| 93 | acl->a_entries[i].e_id = ACL_UNDEFINED_ID; | ||
| 94 | break; | ||
| 95 | |||
| 96 | case ACL_USER: | ||
| 97 | case ACL_GROUP: | ||
| 98 | value += sizeof(struct jffs2_acl_entry); | ||
| 99 | if (value > end) | ||
| 100 | goto fail; | ||
| 101 | acl->a_entries[i].e_id = je32_to_cpu(entry->e_id); | ||
| 102 | break; | ||
| 103 | |||
| 104 | default: | ||
| 105 | goto fail; | ||
| 106 | } | ||
| 107 | } | ||
| 108 | if (value != end) | ||
| 109 | goto fail; | ||
| 110 | return acl; | ||
| 111 | fail: | ||
| 112 | posix_acl_release(acl); | ||
| 113 | return ERR_PTR(-EINVAL); | ||
| 114 | } | ||
| 115 | |||
| 116 | static void *jffs2_acl_to_medium(const struct posix_acl *acl, size_t *size) | ||
| 117 | { | ||
| 118 | struct jffs2_acl_header *header; | ||
| 119 | struct jffs2_acl_entry *entry; | ||
| 120 | void *e; | ||
| 121 | size_t i; | ||
| 122 | |||
| 123 | *size = jffs2_acl_size(acl->a_count); | ||
| 124 | header = kmalloc(sizeof(*header) + acl->a_count * sizeof(*entry), GFP_KERNEL); | ||
| 125 | if (!header) | ||
| 126 | return ERR_PTR(-ENOMEM); | ||
| 127 | header->a_version = cpu_to_je32(JFFS2_ACL_VERSION); | ||
| 128 | e = header + 1; | ||
| 129 | for (i=0; i < acl->a_count; i++) { | ||
| 130 | entry = e; | ||
| 131 | entry->e_tag = cpu_to_je16(acl->a_entries[i].e_tag); | ||
| 132 | entry->e_perm = cpu_to_je16(acl->a_entries[i].e_perm); | ||
| 133 | switch(acl->a_entries[i].e_tag) { | ||
| 134 | case ACL_USER: | ||
| 135 | case ACL_GROUP: | ||
| 136 | entry->e_id = cpu_to_je32(acl->a_entries[i].e_id); | ||
| 137 | e += sizeof(struct jffs2_acl_entry); | ||
| 138 | break; | ||
| 139 | |||
| 140 | case ACL_USER_OBJ: | ||
| 141 | case ACL_GROUP_OBJ: | ||
| 142 | case ACL_MASK: | ||
| 143 | case ACL_OTHER: | ||
| 144 | e += sizeof(struct jffs2_acl_entry_short); | ||
| 145 | break; | ||
| 146 | |||
| 147 | default: | ||
| 148 | goto fail; | ||
| 149 | } | ||
| 150 | } | ||
| 151 | return header; | ||
| 152 | fail: | ||
| 153 | kfree(header); | ||
| 154 | return ERR_PTR(-EINVAL); | ||
| 155 | } | ||
| 156 | |||
| 157 | static struct posix_acl *jffs2_iget_acl(struct inode *inode, struct posix_acl **i_acl) | ||
| 158 | { | ||
| 159 | struct posix_acl *acl = JFFS2_ACL_NOT_CACHED; | ||
| 160 | |||
| 161 | spin_lock(&inode->i_lock); | ||
| 162 | if (*i_acl != JFFS2_ACL_NOT_CACHED) | ||
| 163 | acl = posix_acl_dup(*i_acl); | ||
| 164 | spin_unlock(&inode->i_lock); | ||
| 165 | return acl; | ||
| 166 | } | ||
| 167 | |||
| 168 | static void jffs2_iset_acl(struct inode *inode, struct posix_acl **i_acl, struct posix_acl *acl) | ||
| 169 | { | ||
| 170 | spin_lock(&inode->i_lock); | ||
| 171 | if (*i_acl != JFFS2_ACL_NOT_CACHED) | ||
| 172 | posix_acl_release(*i_acl); | ||
| 173 | *i_acl = posix_acl_dup(acl); | ||
| 174 | spin_unlock(&inode->i_lock); | ||
| 175 | } | ||
| 176 | |||
| 177 | static struct posix_acl *jffs2_get_acl(struct inode *inode, int type) | ||
| 178 | { | ||
| 179 | struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); | ||
| 180 | struct posix_acl *acl; | ||
| 181 | char *value = NULL; | ||
| 182 | int rc, xprefix; | ||
| 183 | |||
| 184 | switch (type) { | ||
| 185 | case ACL_TYPE_ACCESS: | ||
| 186 | acl = jffs2_iget_acl(inode, &f->i_acl_access); | ||
| 187 | if (acl != JFFS2_ACL_NOT_CACHED) | ||
| 188 | return acl; | ||
| 189 | xprefix = JFFS2_XPREFIX_ACL_ACCESS; | ||
| 190 | break; | ||
| 191 | case ACL_TYPE_DEFAULT: | ||
| 192 | acl = jffs2_iget_acl(inode, &f->i_acl_default); | ||
| 193 | if (acl != JFFS2_ACL_NOT_CACHED) | ||
| 194 | return acl; | ||
| 195 | xprefix = JFFS2_XPREFIX_ACL_DEFAULT; | ||
| 196 | break; | ||
| 197 | default: | ||
| 198 | return ERR_PTR(-EINVAL); | ||
| 199 | } | ||
| 200 | rc = do_jffs2_getxattr(inode, xprefix, "", NULL, 0); | ||
| 201 | if (rc > 0) { | ||
| 202 | value = kmalloc(rc, GFP_KERNEL); | ||
| 203 | if (!value) | ||
| 204 | return ERR_PTR(-ENOMEM); | ||
| 205 | rc = do_jffs2_getxattr(inode, xprefix, "", value, rc); | ||
| 206 | } | ||
| 207 | if (rc > 0) { | ||
| 208 | acl = jffs2_acl_from_medium(value, rc); | ||
| 209 | } else if (rc == -ENODATA || rc == -ENOSYS) { | ||
| 210 | acl = NULL; | ||
| 211 | } else { | ||
| 212 | acl = ERR_PTR(rc); | ||
| 213 | } | ||
| 214 | if (value) | ||
| 215 | kfree(value); | ||
| 216 | if (!IS_ERR(acl)) { | ||
| 217 | switch (type) { | ||
| 218 | case ACL_TYPE_ACCESS: | ||
| 219 | jffs2_iset_acl(inode, &f->i_acl_access, acl); | ||
| 220 | break; | ||
| 221 | case ACL_TYPE_DEFAULT: | ||
| 222 | jffs2_iset_acl(inode, &f->i_acl_default, acl); | ||
| 223 | break; | ||
| 224 | } | ||
| 225 | } | ||
| 226 | return acl; | ||
| 227 | } | ||
| 228 | |||
| 229 | static int jffs2_set_acl(struct inode *inode, int type, struct posix_acl *acl) | ||
| 230 | { | ||
| 231 | struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); | ||
| 232 | size_t size = 0; | ||
| 233 | char *value = NULL; | ||
| 234 | int rc, xprefix; | ||
| 235 | |||
| 236 | if (S_ISLNK(inode->i_mode)) | ||
| 237 | return -EOPNOTSUPP; | ||
| 238 | |||
| 239 | switch (type) { | ||
| 240 | case ACL_TYPE_ACCESS: | ||
| 241 | xprefix = JFFS2_XPREFIX_ACL_ACCESS; | ||
| 242 | if (acl) { | ||
| 243 | mode_t mode = inode->i_mode; | ||
| 244 | rc = posix_acl_equiv_mode(acl, &mode); | ||
| 245 | if (rc < 0) | ||
| 246 | return rc; | ||
| 247 | if (inode->i_mode != mode) { | ||
| 248 | inode->i_mode = mode; | ||
| 249 | jffs2_dirty_inode(inode); | ||
| 250 | } | ||
| 251 | if (rc == 0) | ||
| 252 | acl = NULL; | ||
| 253 | } | ||
| 254 | break; | ||
| 255 | case ACL_TYPE_DEFAULT: | ||
| 256 | xprefix = JFFS2_XPREFIX_ACL_DEFAULT; | ||
| 257 | if (!S_ISDIR(inode->i_mode)) | ||
| 258 | return acl ? -EACCES : 0; | ||
| 259 | break; | ||
| 260 | default: | ||
| 261 | return -EINVAL; | ||
| 262 | } | ||
| 263 | if (acl) { | ||
| 264 | value = jffs2_acl_to_medium(acl, &size); | ||
| 265 | if (IS_ERR(value)) | ||
| 266 | return PTR_ERR(value); | ||
| 267 | } | ||
| 268 | |||
| 269 | rc = do_jffs2_setxattr(inode, xprefix, "", value, size, 0); | ||
| 270 | if (value) | ||
| 271 | kfree(value); | ||
| 272 | if (!rc) { | ||
| 273 | switch(type) { | ||
| 274 | case ACL_TYPE_ACCESS: | ||
| 275 | jffs2_iset_acl(inode, &f->i_acl_access, acl); | ||
| 276 | break; | ||
| 277 | case ACL_TYPE_DEFAULT: | ||
| 278 | jffs2_iset_acl(inode, &f->i_acl_default, acl); | ||
| 279 | break; | ||
| 280 | } | ||
| 281 | } | ||
| 282 | return rc; | ||
| 283 | } | ||
| 284 | |||
| 285 | static int jffs2_check_acl(struct inode *inode, int mask) | ||
| 286 | { | ||
| 287 | struct posix_acl *acl; | ||
| 288 | int rc; | ||
| 289 | |||
| 290 | acl = jffs2_get_acl(inode, ACL_TYPE_ACCESS); | ||
| 291 | if (IS_ERR(acl)) | ||
| 292 | return PTR_ERR(acl); | ||
| 293 | if (acl) { | ||
| 294 | rc = posix_acl_permission(inode, acl, mask); | ||
| 295 | posix_acl_release(acl); | ||
| 296 | return rc; | ||
| 297 | } | ||
| 298 | return -EAGAIN; | ||
| 299 | } | ||
| 300 | |||
/* VFS ->permission() hook: generic mode-bit check augmented with the
 * POSIX ACL callback above. */
int jffs2_permission(struct inode *inode, int mask, struct nameidata *nd)
{
	return generic_permission(inode, mask, jffs2_check_acl);
}
| 305 | |||
/*
 * Called at inode creation time to inherit ACLs from the parent
 * directory 'dir'.  If the parent has no default ACL, the process
 * umask is applied to the new inode's mode instead.  New directories
 * also inherit the default ACL itself; all inode types (except
 * symlinks) get an access ACL derived by posix_acl_create_masq().
 */
int jffs2_init_acl(struct inode *inode, struct inode *dir)
{
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct posix_acl *acl = NULL, *clone;
	mode_t mode;
	int rc = 0;

	/* Start with empty cache slots for the brand-new inode. */
	f->i_acl_access = JFFS2_ACL_NOT_CACHED;
	f->i_acl_default = JFFS2_ACL_NOT_CACHED;
	if (!S_ISLNK(inode->i_mode)) {
		acl = jffs2_get_acl(dir, ACL_TYPE_DEFAULT);
		if (IS_ERR(acl))
			return PTR_ERR(acl);
		if (!acl)
			/* No default ACL on the parent: honour the umask. */
			inode->i_mode &= ~current->fs->umask;
	}
	if (acl) {
		if (S_ISDIR(inode->i_mode)) {
			rc = jffs2_set_acl(inode, ACL_TYPE_DEFAULT, acl);
			if (rc)
				goto cleanup;
		}
		clone = posix_acl_clone(acl, GFP_KERNEL);
		rc = -ENOMEM;
		if (!clone)
			goto cleanup;
		mode = inode->i_mode;
		rc = posix_acl_create_masq(clone, &mode);
		if (rc >= 0) {
			inode->i_mode = mode;
			if (rc > 0)
				/* ACL not fully expressed by mode bits: store it. */
				rc = jffs2_set_acl(inode, ACL_TYPE_ACCESS, clone);
		}
		posix_acl_release(clone);
	}
 cleanup:
	posix_acl_release(acl);
	return rc;
}
| 345 | |||
| 346 | void jffs2_clear_acl(struct inode *inode) | ||
| 347 | { | ||
| 348 | struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); | ||
| 349 | |||
| 350 | if (f->i_acl_access && f->i_acl_access != JFFS2_ACL_NOT_CACHED) { | ||
| 351 | posix_acl_release(f->i_acl_access); | ||
| 352 | f->i_acl_access = JFFS2_ACL_NOT_CACHED; | ||
| 353 | } | ||
| 354 | if (f->i_acl_default && f->i_acl_default != JFFS2_ACL_NOT_CACHED) { | ||
| 355 | posix_acl_release(f->i_acl_default); | ||
| 356 | f->i_acl_default = JFFS2_ACL_NOT_CACHED; | ||
| 357 | } | ||
| 358 | } | ||
| 359 | |||
/*
 * Propagate a chmod() into the access ACL: clone the current ACL,
 * masquerade it against the new mode bits, and write it back.
 * Returns 0 when there is no ACL to adjust.
 */
int jffs2_acl_chmod(struct inode *inode)
{
	struct posix_acl *acl, *clone;
	int rc;

	if (S_ISLNK(inode->i_mode))
		return -EOPNOTSUPP;
	acl = jffs2_get_acl(inode, ACL_TYPE_ACCESS);
	if (IS_ERR(acl) || !acl)
		/* PTR_ERR(NULL) == 0, so "no ACL" returns success here. */
		return PTR_ERR(acl);
	clone = posix_acl_clone(acl, GFP_KERNEL);
	posix_acl_release(acl);
	if (!clone)
		return -ENOMEM;
	rc = posix_acl_chmod_masq(clone, inode->i_mode);
	if (!rc)
		rc = jffs2_set_acl(inode, ACL_TYPE_ACCESS, clone);
	posix_acl_release(clone);
	return rc;
}
| 380 | |||
| 381 | static size_t jffs2_acl_access_listxattr(struct inode *inode, char *list, size_t list_size, | ||
| 382 | const char *name, size_t name_len) | ||
| 383 | { | ||
| 384 | const int retlen = sizeof(POSIX_ACL_XATTR_ACCESS); | ||
| 385 | |||
| 386 | if (list && retlen <= list_size) | ||
| 387 | strcpy(list, POSIX_ACL_XATTR_ACCESS); | ||
| 388 | return retlen; | ||
| 389 | } | ||
| 390 | |||
| 391 | static size_t jffs2_acl_default_listxattr(struct inode *inode, char *list, size_t list_size, | ||
| 392 | const char *name, size_t name_len) | ||
| 393 | { | ||
| 394 | const int retlen = sizeof(POSIX_ACL_XATTR_DEFAULT); | ||
| 395 | |||
| 396 | if (list && retlen <= list_size) | ||
| 397 | strcpy(list, POSIX_ACL_XATTR_DEFAULT); | ||
| 398 | return retlen; | ||
| 399 | } | ||
| 400 | |||
| 401 | static int jffs2_acl_getxattr(struct inode *inode, int type, void *buffer, size_t size) | ||
| 402 | { | ||
| 403 | struct posix_acl *acl; | ||
| 404 | int rc; | ||
| 405 | |||
| 406 | acl = jffs2_get_acl(inode, type); | ||
| 407 | if (IS_ERR(acl)) | ||
| 408 | return PTR_ERR(acl); | ||
| 409 | if (!acl) | ||
| 410 | return -ENODATA; | ||
| 411 | rc = posix_acl_to_xattr(acl, buffer, size); | ||
| 412 | posix_acl_release(acl); | ||
| 413 | |||
| 414 | return rc; | ||
| 415 | } | ||
| 416 | |||
| 417 | static int jffs2_acl_access_getxattr(struct inode *inode, const char *name, void *buffer, size_t size) | ||
| 418 | { | ||
| 419 | if (name[0] != '\0') | ||
| 420 | return -EINVAL; | ||
| 421 | return jffs2_acl_getxattr(inode, ACL_TYPE_ACCESS, buffer, size); | ||
| 422 | } | ||
| 423 | |||
| 424 | static int jffs2_acl_default_getxattr(struct inode *inode, const char *name, void *buffer, size_t size) | ||
| 425 | { | ||
| 426 | if (name[0] != '\0') | ||
| 427 | return -EINVAL; | ||
| 428 | return jffs2_acl_getxattr(inode, ACL_TYPE_DEFAULT, buffer, size); | ||
| 429 | } | ||
| 430 | |||
| 431 | static int jffs2_acl_setxattr(struct inode *inode, int type, const void *value, size_t size) | ||
| 432 | { | ||
| 433 | struct posix_acl *acl; | ||
| 434 | int rc; | ||
| 435 | |||
| 436 | if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER)) | ||
| 437 | return -EPERM; | ||
| 438 | |||
| 439 | if (value) { | ||
| 440 | acl = posix_acl_from_xattr(value, size); | ||
| 441 | if (IS_ERR(acl)) | ||
| 442 | return PTR_ERR(acl); | ||
| 443 | if (acl) { | ||
| 444 | rc = posix_acl_valid(acl); | ||
| 445 | if (rc) | ||
| 446 | goto out; | ||
| 447 | } | ||
| 448 | } else { | ||
| 449 | acl = NULL; | ||
| 450 | } | ||
| 451 | rc = jffs2_set_acl(inode, type, acl); | ||
| 452 | out: | ||
| 453 | posix_acl_release(acl); | ||
| 454 | return rc; | ||
| 455 | } | ||
| 456 | |||
| 457 | static int jffs2_acl_access_setxattr(struct inode *inode, const char *name, | ||
| 458 | const void *buffer, size_t size, int flags) | ||
| 459 | { | ||
| 460 | if (name[0] != '\0') | ||
| 461 | return -EINVAL; | ||
| 462 | return jffs2_acl_setxattr(inode, ACL_TYPE_ACCESS, buffer, size); | ||
| 463 | } | ||
| 464 | |||
| 465 | static int jffs2_acl_default_setxattr(struct inode *inode, const char *name, | ||
| 466 | const void *buffer, size_t size, int flags) | ||
| 467 | { | ||
| 468 | if (name[0] != '\0') | ||
| 469 | return -EINVAL; | ||
| 470 | return jffs2_acl_setxattr(inode, ACL_TYPE_DEFAULT, buffer, size); | ||
| 471 | } | ||
| 472 | |||
/* xattr handler wired up for the "system.posix_acl_access" prefix. */
struct xattr_handler jffs2_acl_access_xattr_handler = {
	.prefix	= POSIX_ACL_XATTR_ACCESS,
	.list	= jffs2_acl_access_listxattr,
	.get	= jffs2_acl_access_getxattr,
	.set	= jffs2_acl_access_setxattr,
};

/* xattr handler wired up for the "system.posix_acl_default" prefix. */
struct xattr_handler jffs2_acl_default_xattr_handler = {
	.prefix	= POSIX_ACL_XATTR_DEFAULT,
	.list	= jffs2_acl_default_listxattr,
	.get	= jffs2_acl_default_getxattr,
	.set	= jffs2_acl_default_setxattr,
};
diff --git a/fs/jffs2/acl.h b/fs/jffs2/acl.h new file mode 100644 index 000000000000..8893bd1a6ba7 --- /dev/null +++ b/fs/jffs2/acl.h | |||
| @@ -0,0 +1,45 @@ | |||
| 1 | /* | ||
| 2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
| 3 | * | ||
| 4 | * Copyright (C) 2006 NEC Corporation | ||
| 5 | * | ||
| 6 | * Created by KaiGai Kohei <kaigai@ak.jp.nec.com> | ||
| 7 | * | ||
| 8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
| 9 | * | ||
| 10 | */ | ||
/* On-medium ACL entry carrying an explicit id (ACL_USER/ACL_GROUP). */
struct jffs2_acl_entry {
	jint16_t e_tag;
	jint16_t e_perm;
	jint32_t e_id;
};

/* On-medium ACL entry with no id (USER_OBJ/GROUP_OBJ/MASK/OTHER). */
struct jffs2_acl_entry_short {
	jint16_t e_tag;
	jint16_t e_perm;
};

/* Header preceding the entries in the on-medium ACL encoding. */
struct jffs2_acl_header {
	jint32_t a_version;
};

#ifdef CONFIG_JFFS2_FS_POSIX_ACL

/* Sentinel stored in the per-inode cache slots while no ACL has been
 * read from the medium yet (distinct from NULL, which means "no ACL"). */
#define JFFS2_ACL_NOT_CACHED ((void *)-1)

extern int jffs2_permission(struct inode *, int, struct nameidata *);
extern int jffs2_acl_chmod(struct inode *);
extern int jffs2_init_acl(struct inode *, struct inode *);
extern void jffs2_clear_acl(struct inode *);

extern struct xattr_handler jffs2_acl_access_xattr_handler;
extern struct xattr_handler jffs2_acl_default_xattr_handler;

#else

/* Stubs used when ACL support is compiled out. */
#define jffs2_permission NULL
#define jffs2_acl_chmod(inode)		(0)
#define jffs2_init_acl(inode,dir)	(0)
#define jffs2_clear_acl(inode)

#endif /* CONFIG_JFFS2_FS_POSIX_ACL */
diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c index 70f7a896c04a..02826967ab58 100644 --- a/fs/jffs2/build.c +++ b/fs/jffs2/build.c | |||
| @@ -160,6 +160,7 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c) | |||
| 160 | ic->scan_dents = NULL; | 160 | ic->scan_dents = NULL; |
| 161 | cond_resched(); | 161 | cond_resched(); |
| 162 | } | 162 | } |
| 163 | jffs2_build_xattr_subsystem(c); | ||
| 163 | c->flags &= ~JFFS2_SB_FLAG_BUILDING; | 164 | c->flags &= ~JFFS2_SB_FLAG_BUILDING; |
| 164 | 165 | ||
| 165 | dbg_fsbuild("FS build complete\n"); | 166 | dbg_fsbuild("FS build complete\n"); |
| @@ -178,6 +179,7 @@ exit: | |||
| 178 | jffs2_free_full_dirent(fd); | 179 | jffs2_free_full_dirent(fd); |
| 179 | } | 180 | } |
| 180 | } | 181 | } |
| 182 | jffs2_clear_xattr_subsystem(c); | ||
| 181 | } | 183 | } |
| 182 | 184 | ||
| 183 | return ret; | 185 | return ret; |
diff --git a/fs/jffs2/compr.c b/fs/jffs2/compr.c index e7944e665b9f..7001ba26c067 100644 --- a/fs/jffs2/compr.c +++ b/fs/jffs2/compr.c | |||
| @@ -412,7 +412,7 @@ void jffs2_free_comprbuf(unsigned char *comprbuf, unsigned char *orig) | |||
| 412 | kfree(comprbuf); | 412 | kfree(comprbuf); |
| 413 | } | 413 | } |
| 414 | 414 | ||
| 415 | int jffs2_compressors_init(void) | 415 | int __init jffs2_compressors_init(void) |
| 416 | { | 416 | { |
| 417 | /* Registering compressors */ | 417 | /* Registering compressors */ |
| 418 | #ifdef CONFIG_JFFS2_ZLIB | 418 | #ifdef CONFIG_JFFS2_ZLIB |
diff --git a/fs/jffs2/compr.h b/fs/jffs2/compr.h index a77e830d85c5..509b8b1c0811 100644 --- a/fs/jffs2/compr.h +++ b/fs/jffs2/compr.h | |||
| @@ -23,8 +23,8 @@ | |||
| 23 | #include <linux/errno.h> | 23 | #include <linux/errno.h> |
| 24 | #include <linux/fs.h> | 24 | #include <linux/fs.h> |
| 25 | #include <linux/jffs2.h> | 25 | #include <linux/jffs2.h> |
| 26 | #include <linux/jffs2_fs_i.h> | 26 | #include "jffs2_fs_i.h" |
| 27 | #include <linux/jffs2_fs_sb.h> | 27 | #include "jffs2_fs_sb.h" |
| 28 | #include "nodelist.h" | 28 | #include "nodelist.h" |
| 29 | 29 | ||
| 30 | #define JFFS2_RUBINMIPS_PRIORITY 10 | 30 | #define JFFS2_RUBINMIPS_PRIORITY 10 |
diff --git a/fs/jffs2/debug.c b/fs/jffs2/debug.c index 1fe17de713e8..72b4fc13a106 100644 --- a/fs/jffs2/debug.c +++ b/fs/jffs2/debug.c | |||
| @@ -192,13 +192,13 @@ __jffs2_dbg_acct_paranoia_check_nolock(struct jffs2_sb_info *c, | |||
| 192 | else | 192 | else |
| 193 | my_dirty_size += totlen; | 193 | my_dirty_size += totlen; |
| 194 | 194 | ||
| 195 | if ((!ref2->next_phys) != (ref2 == jeb->last_node)) { | 195 | if ((!ref_next(ref2)) != (ref2 == jeb->last_node)) { |
| 196 | JFFS2_ERROR("node_ref for node at %#08x (mem %p) has next_phys at %#08x (mem %p), last_node is at %#08x (mem %p).\n", | 196 | JFFS2_ERROR("node_ref for node at %#08x (mem %p) has next at %#08x (mem %p), last_node is at %#08x (mem %p).\n", |
| 197 | ref_offset(ref2), ref2, ref_offset(ref2->next_phys), ref2->next_phys, | 197 | ref_offset(ref2), ref2, ref_offset(ref_next(ref2)), ref_next(ref2), |
| 198 | ref_offset(jeb->last_node), jeb->last_node); | 198 | ref_offset(jeb->last_node), jeb->last_node); |
| 199 | goto error; | 199 | goto error; |
| 200 | } | 200 | } |
| 201 | ref2 = ref2->next_phys; | 201 | ref2 = ref_next(ref2); |
| 202 | } | 202 | } |
| 203 | 203 | ||
| 204 | if (my_used_size != jeb->used_size) { | 204 | if (my_used_size != jeb->used_size) { |
| @@ -268,9 +268,9 @@ __jffs2_dbg_dump_node_refs_nolock(struct jffs2_sb_info *c, | |||
| 268 | } | 268 | } |
| 269 | 269 | ||
| 270 | printk(JFFS2_DBG); | 270 | printk(JFFS2_DBG); |
| 271 | for (ref = jeb->first_node; ; ref = ref->next_phys) { | 271 | for (ref = jeb->first_node; ; ref = ref_next(ref)) { |
| 272 | printk("%#08x(%#x)", ref_offset(ref), ref->__totlen); | 272 | printk("%#08x(%#x)", ref_offset(ref), ref->__totlen); |
| 273 | if (ref->next_phys) | 273 | if (ref_next(ref)) |
| 274 | printk("->"); | 274 | printk("->"); |
| 275 | else | 275 | else |
| 276 | break; | 276 | break; |
diff --git a/fs/jffs2/debug.h b/fs/jffs2/debug.h index 162af6dfe292..5fa494a792b2 100644 --- a/fs/jffs2/debug.h +++ b/fs/jffs2/debug.h | |||
| @@ -171,6 +171,12 @@ | |||
| 171 | #define dbg_memalloc(fmt, ...) | 171 | #define dbg_memalloc(fmt, ...) |
| 172 | #endif | 172 | #endif |
| 173 | 173 | ||
| 174 | /* Watch the XATTR subsystem */ | ||
| 175 | #ifdef JFFS2_DBG_XATTR_MESSAGES | ||
| 176 | #define dbg_xattr(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) | ||
| 177 | #else | ||
| 178 | #define dbg_xattr(fmt, ...) | ||
| 179 | #endif | ||
| 174 | 180 | ||
| 175 | /* "Sanity" checks */ | 181 | /* "Sanity" checks */ |
| 176 | void | 182 | void |
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c index 8bc7a5018e40..edd8371fc6a5 100644 --- a/fs/jffs2/dir.c +++ b/fs/jffs2/dir.c | |||
| @@ -17,8 +17,8 @@ | |||
| 17 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
| 18 | #include <linux/crc32.h> | 18 | #include <linux/crc32.h> |
| 19 | #include <linux/jffs2.h> | 19 | #include <linux/jffs2.h> |
| 20 | #include <linux/jffs2_fs_i.h> | 20 | #include "jffs2_fs_i.h" |
| 21 | #include <linux/jffs2_fs_sb.h> | 21 | #include "jffs2_fs_sb.h" |
| 22 | #include <linux/time.h> | 22 | #include <linux/time.h> |
| 23 | #include "nodelist.h" | 23 | #include "nodelist.h" |
| 24 | 24 | ||
| @@ -57,7 +57,12 @@ struct inode_operations jffs2_dir_inode_operations = | |||
| 57 | .rmdir = jffs2_rmdir, | 57 | .rmdir = jffs2_rmdir, |
| 58 | .mknod = jffs2_mknod, | 58 | .mknod = jffs2_mknod, |
| 59 | .rename = jffs2_rename, | 59 | .rename = jffs2_rename, |
| 60 | .permission = jffs2_permission, | ||
| 60 | .setattr = jffs2_setattr, | 61 | .setattr = jffs2_setattr, |
| 62 | .setxattr = jffs2_setxattr, | ||
| 63 | .getxattr = jffs2_getxattr, | ||
| 64 | .listxattr = jffs2_listxattr, | ||
| 65 | .removexattr = jffs2_removexattr | ||
| 61 | }; | 66 | }; |
| 62 | 67 | ||
| 63 | /***********************************************************************/ | 68 | /***********************************************************************/ |
| @@ -78,6 +83,9 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target, | |||
| 78 | 83 | ||
| 79 | D1(printk(KERN_DEBUG "jffs2_lookup()\n")); | 84 | D1(printk(KERN_DEBUG "jffs2_lookup()\n")); |
| 80 | 85 | ||
| 86 | if (target->d_name.len > JFFS2_MAX_NAME_LEN) | ||
| 87 | return ERR_PTR(-ENAMETOOLONG); | ||
| 88 | |||
| 81 | dir_f = JFFS2_INODE_INFO(dir_i); | 89 | dir_f = JFFS2_INODE_INFO(dir_i); |
| 82 | c = JFFS2_SB_INFO(dir_i->i_sb); | 90 | c = JFFS2_SB_INFO(dir_i->i_sb); |
| 83 | 91 | ||
| @@ -206,12 +214,15 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode, | |||
| 206 | ret = jffs2_do_create(c, dir_f, f, ri, | 214 | ret = jffs2_do_create(c, dir_f, f, ri, |
| 207 | dentry->d_name.name, dentry->d_name.len); | 215 | dentry->d_name.name, dentry->d_name.len); |
| 208 | 216 | ||
| 209 | if (ret) { | 217 | if (ret) |
| 210 | make_bad_inode(inode); | 218 | goto fail; |
| 211 | iput(inode); | 219 | |
| 212 | jffs2_free_raw_inode(ri); | 220 | ret = jffs2_init_security(inode, dir_i); |
| 213 | return ret; | 221 | if (ret) |
| 214 | } | 222 | goto fail; |
| 223 | ret = jffs2_init_acl(inode, dir_i); | ||
| 224 | if (ret) | ||
| 225 | goto fail; | ||
| 215 | 226 | ||
| 216 | dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(ri->ctime)); | 227 | dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(ri->ctime)); |
| 217 | 228 | ||
| @@ -221,6 +232,12 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode, | |||
| 221 | D1(printk(KERN_DEBUG "jffs2_create: Created ino #%lu with mode %o, nlink %d(%d). nrpages %ld\n", | 232 | D1(printk(KERN_DEBUG "jffs2_create: Created ino #%lu with mode %o, nlink %d(%d). nrpages %ld\n", |
| 222 | inode->i_ino, inode->i_mode, inode->i_nlink, f->inocache->nlink, inode->i_mapping->nrpages)); | 233 | inode->i_ino, inode->i_mode, inode->i_nlink, f->inocache->nlink, inode->i_mapping->nrpages)); |
| 223 | return 0; | 234 | return 0; |
| 235 | |||
| 236 | fail: | ||
| 237 | make_bad_inode(inode); | ||
| 238 | iput(inode); | ||
| 239 | jffs2_free_raw_inode(ri); | ||
| 240 | return ret; | ||
| 224 | } | 241 | } |
| 225 | 242 | ||
| 226 | /***********************************************************************/ | 243 | /***********************************************************************/ |
| @@ -291,7 +308,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char | |||
| 291 | struct jffs2_full_dnode *fn; | 308 | struct jffs2_full_dnode *fn; |
| 292 | struct jffs2_full_dirent *fd; | 309 | struct jffs2_full_dirent *fd; |
| 293 | int namelen; | 310 | int namelen; |
| 294 | uint32_t alloclen, phys_ofs; | 311 | uint32_t alloclen; |
| 295 | int ret, targetlen = strlen(target); | 312 | int ret, targetlen = strlen(target); |
| 296 | 313 | ||
| 297 | /* FIXME: If you care. We'd need to use frags for the target | 314 | /* FIXME: If you care. We'd need to use frags for the target |
| @@ -310,8 +327,8 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char | |||
| 310 | * Just the node will do for now, though | 327 | * Just the node will do for now, though |
| 311 | */ | 328 | */ |
| 312 | namelen = dentry->d_name.len; | 329 | namelen = dentry->d_name.len; |
| 313 | ret = jffs2_reserve_space(c, sizeof(*ri) + targetlen, &phys_ofs, &alloclen, | 330 | ret = jffs2_reserve_space(c, sizeof(*ri) + targetlen, &alloclen, |
| 314 | ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); | 331 | ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); |
| 315 | 332 | ||
| 316 | if (ret) { | 333 | if (ret) { |
| 317 | jffs2_free_raw_inode(ri); | 334 | jffs2_free_raw_inode(ri); |
| @@ -339,7 +356,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char | |||
| 339 | ri->data_crc = cpu_to_je32(crc32(0, target, targetlen)); | 356 | ri->data_crc = cpu_to_je32(crc32(0, target, targetlen)); |
| 340 | ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); | 357 | ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); |
| 341 | 358 | ||
| 342 | fn = jffs2_write_dnode(c, f, ri, target, targetlen, phys_ofs, ALLOC_NORMAL); | 359 | fn = jffs2_write_dnode(c, f, ri, target, targetlen, ALLOC_NORMAL); |
| 343 | 360 | ||
| 344 | jffs2_free_raw_inode(ri); | 361 | jffs2_free_raw_inode(ri); |
| 345 | 362 | ||
| @@ -371,8 +388,20 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char | |||
| 371 | up(&f->sem); | 388 | up(&f->sem); |
| 372 | 389 | ||
| 373 | jffs2_complete_reservation(c); | 390 | jffs2_complete_reservation(c); |
| 374 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, | 391 | |
| 375 | ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); | 392 | ret = jffs2_init_security(inode, dir_i); |
| 393 | if (ret) { | ||
| 394 | jffs2_clear_inode(inode); | ||
| 395 | return ret; | ||
| 396 | } | ||
| 397 | ret = jffs2_init_acl(inode, dir_i); | ||
| 398 | if (ret) { | ||
| 399 | jffs2_clear_inode(inode); | ||
| 400 | return ret; | ||
| 401 | } | ||
| 402 | |||
| 403 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen, | ||
| 404 | ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); | ||
| 376 | if (ret) { | 405 | if (ret) { |
| 377 | /* Eep. */ | 406 | /* Eep. */ |
| 378 | jffs2_clear_inode(inode); | 407 | jffs2_clear_inode(inode); |
| @@ -404,7 +433,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char | |||
| 404 | rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); | 433 | rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); |
| 405 | rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen)); | 434 | rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen)); |
| 406 | 435 | ||
| 407 | fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL); | 436 | fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, ALLOC_NORMAL); |
| 408 | 437 | ||
| 409 | if (IS_ERR(fd)) { | 438 | if (IS_ERR(fd)) { |
| 410 | /* dirent failed to write. Delete the inode normally | 439 | /* dirent failed to write. Delete the inode normally |
| @@ -442,7 +471,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode) | |||
| 442 | struct jffs2_full_dnode *fn; | 471 | struct jffs2_full_dnode *fn; |
| 443 | struct jffs2_full_dirent *fd; | 472 | struct jffs2_full_dirent *fd; |
| 444 | int namelen; | 473 | int namelen; |
| 445 | uint32_t alloclen, phys_ofs; | 474 | uint32_t alloclen; |
| 446 | int ret; | 475 | int ret; |
| 447 | 476 | ||
| 448 | mode |= S_IFDIR; | 477 | mode |= S_IFDIR; |
| @@ -457,8 +486,8 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode) | |||
| 457 | * Just the node will do for now, though | 486 | * Just the node will do for now, though |
| 458 | */ | 487 | */ |
| 459 | namelen = dentry->d_name.len; | 488 | namelen = dentry->d_name.len; |
| 460 | ret = jffs2_reserve_space(c, sizeof(*ri), &phys_ofs, &alloclen, ALLOC_NORMAL, | 489 | ret = jffs2_reserve_space(c, sizeof(*ri), &alloclen, ALLOC_NORMAL, |
| 461 | JFFS2_SUMMARY_INODE_SIZE); | 490 | JFFS2_SUMMARY_INODE_SIZE); |
| 462 | 491 | ||
| 463 | if (ret) { | 492 | if (ret) { |
| 464 | jffs2_free_raw_inode(ri); | 493 | jffs2_free_raw_inode(ri); |
| @@ -483,7 +512,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode) | |||
| 483 | ri->data_crc = cpu_to_je32(0); | 512 | ri->data_crc = cpu_to_je32(0); |
| 484 | ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); | 513 | ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); |
| 485 | 514 | ||
| 486 | fn = jffs2_write_dnode(c, f, ri, NULL, 0, phys_ofs, ALLOC_NORMAL); | 515 | fn = jffs2_write_dnode(c, f, ri, NULL, 0, ALLOC_NORMAL); |
| 487 | 516 | ||
| 488 | jffs2_free_raw_inode(ri); | 517 | jffs2_free_raw_inode(ri); |
| 489 | 518 | ||
| @@ -501,8 +530,20 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode) | |||
| 501 | up(&f->sem); | 530 | up(&f->sem); |
| 502 | 531 | ||
| 503 | jffs2_complete_reservation(c); | 532 | jffs2_complete_reservation(c); |
| 504 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, | 533 | |
| 505 | ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); | 534 | ret = jffs2_init_security(inode, dir_i); |
| 535 | if (ret) { | ||
| 536 | jffs2_clear_inode(inode); | ||
| 537 | return ret; | ||
| 538 | } | ||
| 539 | ret = jffs2_init_acl(inode, dir_i); | ||
| 540 | if (ret) { | ||
| 541 | jffs2_clear_inode(inode); | ||
| 542 | return ret; | ||
| 543 | } | ||
| 544 | |||
| 545 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen, | ||
| 546 | ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); | ||
| 506 | if (ret) { | 547 | if (ret) { |
| 507 | /* Eep. */ | 548 | /* Eep. */ |
| 508 | jffs2_clear_inode(inode); | 549 | jffs2_clear_inode(inode); |
| @@ -534,7 +575,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode) | |||
| 534 | rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); | 575 | rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); |
| 535 | rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen)); | 576 | rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen)); |
| 536 | 577 | ||
| 537 | fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL); | 578 | fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, ALLOC_NORMAL); |
| 538 | 579 | ||
| 539 | if (IS_ERR(fd)) { | 580 | if (IS_ERR(fd)) { |
| 540 | /* dirent failed to write. Delete the inode normally | 581 | /* dirent failed to write. Delete the inode normally |
| @@ -588,12 +629,12 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de | |||
| 588 | struct jffs2_full_dnode *fn; | 629 | struct jffs2_full_dnode *fn; |
| 589 | struct jffs2_full_dirent *fd; | 630 | struct jffs2_full_dirent *fd; |
| 590 | int namelen; | 631 | int namelen; |
| 591 | jint16_t dev; | 632 | union jffs2_device_node dev; |
| 592 | int devlen = 0; | 633 | int devlen = 0; |
| 593 | uint32_t alloclen, phys_ofs; | 634 | uint32_t alloclen; |
| 594 | int ret; | 635 | int ret; |
| 595 | 636 | ||
| 596 | if (!old_valid_dev(rdev)) | 637 | if (!new_valid_dev(rdev)) |
| 597 | return -EINVAL; | 638 | return -EINVAL; |
| 598 | 639 | ||
| 599 | ri = jffs2_alloc_raw_inode(); | 640 | ri = jffs2_alloc_raw_inode(); |
| @@ -602,17 +643,15 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de | |||
| 602 | 643 | ||
| 603 | c = JFFS2_SB_INFO(dir_i->i_sb); | 644 | c = JFFS2_SB_INFO(dir_i->i_sb); |
| 604 | 645 | ||
| 605 | if (S_ISBLK(mode) || S_ISCHR(mode)) { | 646 | if (S_ISBLK(mode) || S_ISCHR(mode)) |
| 606 | dev = cpu_to_je16(old_encode_dev(rdev)); | 647 | devlen = jffs2_encode_dev(&dev, rdev); |
| 607 | devlen = sizeof(dev); | ||
| 608 | } | ||
| 609 | 648 | ||
| 610 | /* Try to reserve enough space for both node and dirent. | 649 | /* Try to reserve enough space for both node and dirent. |
| 611 | * Just the node will do for now, though | 650 | * Just the node will do for now, though |
| 612 | */ | 651 | */ |
| 613 | namelen = dentry->d_name.len; | 652 | namelen = dentry->d_name.len; |
| 614 | ret = jffs2_reserve_space(c, sizeof(*ri) + devlen, &phys_ofs, &alloclen, | 653 | ret = jffs2_reserve_space(c, sizeof(*ri) + devlen, &alloclen, |
| 615 | ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); | 654 | ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); |
| 616 | 655 | ||
| 617 | if (ret) { | 656 | if (ret) { |
| 618 | jffs2_free_raw_inode(ri); | 657 | jffs2_free_raw_inode(ri); |
| @@ -639,7 +678,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de | |||
| 639 | ri->data_crc = cpu_to_je32(crc32(0, &dev, devlen)); | 678 | ri->data_crc = cpu_to_je32(crc32(0, &dev, devlen)); |
| 640 | ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); | 679 | ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); |
| 641 | 680 | ||
| 642 | fn = jffs2_write_dnode(c, f, ri, (char *)&dev, devlen, phys_ofs, ALLOC_NORMAL); | 681 | fn = jffs2_write_dnode(c, f, ri, (char *)&dev, devlen, ALLOC_NORMAL); |
| 643 | 682 | ||
| 644 | jffs2_free_raw_inode(ri); | 683 | jffs2_free_raw_inode(ri); |
| 645 | 684 | ||
| @@ -657,8 +696,20 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de | |||
| 657 | up(&f->sem); | 696 | up(&f->sem); |
| 658 | 697 | ||
| 659 | jffs2_complete_reservation(c); | 698 | jffs2_complete_reservation(c); |
| 660 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, | 699 | |
| 661 | ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); | 700 | ret = jffs2_init_security(inode, dir_i); |
| 701 | if (ret) { | ||
| 702 | jffs2_clear_inode(inode); | ||
| 703 | return ret; | ||
| 704 | } | ||
| 705 | ret = jffs2_init_acl(inode, dir_i); | ||
| 706 | if (ret) { | ||
| 707 | jffs2_clear_inode(inode); | ||
| 708 | return ret; | ||
| 709 | } | ||
| 710 | |||
| 711 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen, | ||
| 712 | ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); | ||
| 662 | if (ret) { | 713 | if (ret) { |
| 663 | /* Eep. */ | 714 | /* Eep. */ |
| 664 | jffs2_clear_inode(inode); | 715 | jffs2_clear_inode(inode); |
| @@ -693,7 +744,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de | |||
| 693 | rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); | 744 | rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); |
| 694 | rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen)); | 745 | rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen)); |
| 695 | 746 | ||
| 696 | fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL); | 747 | fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, ALLOC_NORMAL); |
| 697 | 748 | ||
| 698 | if (IS_ERR(fd)) { | 749 | if (IS_ERR(fd)) { |
| 699 | /* dirent failed to write. Delete the inode normally | 750 | /* dirent failed to write. Delete the inode normally |
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c index dad68fdffe9e..1862e8bc101d 100644 --- a/fs/jffs2/erase.c +++ b/fs/jffs2/erase.c | |||
| @@ -30,7 +30,6 @@ static void jffs2_erase_callback(struct erase_info *); | |||
| 30 | #endif | 30 | #endif |
| 31 | static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset); | 31 | static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset); |
| 32 | static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); | 32 | static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); |
| 33 | static void jffs2_free_all_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); | ||
| 34 | static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); | 33 | static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); |
| 35 | 34 | ||
| 36 | static void jffs2_erase_block(struct jffs2_sb_info *c, | 35 | static void jffs2_erase_block(struct jffs2_sb_info *c, |
| @@ -136,7 +135,7 @@ void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count) | |||
| 136 | c->used_size -= jeb->used_size; | 135 | c->used_size -= jeb->used_size; |
| 137 | c->dirty_size -= jeb->dirty_size; | 136 | c->dirty_size -= jeb->dirty_size; |
| 138 | jeb->wasted_size = jeb->used_size = jeb->dirty_size = jeb->free_size = 0; | 137 | jeb->wasted_size = jeb->used_size = jeb->dirty_size = jeb->free_size = 0; |
| 139 | jffs2_free_all_node_refs(c, jeb); | 138 | jffs2_free_jeb_node_refs(c, jeb); |
| 140 | list_add(&jeb->list, &c->erasing_list); | 139 | list_add(&jeb->list, &c->erasing_list); |
| 141 | spin_unlock(&c->erase_completion_lock); | 140 | spin_unlock(&c->erase_completion_lock); |
| 142 | 141 | ||
| @@ -231,6 +230,7 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c, | |||
| 231 | at the end of the linked list. Stash it and continue | 230 | at the end of the linked list. Stash it and continue |
| 232 | from the beginning of the list */ | 231 | from the beginning of the list */ |
| 233 | ic = (struct jffs2_inode_cache *)(*prev); | 232 | ic = (struct jffs2_inode_cache *)(*prev); |
| 233 | BUG_ON(ic->class != RAWNODE_CLASS_INODE_CACHE); | ||
| 234 | prev = &ic->nodes; | 234 | prev = &ic->nodes; |
| 235 | continue; | 235 | continue; |
| 236 | } | 236 | } |
| @@ -283,22 +283,27 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c, | |||
| 283 | jffs2_del_ino_cache(c, ic); | 283 | jffs2_del_ino_cache(c, ic); |
| 284 | } | 284 | } |
| 285 | 285 | ||
| 286 | static void jffs2_free_all_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) | 286 | void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) |
| 287 | { | 287 | { |
| 288 | struct jffs2_raw_node_ref *ref; | 288 | struct jffs2_raw_node_ref *block, *ref; |
| 289 | D1(printk(KERN_DEBUG "Freeing all node refs for eraseblock offset 0x%08x\n", jeb->offset)); | 289 | D1(printk(KERN_DEBUG "Freeing all node refs for eraseblock offset 0x%08x\n", jeb->offset)); |
| 290 | while(jeb->first_node) { | ||
| 291 | ref = jeb->first_node; | ||
| 292 | jeb->first_node = ref->next_phys; | ||
| 293 | 290 | ||
| 294 | /* Remove from the inode-list */ | 291 | block = ref = jeb->first_node; |
| 295 | if (ref->next_in_ino) | 292 | |
| 293 | while (ref) { | ||
| 294 | if (ref->flash_offset == REF_LINK_NODE) { | ||
| 295 | ref = ref->next_in_ino; | ||
| 296 | jffs2_free_refblock(block); | ||
| 297 | block = ref; | ||
| 298 | continue; | ||
| 299 | } | ||
| 300 | if (ref->flash_offset != REF_EMPTY_NODE && ref->next_in_ino) | ||
| 296 | jffs2_remove_node_refs_from_ino_list(c, ref, jeb); | 301 | jffs2_remove_node_refs_from_ino_list(c, ref, jeb); |
| 297 | /* else it was a non-inode node or already removed, so don't bother */ | 302 | /* else it was a non-inode node or already removed, so don't bother */ |
| 298 | 303 | ||
| 299 | jffs2_free_raw_node_ref(ref); | 304 | ref++; |
| 300 | } | 305 | } |
| 301 | jeb->last_node = NULL; | 306 | jeb->first_node = jeb->last_node = NULL; |
| 302 | } | 307 | } |
| 303 | 308 | ||
| 304 | static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t *bad_offset) | 309 | static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t *bad_offset) |
| @@ -351,7 +356,6 @@ fail: | |||
| 351 | 356 | ||
| 352 | static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) | 357 | static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) |
| 353 | { | 358 | { |
| 354 | struct jffs2_raw_node_ref *marker_ref = NULL; | ||
| 355 | size_t retlen; | 359 | size_t retlen; |
| 356 | int ret; | 360 | int ret; |
| 357 | uint32_t bad_offset; | 361 | uint32_t bad_offset; |
| @@ -373,12 +377,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb | |||
| 373 | goto filebad; | 377 | goto filebad; |
| 374 | } | 378 | } |
| 375 | 379 | ||
| 376 | jeb->first_node = jeb->last_node = NULL; | 380 | /* Everything else got zeroed before the erase */ |
| 377 | jeb->free_size = c->sector_size; | 381 | jeb->free_size = c->sector_size; |
| 378 | jeb->used_size = 0; | ||
| 379 | jeb->dirty_size = 0; | ||
| 380 | jeb->wasted_size = 0; | ||
| 381 | |||
| 382 | } else { | 382 | } else { |
| 383 | 383 | ||
| 384 | struct kvec vecs[1]; | 384 | struct kvec vecs[1]; |
| @@ -388,11 +388,7 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb | |||
| 388 | .totlen = cpu_to_je32(c->cleanmarker_size) | 388 | .totlen = cpu_to_je32(c->cleanmarker_size) |
| 389 | }; | 389 | }; |
| 390 | 390 | ||
| 391 | marker_ref = jffs2_alloc_raw_node_ref(); | 391 | jffs2_prealloc_raw_node_refs(c, jeb, 1); |
| 392 | if (!marker_ref) { | ||
| 393 | printk(KERN_WARNING "Failed to allocate raw node ref for clean marker. Refiling\n"); | ||
| 394 | goto refile; | ||
| 395 | } | ||
| 396 | 392 | ||
| 397 | marker.hdr_crc = cpu_to_je32(crc32(0, &marker, sizeof(struct jffs2_unknown_node)-4)); | 393 | marker.hdr_crc = cpu_to_je32(crc32(0, &marker, sizeof(struct jffs2_unknown_node)-4)); |
| 398 | 394 | ||
| @@ -408,21 +404,13 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb | |||
| 408 | printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n", | 404 | printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n", |
| 409 | jeb->offset, sizeof(marker), retlen); | 405 | jeb->offset, sizeof(marker), retlen); |
| 410 | 406 | ||
| 411 | jffs2_free_raw_node_ref(marker_ref); | ||
| 412 | goto filebad; | 407 | goto filebad; |
| 413 | } | 408 | } |
| 414 | 409 | ||
| 415 | marker_ref->next_in_ino = NULL; | 410 | /* Everything else got zeroed before the erase */ |
| 416 | marker_ref->next_phys = NULL; | 411 | jeb->free_size = c->sector_size; |
| 417 | marker_ref->flash_offset = jeb->offset | REF_NORMAL; | 412 | /* FIXME Special case for cleanmarker in empty block */ |
| 418 | marker_ref->__totlen = c->cleanmarker_size; | 413 | jffs2_link_node_ref(c, jeb, jeb->offset | REF_NORMAL, c->cleanmarker_size, NULL); |
| 419 | |||
| 420 | jeb->first_node = jeb->last_node = marker_ref; | ||
| 421 | |||
| 422 | jeb->free_size = c->sector_size - c->cleanmarker_size; | ||
| 423 | jeb->used_size = c->cleanmarker_size; | ||
| 424 | jeb->dirty_size = 0; | ||
| 425 | jeb->wasted_size = 0; | ||
| 426 | } | 414 | } |
| 427 | 415 | ||
| 428 | spin_lock(&c->erase_completion_lock); | 416 | spin_lock(&c->erase_completion_lock); |
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c index 9f4171213e58..bb8844f40e48 100644 --- a/fs/jffs2/file.c +++ b/fs/jffs2/file.c | |||
| @@ -54,7 +54,12 @@ const struct file_operations jffs2_file_operations = | |||
| 54 | 54 | ||
| 55 | struct inode_operations jffs2_file_inode_operations = | 55 | struct inode_operations jffs2_file_inode_operations = |
| 56 | { | 56 | { |
| 57 | .setattr = jffs2_setattr | 57 | .permission = jffs2_permission, |
| 58 | .setattr = jffs2_setattr, | ||
| 59 | .setxattr = jffs2_setxattr, | ||
| 60 | .getxattr = jffs2_getxattr, | ||
| 61 | .listxattr = jffs2_listxattr, | ||
| 62 | .removexattr = jffs2_removexattr | ||
| 58 | }; | 63 | }; |
| 59 | 64 | ||
| 60 | struct address_space_operations jffs2_file_address_operations = | 65 | struct address_space_operations jffs2_file_address_operations = |
| @@ -129,13 +134,13 @@ static int jffs2_prepare_write (struct file *filp, struct page *pg, | |||
| 129 | struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); | 134 | struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); |
| 130 | struct jffs2_raw_inode ri; | 135 | struct jffs2_raw_inode ri; |
| 131 | struct jffs2_full_dnode *fn; | 136 | struct jffs2_full_dnode *fn; |
| 132 | uint32_t phys_ofs, alloc_len; | 137 | uint32_t alloc_len; |
| 133 | 138 | ||
| 134 | D1(printk(KERN_DEBUG "Writing new hole frag 0x%x-0x%x between current EOF and new page\n", | 139 | D1(printk(KERN_DEBUG "Writing new hole frag 0x%x-0x%x between current EOF and new page\n", |
| 135 | (unsigned int)inode->i_size, pageofs)); | 140 | (unsigned int)inode->i_size, pageofs)); |
| 136 | 141 | ||
| 137 | ret = jffs2_reserve_space(c, sizeof(ri), &phys_ofs, &alloc_len, | 142 | ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, |
| 138 | ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); | 143 | ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); |
| 139 | if (ret) | 144 | if (ret) |
| 140 | return ret; | 145 | return ret; |
| 141 | 146 | ||
| @@ -161,7 +166,7 @@ static int jffs2_prepare_write (struct file *filp, struct page *pg, | |||
| 161 | ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); | 166 | ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); |
| 162 | ri.data_crc = cpu_to_je32(0); | 167 | ri.data_crc = cpu_to_je32(0); |
| 163 | 168 | ||
| 164 | fn = jffs2_write_dnode(c, f, &ri, NULL, 0, phys_ofs, ALLOC_NORMAL); | 169 | fn = jffs2_write_dnode(c, f, &ri, NULL, 0, ALLOC_NORMAL); |
| 165 | 170 | ||
| 166 | if (IS_ERR(fn)) { | 171 | if (IS_ERR(fn)) { |
| 167 | ret = PTR_ERR(fn); | 172 | ret = PTR_ERR(fn); |
| @@ -215,12 +220,20 @@ static int jffs2_commit_write (struct file *filp, struct page *pg, | |||
| 215 | D1(printk(KERN_DEBUG "jffs2_commit_write(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n", | 220 | D1(printk(KERN_DEBUG "jffs2_commit_write(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n", |
| 216 | inode->i_ino, pg->index << PAGE_CACHE_SHIFT, start, end, pg->flags)); | 221 | inode->i_ino, pg->index << PAGE_CACHE_SHIFT, start, end, pg->flags)); |
| 217 | 222 | ||
| 218 | if (!start && end == PAGE_CACHE_SIZE) { | 223 | if (end == PAGE_CACHE_SIZE) { |
| 219 | /* We need to avoid deadlock with page_cache_read() in | 224 | if (!start) { |
| 220 | jffs2_garbage_collect_pass(). So we have to mark the | 225 | /* We need to avoid deadlock with page_cache_read() in |
| 221 | page up to date, to prevent page_cache_read() from | 226 | jffs2_garbage_collect_pass(). So we have to mark the |
| 222 | trying to re-lock it. */ | 227 | page up to date, to prevent page_cache_read() from |
| 223 | SetPageUptodate(pg); | 228 | trying to re-lock it. */ |
| 229 | SetPageUptodate(pg); | ||
| 230 | } else { | ||
| 231 | /* When writing out the end of a page, write out the | ||
| 232 | _whole_ page. This helps to reduce the number of | ||
| 233 | nodes in files which have many short writes, like | ||
| 234 | syslog files. */ | ||
| 235 | start = aligned_start = 0; | ||
| 236 | } | ||
| 224 | } | 237 | } |
| 225 | 238 | ||
| 226 | ri = jffs2_alloc_raw_inode(); | 239 | ri = jffs2_alloc_raw_inode(); |
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c index 09e5d10b8840..7b6c24b14f85 100644 --- a/fs/jffs2/fs.c +++ b/fs/jffs2/fs.c | |||
| @@ -33,11 +33,11 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) | |||
| 33 | struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); | 33 | struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); |
| 34 | struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); | 34 | struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); |
| 35 | struct jffs2_raw_inode *ri; | 35 | struct jffs2_raw_inode *ri; |
| 36 | unsigned short dev; | 36 | union jffs2_device_node dev; |
| 37 | unsigned char *mdata = NULL; | 37 | unsigned char *mdata = NULL; |
| 38 | int mdatalen = 0; | 38 | int mdatalen = 0; |
| 39 | unsigned int ivalid; | 39 | unsigned int ivalid; |
| 40 | uint32_t phys_ofs, alloclen; | 40 | uint32_t alloclen; |
| 41 | int ret; | 41 | int ret; |
| 42 | D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino)); | 42 | D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino)); |
| 43 | ret = inode_change_ok(inode, iattr); | 43 | ret = inode_change_ok(inode, iattr); |
| @@ -51,20 +51,24 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) | |||
| 51 | it out again with the appropriate data attached */ | 51 | it out again with the appropriate data attached */ |
| 52 | if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) { | 52 | if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) { |
| 53 | /* For these, we don't actually need to read the old node */ | 53 | /* For these, we don't actually need to read the old node */ |
| 54 | dev = old_encode_dev(inode->i_rdev); | 54 | mdatalen = jffs2_encode_dev(&dev, inode->i_rdev); |
| 55 | mdata = (char *)&dev; | 55 | mdata = (char *)&dev; |
| 56 | mdatalen = sizeof(dev); | ||
| 57 | D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of kdev_t\n", mdatalen)); | 56 | D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of kdev_t\n", mdatalen)); |
| 58 | } else if (S_ISLNK(inode->i_mode)) { | 57 | } else if (S_ISLNK(inode->i_mode)) { |
| 58 | down(&f->sem); | ||
| 59 | mdatalen = f->metadata->size; | 59 | mdatalen = f->metadata->size; |
| 60 | mdata = kmalloc(f->metadata->size, GFP_USER); | 60 | mdata = kmalloc(f->metadata->size, GFP_USER); |
| 61 | if (!mdata) | 61 | if (!mdata) { |
| 62 | up(&f->sem); | ||
| 62 | return -ENOMEM; | 63 | return -ENOMEM; |
| 64 | } | ||
| 63 | ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen); | 65 | ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen); |
| 64 | if (ret) { | 66 | if (ret) { |
| 67 | up(&f->sem); | ||
| 65 | kfree(mdata); | 68 | kfree(mdata); |
| 66 | return ret; | 69 | return ret; |
| 67 | } | 70 | } |
| 71 | up(&f->sem); | ||
| 68 | D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of symlink target\n", mdatalen)); | 72 | D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of symlink target\n", mdatalen)); |
| 69 | } | 73 | } |
| 70 | 74 | ||
| @@ -75,8 +79,8 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) | |||
| 75 | return -ENOMEM; | 79 | return -ENOMEM; |
| 76 | } | 80 | } |
| 77 | 81 | ||
| 78 | ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &phys_ofs, &alloclen, | 82 | ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &alloclen, |
| 79 | ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); | 83 | ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); |
| 80 | if (ret) { | 84 | if (ret) { |
| 81 | jffs2_free_raw_inode(ri); | 85 | jffs2_free_raw_inode(ri); |
| 82 | if (S_ISLNK(inode->i_mode & S_IFMT)) | 86 | if (S_ISLNK(inode->i_mode & S_IFMT)) |
| @@ -127,7 +131,7 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) | |||
| 127 | else | 131 | else |
| 128 | ri->data_crc = cpu_to_je32(0); | 132 | ri->data_crc = cpu_to_je32(0); |
| 129 | 133 | ||
| 130 | new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, phys_ofs, ALLOC_NORMAL); | 134 | new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, ALLOC_NORMAL); |
| 131 | if (S_ISLNK(inode->i_mode)) | 135 | if (S_ISLNK(inode->i_mode)) |
| 132 | kfree(mdata); | 136 | kfree(mdata); |
| 133 | 137 | ||
| @@ -180,7 +184,12 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) | |||
| 180 | 184 | ||
| 181 | int jffs2_setattr(struct dentry *dentry, struct iattr *iattr) | 185 | int jffs2_setattr(struct dentry *dentry, struct iattr *iattr) |
| 182 | { | 186 | { |
| 183 | return jffs2_do_setattr(dentry->d_inode, iattr); | 187 | int rc; |
| 188 | |||
| 189 | rc = jffs2_do_setattr(dentry->d_inode, iattr); | ||
| 190 | if (!rc && (iattr->ia_valid & ATTR_MODE)) | ||
| 191 | rc = jffs2_acl_chmod(dentry->d_inode); | ||
| 192 | return rc; | ||
| 184 | } | 193 | } |
| 185 | 194 | ||
| 186 | int jffs2_statfs(struct super_block *sb, struct kstatfs *buf) | 195 | int jffs2_statfs(struct super_block *sb, struct kstatfs *buf) |
| @@ -219,6 +228,7 @@ void jffs2_clear_inode (struct inode *inode) | |||
| 219 | 228 | ||
| 220 | D1(printk(KERN_DEBUG "jffs2_clear_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode)); | 229 | D1(printk(KERN_DEBUG "jffs2_clear_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode)); |
| 221 | 230 | ||
| 231 | jffs2_xattr_delete_inode(c, f->inocache); | ||
| 222 | jffs2_do_clear_inode(c, f); | 232 | jffs2_do_clear_inode(c, f); |
| 223 | } | 233 | } |
| 224 | 234 | ||
| @@ -227,6 +237,8 @@ void jffs2_read_inode (struct inode *inode) | |||
| 227 | struct jffs2_inode_info *f; | 237 | struct jffs2_inode_info *f; |
| 228 | struct jffs2_sb_info *c; | 238 | struct jffs2_sb_info *c; |
| 229 | struct jffs2_raw_inode latest_node; | 239 | struct jffs2_raw_inode latest_node; |
| 240 | union jffs2_device_node jdev; | ||
| 241 | dev_t rdev = 0; | ||
| 230 | int ret; | 242 | int ret; |
| 231 | 243 | ||
| 232 | D1(printk(KERN_DEBUG "jffs2_read_inode(): inode->i_ino == %lu\n", inode->i_ino)); | 244 | D1(printk(KERN_DEBUG "jffs2_read_inode(): inode->i_ino == %lu\n", inode->i_ino)); |
| @@ -258,7 +270,6 @@ void jffs2_read_inode (struct inode *inode) | |||
| 258 | inode->i_blocks = (inode->i_size + 511) >> 9; | 270 | inode->i_blocks = (inode->i_size + 511) >> 9; |
| 259 | 271 | ||
| 260 | switch (inode->i_mode & S_IFMT) { | 272 | switch (inode->i_mode & S_IFMT) { |
| 261 | jint16_t rdev; | ||
| 262 | 273 | ||
| 263 | case S_IFLNK: | 274 | case S_IFLNK: |
| 264 | inode->i_op = &jffs2_symlink_inode_operations; | 275 | inode->i_op = &jffs2_symlink_inode_operations; |
| @@ -292,8 +303,16 @@ void jffs2_read_inode (struct inode *inode) | |||
| 292 | case S_IFBLK: | 303 | case S_IFBLK: |
| 293 | case S_IFCHR: | 304 | case S_IFCHR: |
| 294 | /* Read the device numbers from the media */ | 305 | /* Read the device numbers from the media */ |
| 306 | if (f->metadata->size != sizeof(jdev.old) && | ||
| 307 | f->metadata->size != sizeof(jdev.new)) { | ||
| 308 | printk(KERN_NOTICE "Device node has strange size %d\n", f->metadata->size); | ||
| 309 | up(&f->sem); | ||
| 310 | jffs2_do_clear_inode(c, f); | ||
| 311 | make_bad_inode(inode); | ||
| 312 | return; | ||
| 313 | } | ||
| 295 | D1(printk(KERN_DEBUG "Reading device numbers from flash\n")); | 314 | D1(printk(KERN_DEBUG "Reading device numbers from flash\n")); |
| 296 | if (jffs2_read_dnode(c, f, f->metadata, (char *)&rdev, 0, sizeof(rdev)) < 0) { | 315 | if (jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size) < 0) { |
| 297 | /* Eep */ | 316 | /* Eep */ |
| 298 | printk(KERN_NOTICE "Read device numbers for inode %lu failed\n", (unsigned long)inode->i_ino); | 317 | printk(KERN_NOTICE "Read device numbers for inode %lu failed\n", (unsigned long)inode->i_ino); |
| 299 | up(&f->sem); | 318 | up(&f->sem); |
| @@ -301,12 +320,15 @@ void jffs2_read_inode (struct inode *inode) | |||
| 301 | make_bad_inode(inode); | 320 | make_bad_inode(inode); |
| 302 | return; | 321 | return; |
| 303 | } | 322 | } |
| 323 | if (f->metadata->size == sizeof(jdev.old)) | ||
| 324 | rdev = old_decode_dev(je16_to_cpu(jdev.old)); | ||
| 325 | else | ||
| 326 | rdev = new_decode_dev(je32_to_cpu(jdev.new)); | ||
| 304 | 327 | ||
| 305 | case S_IFSOCK: | 328 | case S_IFSOCK: |
| 306 | case S_IFIFO: | 329 | case S_IFIFO: |
| 307 | inode->i_op = &jffs2_file_inode_operations; | 330 | inode->i_op = &jffs2_file_inode_operations; |
| 308 | init_special_inode(inode, inode->i_mode, | 331 | init_special_inode(inode, inode->i_mode, rdev); |
| 309 | old_decode_dev((je16_to_cpu(rdev)))); | ||
| 310 | break; | 332 | break; |
| 311 | 333 | ||
| 312 | default: | 334 | default: |
| @@ -492,6 +514,8 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent) | |||
| 492 | } | 514 | } |
| 493 | memset(c->inocache_list, 0, INOCACHE_HASHSIZE * sizeof(struct jffs2_inode_cache *)); | 515 | memset(c->inocache_list, 0, INOCACHE_HASHSIZE * sizeof(struct jffs2_inode_cache *)); |
| 494 | 516 | ||
| 517 | jffs2_init_xattr_subsystem(c); | ||
| 518 | |||
| 495 | if ((ret = jffs2_do_mount_fs(c))) | 519 | if ((ret = jffs2_do_mount_fs(c))) |
| 496 | goto out_inohash; | 520 | goto out_inohash; |
| 497 | 521 | ||
| @@ -526,6 +550,7 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent) | |||
| 526 | else | 550 | else |
| 527 | kfree(c->blocks); | 551 | kfree(c->blocks); |
| 528 | out_inohash: | 552 | out_inohash: |
| 553 | jffs2_clear_xattr_subsystem(c); | ||
| 529 | kfree(c->inocache_list); | 554 | kfree(c->inocache_list); |
| 530 | out_wbuf: | 555 | out_wbuf: |
| 531 | jffs2_flash_cleanup(c); | 556 | jffs2_flash_cleanup(c); |
| @@ -639,13 +664,6 @@ static int jffs2_flash_setup(struct jffs2_sb_info *c) { | |||
| 639 | return ret; | 664 | return ret; |
| 640 | } | 665 | } |
| 641 | 666 | ||
| 642 | /* add setups for other bizarre flashes here... */ | ||
| 643 | if (jffs2_nor_ecc(c)) { | ||
| 644 | ret = jffs2_nor_ecc_flash_setup(c); | ||
| 645 | if (ret) | ||
| 646 | return ret; | ||
| 647 | } | ||
| 648 | |||
| 649 | /* and Dataflash */ | 667 | /* and Dataflash */ |
| 650 | if (jffs2_dataflash(c)) { | 668 | if (jffs2_dataflash(c)) { |
| 651 | ret = jffs2_dataflash_setup(c); | 669 | ret = jffs2_dataflash_setup(c); |
| @@ -669,11 +687,6 @@ void jffs2_flash_cleanup(struct jffs2_sb_info *c) { | |||
| 669 | jffs2_nand_flash_cleanup(c); | 687 | jffs2_nand_flash_cleanup(c); |
| 670 | } | 688 | } |
| 671 | 689 | ||
| 672 | /* add cleanups for other bizarre flashes here... */ | ||
| 673 | if (jffs2_nor_ecc(c)) { | ||
| 674 | jffs2_nor_ecc_flash_cleanup(c); | ||
| 675 | } | ||
| 676 | |||
| 677 | /* and DataFlash */ | 690 | /* and DataFlash */ |
| 678 | if (jffs2_dataflash(c)) { | 691 | if (jffs2_dataflash(c)) { |
| 679 | jffs2_dataflash_cleanup(c); | 692 | jffs2_dataflash_cleanup(c); |
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c index f9ffece453a3..477c526d638b 100644 --- a/fs/jffs2/gc.c +++ b/fs/jffs2/gc.c | |||
| @@ -125,6 +125,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
| 125 | struct jffs2_eraseblock *jeb; | 125 | struct jffs2_eraseblock *jeb; |
| 126 | struct jffs2_raw_node_ref *raw; | 126 | struct jffs2_raw_node_ref *raw; |
| 127 | int ret = 0, inum, nlink; | 127 | int ret = 0, inum, nlink; |
| 128 | int xattr = 0; | ||
| 128 | 129 | ||
| 129 | if (down_interruptible(&c->alloc_sem)) | 130 | if (down_interruptible(&c->alloc_sem)) |
| 130 | return -EINTR; | 131 | return -EINTR; |
| @@ -138,7 +139,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
| 138 | the node CRCs etc. Do it now. */ | 139 | the node CRCs etc. Do it now. */ |
| 139 | 140 | ||
| 140 | /* checked_ino is protected by the alloc_sem */ | 141 | /* checked_ino is protected by the alloc_sem */ |
| 141 | if (c->checked_ino > c->highest_ino) { | 142 | if (c->checked_ino > c->highest_ino && xattr) { |
| 142 | printk(KERN_CRIT "Checked all inodes but still 0x%x bytes of unchecked space?\n", | 143 | printk(KERN_CRIT "Checked all inodes but still 0x%x bytes of unchecked space?\n", |
| 143 | c->unchecked_size); | 144 | c->unchecked_size); |
| 144 | jffs2_dbg_dump_block_lists_nolock(c); | 145 | jffs2_dbg_dump_block_lists_nolock(c); |
| @@ -148,6 +149,9 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
| 148 | 149 | ||
| 149 | spin_unlock(&c->erase_completion_lock); | 150 | spin_unlock(&c->erase_completion_lock); |
| 150 | 151 | ||
| 152 | if (!xattr) | ||
| 153 | xattr = jffs2_verify_xattr(c); | ||
| 154 | |||
| 151 | spin_lock(&c->inocache_lock); | 155 | spin_lock(&c->inocache_lock); |
| 152 | 156 | ||
| 153 | ic = jffs2_get_ino_cache(c, c->checked_ino++); | 157 | ic = jffs2_get_ino_cache(c, c->checked_ino++); |
| @@ -181,6 +185,10 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
| 181 | and trigger the BUG() above while we haven't yet | 185 | and trigger the BUG() above while we haven't yet |
| 182 | finished checking all its nodes */ | 186 | finished checking all its nodes */ |
| 183 | D1(printk(KERN_DEBUG "Waiting for ino #%u to finish reading\n", ic->ino)); | 187 | D1(printk(KERN_DEBUG "Waiting for ino #%u to finish reading\n", ic->ino)); |
| 188 | /* We need to come back again for the _same_ inode. We've | ||
| 189 | made no progress in this case, but that should be OK */ | ||
| 190 | c->checked_ino--; | ||
| 191 | |||
| 184 | up(&c->alloc_sem); | 192 | up(&c->alloc_sem); |
| 185 | sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); | 193 | sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); |
| 186 | return 0; | 194 | return 0; |
| @@ -231,7 +239,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
| 231 | 239 | ||
| 232 | while(ref_obsolete(raw)) { | 240 | while(ref_obsolete(raw)) { |
| 233 | D1(printk(KERN_DEBUG "Node at 0x%08x is obsolete... skipping\n", ref_offset(raw))); | 241 | D1(printk(KERN_DEBUG "Node at 0x%08x is obsolete... skipping\n", ref_offset(raw))); |
| 234 | raw = raw->next_phys; | 242 | raw = ref_next(raw); |
| 235 | if (unlikely(!raw)) { | 243 | if (unlikely(!raw)) { |
| 236 | printk(KERN_WARNING "eep. End of raw list while still supposedly nodes to GC\n"); | 244 | printk(KERN_WARNING "eep. End of raw list while still supposedly nodes to GC\n"); |
| 237 | printk(KERN_WARNING "erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n", | 245 | printk(KERN_WARNING "erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n", |
| @@ -248,16 +256,37 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
| 248 | 256 | ||
| 249 | if (!raw->next_in_ino) { | 257 | if (!raw->next_in_ino) { |
| 250 | /* Inode-less node. Clean marker, snapshot or something like that */ | 258 | /* Inode-less node. Clean marker, snapshot or something like that */ |
| 251 | /* FIXME: If it's something that needs to be copied, including something | ||
| 252 | we don't grok that has JFFS2_NODETYPE_RWCOMPAT_COPY, we should do so */ | ||
| 253 | spin_unlock(&c->erase_completion_lock); | 259 | spin_unlock(&c->erase_completion_lock); |
| 254 | jffs2_mark_node_obsolete(c, raw); | 260 | if (ref_flags(raw) == REF_PRISTINE) { |
| 261 | /* It's an unknown node with JFFS2_FEATURE_RWCOMPAT_COPY */ | ||
| 262 | jffs2_garbage_collect_pristine(c, NULL, raw); | ||
| 263 | } else { | ||
| 264 | /* Just mark it obsolete */ | ||
| 265 | jffs2_mark_node_obsolete(c, raw); | ||
| 266 | } | ||
| 255 | up(&c->alloc_sem); | 267 | up(&c->alloc_sem); |
| 256 | goto eraseit_lock; | 268 | goto eraseit_lock; |
| 257 | } | 269 | } |
| 258 | 270 | ||
| 259 | ic = jffs2_raw_ref_to_ic(raw); | 271 | ic = jffs2_raw_ref_to_ic(raw); |
| 260 | 272 | ||
| 273 | #ifdef CONFIG_JFFS2_FS_XATTR | ||
| 274 | /* When 'ic' refers xattr_datum/xattr_ref, this node is GCed as xattr. | ||
| 275 | * We can decide whether this node is inode or xattr by ic->class. */ | ||
| 276 | if (ic->class == RAWNODE_CLASS_XATTR_DATUM | ||
| 277 | || ic->class == RAWNODE_CLASS_XATTR_REF) { | ||
| 278 | BUG_ON(raw->next_in_ino != (void *)ic); | ||
| 279 | spin_unlock(&c->erase_completion_lock); | ||
| 280 | |||
| 281 | if (ic->class == RAWNODE_CLASS_XATTR_DATUM) { | ||
| 282 | ret = jffs2_garbage_collect_xattr_datum(c, (struct jffs2_xattr_datum *)ic); | ||
| 283 | } else { | ||
| 284 | ret = jffs2_garbage_collect_xattr_ref(c, (struct jffs2_xattr_ref *)ic); | ||
| 285 | } | ||
| 286 | goto release_sem; | ||
| 287 | } | ||
| 288 | #endif | ||
| 289 | |||
| 261 | /* We need to hold the inocache. Either the erase_completion_lock or | 290 | /* We need to hold the inocache. Either the erase_completion_lock or |
| 262 | the inocache_lock are sufficient; we trade down since the inocache_lock | 291 | the inocache_lock are sufficient; we trade down since the inocache_lock |
| 263 | causes less contention. */ | 292 | causes less contention. */ |
| @@ -499,7 +528,6 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, | |||
| 499 | struct jffs2_raw_node_ref *raw) | 528 | struct jffs2_raw_node_ref *raw) |
| 500 | { | 529 | { |
| 501 | union jffs2_node_union *node; | 530 | union jffs2_node_union *node; |
| 502 | struct jffs2_raw_node_ref *nraw; | ||
| 503 | size_t retlen; | 531 | size_t retlen; |
| 504 | int ret; | 532 | int ret; |
| 505 | uint32_t phys_ofs, alloclen; | 533 | uint32_t phys_ofs, alloclen; |
| @@ -508,15 +536,16 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, | |||
| 508 | 536 | ||
| 509 | D1(printk(KERN_DEBUG "Going to GC REF_PRISTINE node at 0x%08x\n", ref_offset(raw))); | 537 | D1(printk(KERN_DEBUG "Going to GC REF_PRISTINE node at 0x%08x\n", ref_offset(raw))); |
| 510 | 538 | ||
| 511 | rawlen = ref_totlen(c, c->gcblock, raw); | 539 | alloclen = rawlen = ref_totlen(c, c->gcblock, raw); |
| 512 | 540 | ||
| 513 | /* Ask for a small amount of space (or the totlen if smaller) because we | 541 | /* Ask for a small amount of space (or the totlen if smaller) because we |
| 514 | don't want to force wastage of the end of a block if splitting would | 542 | don't want to force wastage of the end of a block if splitting would |
| 515 | work. */ | 543 | work. */ |
| 516 | ret = jffs2_reserve_space_gc(c, min_t(uint32_t, sizeof(struct jffs2_raw_inode) + | 544 | if (ic && alloclen > sizeof(struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN) |
| 517 | JFFS2_MIN_DATA_LEN, rawlen), &phys_ofs, &alloclen, rawlen); | 545 | alloclen = sizeof(struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN; |
| 518 | /* this is not the exact summary size of it, | 546 | |
| 519 | it is only an upper estimation */ | 547 | ret = jffs2_reserve_space_gc(c, alloclen, &alloclen, rawlen); |
| 548 | /* 'rawlen' is not the exact summary size; it is only an upper estimation */ | ||
| 520 | 549 | ||
| 521 | if (ret) | 550 | if (ret) |
| 522 | return ret; | 551 | return ret; |
| @@ -580,22 +609,17 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, | |||
| 580 | } | 609 | } |
| 581 | break; | 610 | break; |
| 582 | default: | 611 | default: |
| 583 | printk(KERN_WARNING "Unknown node type for REF_PRISTINE node at 0x%08x: 0x%04x\n", | 612 | /* If it's inode-less, we don't _know_ what it is. Just copy it intact */ |
| 584 | ref_offset(raw), je16_to_cpu(node->u.nodetype)); | 613 | if (ic) { |
| 585 | goto bail; | 614 | printk(KERN_WARNING "Unknown node type for REF_PRISTINE node at 0x%08x: 0x%04x\n", |
| 586 | } | 615 | ref_offset(raw), je16_to_cpu(node->u.nodetype)); |
| 587 | 616 | goto bail; | |
| 588 | nraw = jffs2_alloc_raw_node_ref(); | 617 | } |
| 589 | if (!nraw) { | ||
| 590 | ret = -ENOMEM; | ||
| 591 | goto out_node; | ||
| 592 | } | 618 | } |
| 593 | 619 | ||
| 594 | /* OK, all the CRCs are good; this node can just be copied as-is. */ | 620 | /* OK, all the CRCs are good; this node can just be copied as-is. */ |
| 595 | retry: | 621 | retry: |
| 596 | nraw->flash_offset = phys_ofs; | 622 | phys_ofs = write_ofs(c); |
| 597 | nraw->__totlen = rawlen; | ||
| 598 | nraw->next_phys = NULL; | ||
| 599 | 623 | ||
| 600 | ret = jffs2_flash_write(c, phys_ofs, rawlen, &retlen, (char *)node); | 624 | ret = jffs2_flash_write(c, phys_ofs, rawlen, &retlen, (char *)node); |
| 601 | 625 | ||
| @@ -603,17 +627,11 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, | |||
| 603 | printk(KERN_NOTICE "Write of %d bytes at 0x%08x failed. returned %d, retlen %zd\n", | 627 | printk(KERN_NOTICE "Write of %d bytes at 0x%08x failed. returned %d, retlen %zd\n", |
| 604 | rawlen, phys_ofs, ret, retlen); | 628 | rawlen, phys_ofs, ret, retlen); |
| 605 | if (retlen) { | 629 | if (retlen) { |
| 606 | /* Doesn't belong to any inode */ | 630 | jffs2_add_physical_node_ref(c, phys_ofs | REF_OBSOLETE, rawlen, NULL); |
| 607 | nraw->next_in_ino = NULL; | ||
| 608 | |||
| 609 | nraw->flash_offset |= REF_OBSOLETE; | ||
| 610 | jffs2_add_physical_node_ref(c, nraw); | ||
| 611 | jffs2_mark_node_obsolete(c, nraw); | ||
| 612 | } else { | 631 | } else { |
| 613 | printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", nraw->flash_offset); | 632 | printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", phys_ofs); |
| 614 | jffs2_free_raw_node_ref(nraw); | ||
| 615 | } | 633 | } |
| 616 | if (!retried && (nraw = jffs2_alloc_raw_node_ref())) { | 634 | if (!retried) { |
| 617 | /* Try to reallocate space and retry */ | 635 | /* Try to reallocate space and retry */ |
| 618 | uint32_t dummy; | 636 | uint32_t dummy; |
| 619 | struct jffs2_eraseblock *jeb = &c->blocks[phys_ofs / c->sector_size]; | 637 | struct jffs2_eraseblock *jeb = &c->blocks[phys_ofs / c->sector_size]; |
| @@ -625,7 +643,7 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, | |||
| 625 | jffs2_dbg_acct_sanity_check(c,jeb); | 643 | jffs2_dbg_acct_sanity_check(c,jeb); |
| 626 | jffs2_dbg_acct_paranoia_check(c, jeb); | 644 | jffs2_dbg_acct_paranoia_check(c, jeb); |
| 627 | 645 | ||
| 628 | ret = jffs2_reserve_space_gc(c, rawlen, &phys_ofs, &dummy, rawlen); | 646 | ret = jffs2_reserve_space_gc(c, rawlen, &dummy, rawlen); |
| 629 | /* this is not the exact summary size of it, | 647 | /* this is not the exact summary size of it, |
| 630 | it is only an upper estimation */ | 648 | it is only an upper estimation */ |
| 631 | 649 | ||
| @@ -638,25 +656,13 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, | |||
| 638 | goto retry; | 656 | goto retry; |
| 639 | } | 657 | } |
| 640 | D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret)); | 658 | D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret)); |
| 641 | jffs2_free_raw_node_ref(nraw); | ||
| 642 | } | 659 | } |
| 643 | 660 | ||
| 644 | jffs2_free_raw_node_ref(nraw); | ||
| 645 | if (!ret) | 661 | if (!ret) |
| 646 | ret = -EIO; | 662 | ret = -EIO; |
| 647 | goto out_node; | 663 | goto out_node; |
| 648 | } | 664 | } |
| 649 | nraw->flash_offset |= REF_PRISTINE; | 665 | jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, rawlen, ic); |
| 650 | jffs2_add_physical_node_ref(c, nraw); | ||
| 651 | |||
| 652 | /* Link into per-inode list. This is safe because of the ic | ||
| 653 | state being INO_STATE_GC. Note that if we're doing this | ||
| 654 | for an inode which is in-core, the 'nraw' pointer is then | ||
| 655 | going to be fetched from ic->nodes by our caller. */ | ||
| 656 | spin_lock(&c->erase_completion_lock); | ||
| 657 | nraw->next_in_ino = ic->nodes; | ||
| 658 | ic->nodes = nraw; | ||
| 659 | spin_unlock(&c->erase_completion_lock); | ||
| 660 | 666 | ||
| 661 | jffs2_mark_node_obsolete(c, raw); | 667 | jffs2_mark_node_obsolete(c, raw); |
| 662 | D1(printk(KERN_DEBUG "WHEEE! GC REF_PRISTINE node at 0x%08x succeeded\n", ref_offset(raw))); | 668 | D1(printk(KERN_DEBUG "WHEEE! GC REF_PRISTINE node at 0x%08x succeeded\n", ref_offset(raw))); |
| @@ -675,19 +681,16 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_ | |||
| 675 | struct jffs2_full_dnode *new_fn; | 681 | struct jffs2_full_dnode *new_fn; |
| 676 | struct jffs2_raw_inode ri; | 682 | struct jffs2_raw_inode ri; |
| 677 | struct jffs2_node_frag *last_frag; | 683 | struct jffs2_node_frag *last_frag; |
| 678 | jint16_t dev; | 684 | union jffs2_device_node dev; |
| 679 | char *mdata = NULL, mdatalen = 0; | 685 | char *mdata = NULL, mdatalen = 0; |
| 680 | uint32_t alloclen, phys_ofs, ilen; | 686 | uint32_t alloclen, ilen; |
| 681 | int ret; | 687 | int ret; |
| 682 | 688 | ||
| 683 | if (S_ISBLK(JFFS2_F_I_MODE(f)) || | 689 | if (S_ISBLK(JFFS2_F_I_MODE(f)) || |
| 684 | S_ISCHR(JFFS2_F_I_MODE(f)) ) { | 690 | S_ISCHR(JFFS2_F_I_MODE(f)) ) { |
| 685 | /* For these, we don't actually need to read the old node */ | 691 | /* For these, we don't actually need to read the old node */ |
| 686 | /* FIXME: for minor or major > 255. */ | 692 | mdatalen = jffs2_encode_dev(&dev, JFFS2_F_I_RDEV(f)); |
| 687 | dev = cpu_to_je16(((JFFS2_F_I_RDEV_MAJ(f) << 8) | | ||
| 688 | JFFS2_F_I_RDEV_MIN(f))); | ||
| 689 | mdata = (char *)&dev; | 693 | mdata = (char *)&dev; |
| 690 | mdatalen = sizeof(dev); | ||
| 691 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bytes of kdev_t\n", mdatalen)); | 694 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bytes of kdev_t\n", mdatalen)); |
| 692 | } else if (S_ISLNK(JFFS2_F_I_MODE(f))) { | 695 | } else if (S_ISLNK(JFFS2_F_I_MODE(f))) { |
| 693 | mdatalen = fn->size; | 696 | mdatalen = fn->size; |
| @@ -706,7 +709,7 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_ | |||
| 706 | 709 | ||
| 707 | } | 710 | } |
| 708 | 711 | ||
| 709 | ret = jffs2_reserve_space_gc(c, sizeof(ri) + mdatalen, &phys_ofs, &alloclen, | 712 | ret = jffs2_reserve_space_gc(c, sizeof(ri) + mdatalen, &alloclen, |
| 710 | JFFS2_SUMMARY_INODE_SIZE); | 713 | JFFS2_SUMMARY_INODE_SIZE); |
| 711 | if (ret) { | 714 | if (ret) { |
| 712 | printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_metadata failed: %d\n", | 715 | printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_metadata failed: %d\n", |
| @@ -744,7 +747,7 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_ | |||
| 744 | ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); | 747 | ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); |
| 745 | ri.data_crc = cpu_to_je32(crc32(0, mdata, mdatalen)); | 748 | ri.data_crc = cpu_to_je32(crc32(0, mdata, mdatalen)); |
| 746 | 749 | ||
| 747 | new_fn = jffs2_write_dnode(c, f, &ri, mdata, mdatalen, phys_ofs, ALLOC_GC); | 750 | new_fn = jffs2_write_dnode(c, f, &ri, mdata, mdatalen, ALLOC_GC); |
| 748 | 751 | ||
| 749 | if (IS_ERR(new_fn)) { | 752 | if (IS_ERR(new_fn)) { |
| 750 | printk(KERN_WARNING "Error writing new dnode: %ld\n", PTR_ERR(new_fn)); | 753 | printk(KERN_WARNING "Error writing new dnode: %ld\n", PTR_ERR(new_fn)); |
| @@ -765,7 +768,7 @@ static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_er | |||
| 765 | { | 768 | { |
| 766 | struct jffs2_full_dirent *new_fd; | 769 | struct jffs2_full_dirent *new_fd; |
| 767 | struct jffs2_raw_dirent rd; | 770 | struct jffs2_raw_dirent rd; |
| 768 | uint32_t alloclen, phys_ofs; | 771 | uint32_t alloclen; |
| 769 | int ret; | 772 | int ret; |
| 770 | 773 | ||
| 771 | rd.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); | 774 | rd.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); |
| @@ -787,14 +790,14 @@ static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_er | |||
| 787 | rd.node_crc = cpu_to_je32(crc32(0, &rd, sizeof(rd)-8)); | 790 | rd.node_crc = cpu_to_je32(crc32(0, &rd, sizeof(rd)-8)); |
| 788 | rd.name_crc = cpu_to_je32(crc32(0, fd->name, rd.nsize)); | 791 | rd.name_crc = cpu_to_je32(crc32(0, fd->name, rd.nsize)); |
| 789 | 792 | ||
| 790 | ret = jffs2_reserve_space_gc(c, sizeof(rd)+rd.nsize, &phys_ofs, &alloclen, | 793 | ret = jffs2_reserve_space_gc(c, sizeof(rd)+rd.nsize, &alloclen, |
| 791 | JFFS2_SUMMARY_DIRENT_SIZE(rd.nsize)); | 794 | JFFS2_SUMMARY_DIRENT_SIZE(rd.nsize)); |
| 792 | if (ret) { | 795 | if (ret) { |
| 793 | printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dirent failed: %d\n", | 796 | printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dirent failed: %d\n", |
| 794 | sizeof(rd)+rd.nsize, ret); | 797 | sizeof(rd)+rd.nsize, ret); |
| 795 | return ret; | 798 | return ret; |
| 796 | } | 799 | } |
| 797 | new_fd = jffs2_write_dirent(c, f, &rd, fd->name, rd.nsize, phys_ofs, ALLOC_GC); | 800 | new_fd = jffs2_write_dirent(c, f, &rd, fd->name, rd.nsize, ALLOC_GC); |
| 798 | 801 | ||
| 799 | if (IS_ERR(new_fd)) { | 802 | if (IS_ERR(new_fd)) { |
| 800 | printk(KERN_WARNING "jffs2_write_dirent in garbage_collect_dirent failed: %ld\n", PTR_ERR(new_fd)); | 803 | printk(KERN_WARNING "jffs2_write_dirent in garbage_collect_dirent failed: %ld\n", PTR_ERR(new_fd)); |
| @@ -922,7 +925,7 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras | |||
| 922 | struct jffs2_raw_inode ri; | 925 | struct jffs2_raw_inode ri; |
| 923 | struct jffs2_node_frag *frag; | 926 | struct jffs2_node_frag *frag; |
| 924 | struct jffs2_full_dnode *new_fn; | 927 | struct jffs2_full_dnode *new_fn; |
| 925 | uint32_t alloclen, phys_ofs, ilen; | 928 | uint32_t alloclen, ilen; |
| 926 | int ret; | 929 | int ret; |
| 927 | 930 | ||
| 928 | D1(printk(KERN_DEBUG "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n", | 931 | D1(printk(KERN_DEBUG "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n", |
| @@ -1001,14 +1004,14 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras | |||
| 1001 | ri.data_crc = cpu_to_je32(0); | 1004 | ri.data_crc = cpu_to_je32(0); |
| 1002 | ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); | 1005 | ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); |
| 1003 | 1006 | ||
| 1004 | ret = jffs2_reserve_space_gc(c, sizeof(ri), &phys_ofs, &alloclen, | 1007 | ret = jffs2_reserve_space_gc(c, sizeof(ri), &alloclen, |
| 1005 | JFFS2_SUMMARY_INODE_SIZE); | 1008 | JFFS2_SUMMARY_INODE_SIZE); |
| 1006 | if (ret) { | 1009 | if (ret) { |
| 1007 | printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_hole failed: %d\n", | 1010 | printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_hole failed: %d\n", |
| 1008 | sizeof(ri), ret); | 1011 | sizeof(ri), ret); |
| 1009 | return ret; | 1012 | return ret; |
| 1010 | } | 1013 | } |
| 1011 | new_fn = jffs2_write_dnode(c, f, &ri, NULL, 0, phys_ofs, ALLOC_GC); | 1014 | new_fn = jffs2_write_dnode(c, f, &ri, NULL, 0, ALLOC_GC); |
| 1012 | 1015 | ||
| 1013 | if (IS_ERR(new_fn)) { | 1016 | if (IS_ERR(new_fn)) { |
| 1014 | printk(KERN_WARNING "Error writing new hole node: %ld\n", PTR_ERR(new_fn)); | 1017 | printk(KERN_WARNING "Error writing new hole node: %ld\n", PTR_ERR(new_fn)); |
| @@ -1070,7 +1073,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era | |||
| 1070 | { | 1073 | { |
| 1071 | struct jffs2_full_dnode *new_fn; | 1074 | struct jffs2_full_dnode *new_fn; |
| 1072 | struct jffs2_raw_inode ri; | 1075 | struct jffs2_raw_inode ri; |
| 1073 | uint32_t alloclen, phys_ofs, offset, orig_end, orig_start; | 1076 | uint32_t alloclen, offset, orig_end, orig_start; |
| 1074 | int ret = 0; | 1077 | int ret = 0; |
| 1075 | unsigned char *comprbuf = NULL, *writebuf; | 1078 | unsigned char *comprbuf = NULL, *writebuf; |
| 1076 | unsigned long pg; | 1079 | unsigned long pg; |
| @@ -1227,7 +1230,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era | |||
| 1227 | uint32_t cdatalen; | 1230 | uint32_t cdatalen; |
| 1228 | uint16_t comprtype = JFFS2_COMPR_NONE; | 1231 | uint16_t comprtype = JFFS2_COMPR_NONE; |
| 1229 | 1232 | ||
| 1230 | ret = jffs2_reserve_space_gc(c, sizeof(ri) + JFFS2_MIN_DATA_LEN, &phys_ofs, | 1233 | ret = jffs2_reserve_space_gc(c, sizeof(ri) + JFFS2_MIN_DATA_LEN, |
| 1231 | &alloclen, JFFS2_SUMMARY_INODE_SIZE); | 1234 | &alloclen, JFFS2_SUMMARY_INODE_SIZE); |
| 1232 | 1235 | ||
| 1233 | if (ret) { | 1236 | if (ret) { |
| @@ -1264,7 +1267,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era | |||
| 1264 | ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); | 1267 | ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); |
| 1265 | ri.data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen)); | 1268 | ri.data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen)); |
| 1266 | 1269 | ||
| 1267 | new_fn = jffs2_write_dnode(c, f, &ri, comprbuf, cdatalen, phys_ofs, ALLOC_GC); | 1270 | new_fn = jffs2_write_dnode(c, f, &ri, comprbuf, cdatalen, ALLOC_GC); |
| 1268 | 1271 | ||
| 1269 | jffs2_free_comprbuf(comprbuf, writebuf); | 1272 | jffs2_free_comprbuf(comprbuf, writebuf); |
| 1270 | 1273 | ||
diff --git a/fs/jffs2/histo.h b/fs/jffs2/histo.h deleted file mode 100644 index 22a93a08210c..000000000000 --- a/fs/jffs2/histo.h +++ /dev/null | |||
| @@ -1,3 +0,0 @@ | |||
| 1 | /* This file provides the bit-probabilities for the input file */ | ||
| 2 | #define BIT_DIVIDER 629 | ||
| 3 | static int bits[9] = { 179,167,183,165,159,198,178,119,}; /* ia32 .so files */ | ||
diff --git a/fs/jffs2/jffs2_fs_i.h b/fs/jffs2/jffs2_fs_i.h new file mode 100644 index 000000000000..2e0cc8e00b85 --- /dev/null +++ b/fs/jffs2/jffs2_fs_i.h | |||
| @@ -0,0 +1,55 @@ | |||
| 1 | /* $Id: jffs2_fs_i.h,v 1.19 2005/11/07 11:14:52 gleixner Exp $ */ | ||
| 2 | |||
| 3 | #ifndef _JFFS2_FS_I | ||
| 4 | #define _JFFS2_FS_I | ||
| 5 | |||
| 6 | #include <linux/version.h> | ||
| 7 | #include <linux/rbtree.h> | ||
| 8 | #include <linux/posix_acl.h> | ||
| 9 | #include <asm/semaphore.h> | ||
| 10 | |||
| 11 | struct jffs2_inode_info { | ||
| 12 | /* We need an internal mutex similar to inode->i_mutex. | ||
| 13 | Unfortunately, we can't used the existing one, because | ||
| 14 | either the GC would deadlock, or we'd have to release it | ||
| 15 | before letting GC proceed. Or we'd have to put ugliness | ||
| 16 | into the GC code so it didn't attempt to obtain the i_mutex | ||
| 17 | for the inode(s) which are already locked */ | ||
| 18 | struct semaphore sem; | ||
| 19 | |||
| 20 | /* The highest (datanode) version number used for this ino */ | ||
| 21 | uint32_t highest_version; | ||
| 22 | |||
| 23 | /* List of data fragments which make up the file */ | ||
| 24 | struct rb_root fragtree; | ||
| 25 | |||
| 26 | /* There may be one datanode which isn't referenced by any of the | ||
| 27 | above fragments, if it contains a metadata update but no actual | ||
| 28 | data - or if this is a directory inode */ | ||
| 29 | /* This also holds the _only_ dnode for symlinks/device nodes, | ||
| 30 | etc. */ | ||
| 31 | struct jffs2_full_dnode *metadata; | ||
| 32 | |||
| 33 | /* Directory entries */ | ||
| 34 | struct jffs2_full_dirent *dents; | ||
| 35 | |||
| 36 | /* The target path if this is the inode of a symlink */ | ||
| 37 | unsigned char *target; | ||
| 38 | |||
| 39 | /* Some stuff we just have to keep in-core at all times, for each inode. */ | ||
| 40 | struct jffs2_inode_cache *inocache; | ||
| 41 | |||
| 42 | uint16_t flags; | ||
| 43 | uint8_t usercompr; | ||
| 44 | #if !defined (__ECOS) | ||
| 45 | #if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,2) | ||
| 46 | struct inode vfs_inode; | ||
| 47 | #endif | ||
| 48 | #endif | ||
| 49 | #ifdef CONFIG_JFFS2_FS_POSIX_ACL | ||
| 50 | struct posix_acl *i_acl_access; | ||
| 51 | struct posix_acl *i_acl_default; | ||
| 52 | #endif | ||
| 53 | }; | ||
| 54 | |||
| 55 | #endif /* _JFFS2_FS_I */ | ||
diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h new file mode 100644 index 000000000000..935fec1b1201 --- /dev/null +++ b/fs/jffs2/jffs2_fs_sb.h | |||
| @@ -0,0 +1,133 @@ | |||
| 1 | /* $Id: jffs2_fs_sb.h,v 1.54 2005/09/21 13:37:34 dedekind Exp $ */ | ||
| 2 | |||
| 3 | #ifndef _JFFS2_FS_SB | ||
| 4 | #define _JFFS2_FS_SB | ||
| 5 | |||
| 6 | #include <linux/types.h> | ||
| 7 | #include <linux/spinlock.h> | ||
| 8 | #include <linux/workqueue.h> | ||
| 9 | #include <linux/completion.h> | ||
| 10 | #include <asm/semaphore.h> | ||
| 11 | #include <linux/timer.h> | ||
| 12 | #include <linux/wait.h> | ||
| 13 | #include <linux/list.h> | ||
| 14 | #include <linux/rwsem.h> | ||
| 15 | |||
| 16 | #define JFFS2_SB_FLAG_RO 1 | ||
| 17 | #define JFFS2_SB_FLAG_SCANNING 2 /* Flash scanning is in progress */ | ||
| 18 | #define JFFS2_SB_FLAG_BUILDING 4 /* File system building is in progress */ | ||
| 19 | |||
| 20 | struct jffs2_inodirty; | ||
| 21 | |||
| 22 | /* A struct for the overall file system control. Pointers to | ||
| 23 | jffs2_sb_info structs are named `c' in the source code. | ||
| 24 | Nee jffs_control | ||
| 25 | */ | ||
| 26 | struct jffs2_sb_info { | ||
| 27 | struct mtd_info *mtd; | ||
| 28 | |||
| 29 | uint32_t highest_ino; | ||
| 30 | uint32_t checked_ino; | ||
| 31 | |||
| 32 | unsigned int flags; | ||
| 33 | |||
| 34 | struct task_struct *gc_task; /* GC task struct */ | ||
| 35 | struct completion gc_thread_start; /* GC thread start completion */ | ||
| 36 | struct completion gc_thread_exit; /* GC thread exit completion port */ | ||
| 37 | |||
| 38 | struct semaphore alloc_sem; /* Used to protect all the following | ||
| 39 | fields, and also to protect against | ||
| 40 | out-of-order writing of nodes. And GC. */ | ||
| 41 | uint32_t cleanmarker_size; /* Size of an _inline_ CLEANMARKER | ||
| 42 | (i.e. zero for OOB CLEANMARKER) */ | ||
| 43 | |||
| 44 | uint32_t flash_size; | ||
| 45 | uint32_t used_size; | ||
| 46 | uint32_t dirty_size; | ||
| 47 | uint32_t wasted_size; | ||
| 48 | uint32_t free_size; | ||
| 49 | uint32_t erasing_size; | ||
| 50 | uint32_t bad_size; | ||
| 51 | uint32_t sector_size; | ||
| 52 | uint32_t unchecked_size; | ||
| 53 | |||
| 54 | uint32_t nr_free_blocks; | ||
| 55 | uint32_t nr_erasing_blocks; | ||
| 56 | |||
| 57 | /* Number of free blocks there must be before we... */ | ||
| 58 | uint8_t resv_blocks_write; /* ... allow a normal filesystem write */ | ||
| 59 | uint8_t resv_blocks_deletion; /* ... allow a normal filesystem deletion */ | ||
| 60 | uint8_t resv_blocks_gctrigger; /* ... wake up the GC thread */ | ||
| 61 | uint8_t resv_blocks_gcbad; /* ... pick a block from the bad_list to GC */ | ||
| 62 | uint8_t resv_blocks_gcmerge; /* ... merge pages when garbage collecting */ | ||
| 63 | |||
| 64 | uint32_t nospc_dirty_size; | ||
| 65 | |||
| 66 | uint32_t nr_blocks; | ||
| 67 | struct jffs2_eraseblock *blocks; /* The whole array of blocks. Used for getting blocks | ||
| 68 | * from the offset (blocks[ofs / sector_size]) */ | ||
| 69 | struct jffs2_eraseblock *nextblock; /* The block we're currently filling */ | ||
| 70 | |||
| 71 | struct jffs2_eraseblock *gcblock; /* The block we're currently garbage-collecting */ | ||
| 72 | |||
| 73 | struct list_head clean_list; /* Blocks 100% full of clean data */ | ||
| 74 | struct list_head very_dirty_list; /* Blocks with lots of dirty space */ | ||
| 75 | struct list_head dirty_list; /* Blocks with some dirty space */ | ||
| 76 | struct list_head erasable_list; /* Blocks which are completely dirty, and need erasing */ | ||
| 77 | struct list_head erasable_pending_wbuf_list; /* Blocks which need erasing but only after the current wbuf is flushed */ | ||
| 78 | struct list_head erasing_list; /* Blocks which are currently erasing */ | ||
| 79 | struct list_head erase_pending_list; /* Blocks which need erasing now */ | ||
| 80 | struct list_head erase_complete_list; /* Blocks which are erased and need the clean marker written to them */ | ||
| 81 | struct list_head free_list; /* Blocks which are free and ready to be used */ | ||
| 82 | struct list_head bad_list; /* Bad blocks. */ | ||
| 83 | struct list_head bad_used_list; /* Bad blocks with valid data in. */ | ||
| 84 | |||
| 85 | spinlock_t erase_completion_lock; /* Protect free_list and erasing_list | ||
| 86 | against erase completion handler */ | ||
| 87 | wait_queue_head_t erase_wait; /* For waiting for erases to complete */ | ||
| 88 | |||
| 89 | wait_queue_head_t inocache_wq; | ||
| 90 | struct jffs2_inode_cache **inocache_list; | ||
| 91 | spinlock_t inocache_lock; | ||
| 92 | |||
| 93 | /* Sem to allow jffs2_garbage_collect_deletion_dirent to | ||
| 94 | drop the erase_completion_lock while it's holding a pointer | ||
| 95 | to an obsoleted node. I don't like this. Alternatives welcomed. */ | ||
| 96 | struct semaphore erase_free_sem; | ||
| 97 | |||
| 98 | uint32_t wbuf_pagesize; /* 0 for NOR and other flashes with no wbuf */ | ||
| 99 | |||
| 100 | #ifdef CONFIG_JFFS2_FS_WRITEBUFFER | ||
| 101 | /* Write-behind buffer for NAND flash */ | ||
| 102 | unsigned char *wbuf; | ||
| 103 | unsigned char *oobbuf; | ||
| 104 | uint32_t wbuf_ofs; | ||
| 105 | uint32_t wbuf_len; | ||
| 106 | struct jffs2_inodirty *wbuf_inodes; | ||
| 107 | |||
| 108 | struct rw_semaphore wbuf_sem; /* Protects the write buffer */ | ||
| 109 | |||
| 110 | /* Information about out-of-band area usage... */ | ||
| 111 | struct nand_ecclayout *ecclayout; | ||
| 112 | uint32_t badblock_pos; | ||
| 113 | uint32_t fsdata_pos; | ||
| 114 | uint32_t fsdata_len; | ||
| 115 | #endif | ||
| 116 | |||
| 117 | struct jffs2_summary *summary; /* Summary information */ | ||
| 118 | |||
| 119 | #ifdef CONFIG_JFFS2_FS_XATTR | ||
| 120 | #define XATTRINDEX_HASHSIZE (57) | ||
| 121 | uint32_t highest_xid; | ||
| 122 | struct list_head xattrindex[XATTRINDEX_HASHSIZE]; | ||
| 123 | struct list_head xattr_unchecked; | ||
| 124 | struct jffs2_xattr_ref *xref_temp; | ||
| 125 | struct rw_semaphore xattr_sem; | ||
| 126 | uint32_t xdatum_mem_usage; | ||
| 127 | uint32_t xdatum_mem_threshold; | ||
| 128 | #endif | ||
| 129 | /* OS-private pointer for getting back to master superblock info */ | ||
| 130 | void *os_priv; | ||
| 131 | }; | ||
| 132 | |||
| 133 | #endif /* _JFFS2_FS_SB */ | ||
diff --git a/fs/jffs2/malloc.c b/fs/jffs2/malloc.c index 036cbd11c004..4889d0700c0e 100644 --- a/fs/jffs2/malloc.c +++ b/fs/jffs2/malloc.c | |||
| @@ -26,6 +26,10 @@ static kmem_cache_t *tmp_dnode_info_slab; | |||
| 26 | static kmem_cache_t *raw_node_ref_slab; | 26 | static kmem_cache_t *raw_node_ref_slab; |
| 27 | static kmem_cache_t *node_frag_slab; | 27 | static kmem_cache_t *node_frag_slab; |
| 28 | static kmem_cache_t *inode_cache_slab; | 28 | static kmem_cache_t *inode_cache_slab; |
| 29 | #ifdef CONFIG_JFFS2_FS_XATTR | ||
| 30 | static kmem_cache_t *xattr_datum_cache; | ||
| 31 | static kmem_cache_t *xattr_ref_cache; | ||
| 32 | #endif | ||
| 29 | 33 | ||
| 30 | int __init jffs2_create_slab_caches(void) | 34 | int __init jffs2_create_slab_caches(void) |
| 31 | { | 35 | { |
| @@ -53,8 +57,8 @@ int __init jffs2_create_slab_caches(void) | |||
| 53 | if (!tmp_dnode_info_slab) | 57 | if (!tmp_dnode_info_slab) |
| 54 | goto err; | 58 | goto err; |
| 55 | 59 | ||
| 56 | raw_node_ref_slab = kmem_cache_create("jffs2_raw_node_ref", | 60 | raw_node_ref_slab = kmem_cache_create("jffs2_refblock", |
| 57 | sizeof(struct jffs2_raw_node_ref), | 61 | sizeof(struct jffs2_raw_node_ref) * (REFS_PER_BLOCK + 1), |
| 58 | 0, 0, NULL, NULL); | 62 | 0, 0, NULL, NULL); |
| 59 | if (!raw_node_ref_slab) | 63 | if (!raw_node_ref_slab) |
| 60 | goto err; | 64 | goto err; |
| @@ -68,8 +72,24 @@ int __init jffs2_create_slab_caches(void) | |||
| 68 | inode_cache_slab = kmem_cache_create("jffs2_inode_cache", | 72 | inode_cache_slab = kmem_cache_create("jffs2_inode_cache", |
| 69 | sizeof(struct jffs2_inode_cache), | 73 | sizeof(struct jffs2_inode_cache), |
| 70 | 0, 0, NULL, NULL); | 74 | 0, 0, NULL, NULL); |
| 71 | if (inode_cache_slab) | 75 | if (!inode_cache_slab) |
| 72 | return 0; | 76 | goto err; |
| 77 | |||
| 78 | #ifdef CONFIG_JFFS2_FS_XATTR | ||
| 79 | xattr_datum_cache = kmem_cache_create("jffs2_xattr_datum", | ||
| 80 | sizeof(struct jffs2_xattr_datum), | ||
| 81 | 0, 0, NULL, NULL); | ||
| 82 | if (!xattr_datum_cache) | ||
| 83 | goto err; | ||
| 84 | |||
| 85 | xattr_ref_cache = kmem_cache_create("jffs2_xattr_ref", | ||
| 86 | sizeof(struct jffs2_xattr_ref), | ||
| 87 | 0, 0, NULL, NULL); | ||
| 88 | if (!xattr_ref_cache) | ||
| 89 | goto err; | ||
| 90 | #endif | ||
| 91 | |||
| 92 | return 0; | ||
| 73 | err: | 93 | err: |
| 74 | jffs2_destroy_slab_caches(); | 94 | jffs2_destroy_slab_caches(); |
| 75 | return -ENOMEM; | 95 | return -ENOMEM; |
| @@ -91,6 +111,12 @@ void jffs2_destroy_slab_caches(void) | |||
| 91 | kmem_cache_destroy(node_frag_slab); | 111 | kmem_cache_destroy(node_frag_slab); |
| 92 | if(inode_cache_slab) | 112 | if(inode_cache_slab) |
| 93 | kmem_cache_destroy(inode_cache_slab); | 113 | kmem_cache_destroy(inode_cache_slab); |
| 114 | #ifdef CONFIG_JFFS2_FS_XATTR | ||
| 115 | if (xattr_datum_cache) | ||
| 116 | kmem_cache_destroy(xattr_datum_cache); | ||
| 117 | if (xattr_ref_cache) | ||
| 118 | kmem_cache_destroy(xattr_ref_cache); | ||
| 119 | #endif | ||
| 94 | } | 120 | } |
| 95 | 121 | ||
| 96 | struct jffs2_full_dirent *jffs2_alloc_full_dirent(int namesize) | 122 | struct jffs2_full_dirent *jffs2_alloc_full_dirent(int namesize) |
| @@ -164,15 +190,65 @@ void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *x) | |||
| 164 | kmem_cache_free(tmp_dnode_info_slab, x); | 190 | kmem_cache_free(tmp_dnode_info_slab, x); |
| 165 | } | 191 | } |
| 166 | 192 | ||
| 167 | struct jffs2_raw_node_ref *jffs2_alloc_raw_node_ref(void) | 193 | struct jffs2_raw_node_ref *jffs2_alloc_refblock(void) |
| 168 | { | 194 | { |
| 169 | struct jffs2_raw_node_ref *ret; | 195 | struct jffs2_raw_node_ref *ret; |
| 196 | |||
| 170 | ret = kmem_cache_alloc(raw_node_ref_slab, GFP_KERNEL); | 197 | ret = kmem_cache_alloc(raw_node_ref_slab, GFP_KERNEL); |
| 171 | dbg_memalloc("%p\n", ret); | 198 | if (ret) { |
| 199 | int i = 0; | ||
| 200 | for (i=0; i < REFS_PER_BLOCK; i++) { | ||
| 201 | ret[i].flash_offset = REF_EMPTY_NODE; | ||
| 202 | ret[i].next_in_ino = NULL; | ||
| 203 | } | ||
| 204 | ret[i].flash_offset = REF_LINK_NODE; | ||
| 205 | ret[i].next_in_ino = NULL; | ||
| 206 | } | ||
| 172 | return ret; | 207 | return ret; |
| 173 | } | 208 | } |
| 174 | 209 | ||
| 175 | void jffs2_free_raw_node_ref(struct jffs2_raw_node_ref *x) | 210 | int jffs2_prealloc_raw_node_refs(struct jffs2_sb_info *c, |
| 211 | struct jffs2_eraseblock *jeb, int nr) | ||
| 212 | { | ||
| 213 | struct jffs2_raw_node_ref **p, *ref; | ||
| 214 | int i = nr; | ||
| 215 | |||
| 216 | dbg_memalloc("%d\n", nr); | ||
| 217 | |||
| 218 | p = &jeb->last_node; | ||
| 219 | ref = *p; | ||
| 220 | |||
| 221 | dbg_memalloc("Reserving %d refs for block @0x%08x\n", nr, jeb->offset); | ||
| 222 | |||
| 223 | /* If jeb->last_node is really a valid node then skip over it */ | ||
| 224 | if (ref && ref->flash_offset != REF_EMPTY_NODE) | ||
| 225 | ref++; | ||
| 226 | |||
| 227 | while (i) { | ||
| 228 | if (!ref) { | ||
| 229 | dbg_memalloc("Allocating new refblock linked from %p\n", p); | ||
| 230 | ref = *p = jffs2_alloc_refblock(); | ||
| 231 | if (!ref) | ||
| 232 | return -ENOMEM; | ||
| 233 | } | ||
| 234 | if (ref->flash_offset == REF_LINK_NODE) { | ||
| 235 | p = &ref->next_in_ino; | ||
| 236 | ref = *p; | ||
| 237 | continue; | ||
| 238 | } | ||
| 239 | i--; | ||
| 240 | ref++; | ||
| 241 | } | ||
| 242 | jeb->allocated_refs = nr; | ||
| 243 | |||
| 244 | dbg_memalloc("Reserved %d refs for block @0x%08x, last_node is %p (%08x,%p)\n", | ||
| 245 | nr, jeb->offset, jeb->last_node, jeb->last_node->flash_offset, | ||
| 246 | jeb->last_node->next_in_ino); | ||
| 247 | |||
| 248 | return 0; | ||
| 249 | } | ||
| 250 | |||
| 251 | void jffs2_free_refblock(struct jffs2_raw_node_ref *x) | ||
| 176 | { | 252 | { |
| 177 | dbg_memalloc("%p\n", x); | 253 | dbg_memalloc("%p\n", x); |
| 178 | kmem_cache_free(raw_node_ref_slab, x); | 254 | kmem_cache_free(raw_node_ref_slab, x); |
| @@ -205,3 +281,40 @@ void jffs2_free_inode_cache(struct jffs2_inode_cache *x) | |||
| 205 | dbg_memalloc("%p\n", x); | 281 | dbg_memalloc("%p\n", x); |
| 206 | kmem_cache_free(inode_cache_slab, x); | 282 | kmem_cache_free(inode_cache_slab, x); |
| 207 | } | 283 | } |
| 284 | |||
| 285 | #ifdef CONFIG_JFFS2_FS_XATTR | ||
| 286 | struct jffs2_xattr_datum *jffs2_alloc_xattr_datum(void) | ||
| 287 | { | ||
| 288 | struct jffs2_xattr_datum *xd; | ||
| 289 | xd = kmem_cache_alloc(xattr_datum_cache, GFP_KERNEL); | ||
| 290 | dbg_memalloc("%p\n", xd); | ||
| 291 | |||
| 292 | memset(xd, 0, sizeof(struct jffs2_xattr_datum)); | ||
| 293 | xd->class = RAWNODE_CLASS_XATTR_DATUM; | ||
| 294 | INIT_LIST_HEAD(&xd->xindex); | ||
| 295 | return xd; | ||
| 296 | } | ||
| 297 | |||
| 298 | void jffs2_free_xattr_datum(struct jffs2_xattr_datum *xd) | ||
| 299 | { | ||
| 300 | dbg_memalloc("%p\n", xd); | ||
| 301 | kmem_cache_free(xattr_datum_cache, xd); | ||
| 302 | } | ||
| 303 | |||
| 304 | struct jffs2_xattr_ref *jffs2_alloc_xattr_ref(void) | ||
| 305 | { | ||
| 306 | struct jffs2_xattr_ref *ref; | ||
| 307 | ref = kmem_cache_alloc(xattr_ref_cache, GFP_KERNEL); | ||
| 308 | dbg_memalloc("%p\n", ref); | ||
| 309 | |||
| 310 | memset(ref, 0, sizeof(struct jffs2_xattr_ref)); | ||
| 311 | ref->class = RAWNODE_CLASS_XATTR_REF; | ||
| 312 | return ref; | ||
| 313 | } | ||
| 314 | |||
| 315 | void jffs2_free_xattr_ref(struct jffs2_xattr_ref *ref) | ||
| 316 | { | ||
| 317 | dbg_memalloc("%p\n", ref); | ||
| 318 | kmem_cache_free(xattr_ref_cache, ref); | ||
| 319 | } | ||
| 320 | #endif | ||
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c index 1d46677afd17..927dfe42ba76 100644 --- a/fs/jffs2/nodelist.c +++ b/fs/jffs2/nodelist.c | |||
| @@ -438,8 +438,7 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info | |||
| 438 | if (c->mtd->point) { | 438 | if (c->mtd->point) { |
| 439 | err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer); | 439 | err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer); |
| 440 | if (!err && retlen < tn->csize) { | 440 | if (!err && retlen < tn->csize) { |
| 441 | JFFS2_WARNING("MTD point returned len too short: %zu " | 441 | JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, tn->csize); |
| 442 | "instead of %u.\n", retlen, tn->csize); | ||
| 443 | c->mtd->unpoint(c->mtd, buffer, ofs, len); | 442 | c->mtd->unpoint(c->mtd, buffer, ofs, len); |
| 444 | } else if (err) | 443 | } else if (err) |
| 445 | JFFS2_WARNING("MTD point failed: error code %d.\n", err); | 444 | JFFS2_WARNING("MTD point failed: error code %d.\n", err); |
| @@ -462,8 +461,7 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info | |||
| 462 | } | 461 | } |
| 463 | 462 | ||
| 464 | if (retlen != len) { | 463 | if (retlen != len) { |
| 465 | JFFS2_ERROR("short read at %#08x: %zd instead of %d.\n", | 464 | JFFS2_ERROR("short read at %#08x: %zd instead of %d.\n", ofs, retlen, len); |
| 466 | ofs, retlen, len); | ||
| 467 | err = -EIO; | 465 | err = -EIO; |
| 468 | goto free_out; | 466 | goto free_out; |
| 469 | } | 467 | } |
| @@ -940,6 +938,7 @@ void jffs2_free_ino_caches(struct jffs2_sb_info *c) | |||
| 940 | this = c->inocache_list[i]; | 938 | this = c->inocache_list[i]; |
| 941 | while (this) { | 939 | while (this) { |
| 942 | next = this->next; | 940 | next = this->next; |
| 941 | jffs2_xattr_free_inode(c, this); | ||
| 943 | jffs2_free_inode_cache(this); | 942 | jffs2_free_inode_cache(this); |
| 944 | this = next; | 943 | this = next; |
| 945 | } | 944 | } |
| @@ -954,9 +953,13 @@ void jffs2_free_raw_node_refs(struct jffs2_sb_info *c) | |||
| 954 | 953 | ||
| 955 | for (i=0; i<c->nr_blocks; i++) { | 954 | for (i=0; i<c->nr_blocks; i++) { |
| 956 | this = c->blocks[i].first_node; | 955 | this = c->blocks[i].first_node; |
| 957 | while(this) { | 956 | while (this) { |
| 958 | next = this->next_phys; | 957 | if (this[REFS_PER_BLOCK].flash_offset == REF_LINK_NODE) |
| 959 | jffs2_free_raw_node_ref(this); | 958 | next = this[REFS_PER_BLOCK].next_in_ino; |
| 959 | else | ||
| 960 | next = NULL; | ||
| 961 | |||
| 962 | jffs2_free_refblock(this); | ||
| 960 | this = next; | 963 | this = next; |
| 961 | } | 964 | } |
| 962 | c->blocks[i].first_node = c->blocks[i].last_node = NULL; | 965 | c->blocks[i].first_node = c->blocks[i].last_node = NULL; |
| @@ -1047,3 +1050,169 @@ void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c) | |||
| 1047 | cond_resched(); | 1050 | cond_resched(); |
| 1048 | } | 1051 | } |
| 1049 | } | 1052 | } |
| 1053 | |||
| 1054 | struct jffs2_raw_node_ref *jffs2_link_node_ref(struct jffs2_sb_info *c, | ||
| 1055 | struct jffs2_eraseblock *jeb, | ||
| 1056 | uint32_t ofs, uint32_t len, | ||
| 1057 | struct jffs2_inode_cache *ic) | ||
| 1058 | { | ||
| 1059 | struct jffs2_raw_node_ref *ref; | ||
| 1060 | |||
| 1061 | BUG_ON(!jeb->allocated_refs); | ||
| 1062 | jeb->allocated_refs--; | ||
| 1063 | |||
| 1064 | ref = jeb->last_node; | ||
| 1065 | |||
| 1066 | dbg_noderef("Last node at %p is (%08x,%p)\n", ref, ref->flash_offset, | ||
| 1067 | ref->next_in_ino); | ||
| 1068 | |||
| 1069 | while (ref->flash_offset != REF_EMPTY_NODE) { | ||
| 1070 | if (ref->flash_offset == REF_LINK_NODE) | ||
| 1071 | ref = ref->next_in_ino; | ||
| 1072 | else | ||
| 1073 | ref++; | ||
| 1074 | } | ||
| 1075 | |||
| 1076 | dbg_noderef("New ref is %p (%08x becomes %08x,%p) len 0x%x\n", ref, | ||
| 1077 | ref->flash_offset, ofs, ref->next_in_ino, len); | ||
| 1078 | |||
| 1079 | ref->flash_offset = ofs; | ||
| 1080 | |||
| 1081 | if (!jeb->first_node) { | ||
| 1082 | jeb->first_node = ref; | ||
| 1083 | BUG_ON(ref_offset(ref) != jeb->offset); | ||
| 1084 | } else if (unlikely(ref_offset(ref) != jeb->offset + c->sector_size - jeb->free_size)) { | ||
| 1085 | uint32_t last_len = ref_totlen(c, jeb, jeb->last_node); | ||
| 1086 | |||
| 1087 | JFFS2_ERROR("Adding new ref %p at (0x%08x-0x%08x) not immediately after previous (0x%08x-0x%08x)\n", | ||
| 1088 | ref, ref_offset(ref), ref_offset(ref)+len, | ||
| 1089 | ref_offset(jeb->last_node), | ||
| 1090 | ref_offset(jeb->last_node)+last_len); | ||
| 1091 | BUG(); | ||
| 1092 | } | ||
| 1093 | jeb->last_node = ref; | ||
| 1094 | |||
| 1095 | if (ic) { | ||
| 1096 | ref->next_in_ino = ic->nodes; | ||
| 1097 | ic->nodes = ref; | ||
| 1098 | } else { | ||
| 1099 | ref->next_in_ino = NULL; | ||
| 1100 | } | ||
| 1101 | |||
| 1102 | switch(ref_flags(ref)) { | ||
| 1103 | case REF_UNCHECKED: | ||
| 1104 | c->unchecked_size += len; | ||
| 1105 | jeb->unchecked_size += len; | ||
| 1106 | break; | ||
| 1107 | |||
| 1108 | case REF_NORMAL: | ||
| 1109 | case REF_PRISTINE: | ||
| 1110 | c->used_size += len; | ||
| 1111 | jeb->used_size += len; | ||
| 1112 | break; | ||
| 1113 | |||
| 1114 | case REF_OBSOLETE: | ||
| 1115 | c->dirty_size += len; | ||
| 1116 | jeb->dirty_size += len; | ||
| 1117 | break; | ||
| 1118 | } | ||
| 1119 | c->free_size -= len; | ||
| 1120 | jeb->free_size -= len; | ||
| 1121 | |||
| 1122 | #ifdef TEST_TOTLEN | ||
| 1123 | /* Set (and test) __totlen field... for now */ | ||
| 1124 | ref->__totlen = len; | ||
| 1125 | ref_totlen(c, jeb, ref); | ||
| 1126 | #endif | ||
| 1127 | return ref; | ||
| 1128 | } | ||
| 1129 | |||
| 1130 | /* No locking, no reservation of 'ref'. Do not use on a live file system */ | ||
| 1131 | int jffs2_scan_dirty_space(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | ||
| 1132 | uint32_t size) | ||
| 1133 | { | ||
| 1134 | if (!size) | ||
| 1135 | return 0; | ||
| 1136 | if (unlikely(size > jeb->free_size)) { | ||
| 1137 | printk(KERN_CRIT "Dirty space 0x%x larger then free_size 0x%x (wasted 0x%x)\n", | ||
| 1138 | size, jeb->free_size, jeb->wasted_size); | ||
| 1139 | BUG(); | ||
| 1140 | } | ||
| 1141 | /* REF_EMPTY_NODE is !obsolete, so that works OK */ | ||
| 1142 | if (jeb->last_node && ref_obsolete(jeb->last_node)) { | ||
| 1143 | #ifdef TEST_TOTLEN | ||
| 1144 | jeb->last_node->__totlen += size; | ||
| 1145 | #endif | ||
| 1146 | c->dirty_size += size; | ||
| 1147 | c->free_size -= size; | ||
| 1148 | jeb->dirty_size += size; | ||
| 1149 | jeb->free_size -= size; | ||
| 1150 | } else { | ||
| 1151 | uint32_t ofs = jeb->offset + c->sector_size - jeb->free_size; | ||
| 1152 | ofs |= REF_OBSOLETE; | ||
| 1153 | |||
| 1154 | jffs2_link_node_ref(c, jeb, ofs, size, NULL); | ||
| 1155 | } | ||
| 1156 | |||
| 1157 | return 0; | ||
| 1158 | } | ||
| 1159 | |||
| 1160 | /* Calculate totlen from surrounding nodes or eraseblock */ | ||
| 1161 | static inline uint32_t __ref_totlen(struct jffs2_sb_info *c, | ||
| 1162 | struct jffs2_eraseblock *jeb, | ||
| 1163 | struct jffs2_raw_node_ref *ref) | ||
| 1164 | { | ||
| 1165 | uint32_t ref_end; | ||
| 1166 | struct jffs2_raw_node_ref *next_ref = ref_next(ref); | ||
| 1167 | |||
| 1168 | if (next_ref) | ||
| 1169 | ref_end = ref_offset(next_ref); | ||
| 1170 | else { | ||
| 1171 | if (!jeb) | ||
| 1172 | jeb = &c->blocks[ref->flash_offset / c->sector_size]; | ||
| 1173 | |||
| 1174 | /* Last node in block. Use free_space */ | ||
| 1175 | if (unlikely(ref != jeb->last_node)) { | ||
| 1176 | printk(KERN_CRIT "ref %p @0x%08x is not jeb->last_node (%p @0x%08x)\n", | ||
| 1177 | ref, ref_offset(ref), jeb->last_node, jeb->last_node?ref_offset(jeb->last_node):0); | ||
| 1178 | BUG(); | ||
| 1179 | } | ||
| 1180 | ref_end = jeb->offset + c->sector_size - jeb->free_size; | ||
| 1181 | } | ||
| 1182 | return ref_end - ref_offset(ref); | ||
| 1183 | } | ||
| 1184 | |||
| 1185 | uint32_t __jffs2_ref_totlen(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | ||
| 1186 | struct jffs2_raw_node_ref *ref) | ||
| 1187 | { | ||
| 1188 | uint32_t ret; | ||
| 1189 | |||
| 1190 | ret = __ref_totlen(c, jeb, ref); | ||
| 1191 | |||
| 1192 | #ifdef TEST_TOTLEN | ||
| 1193 | if (unlikely(ret != ref->__totlen)) { | ||
| 1194 | if (!jeb) | ||
| 1195 | jeb = &c->blocks[ref->flash_offset / c->sector_size]; | ||
| 1196 | |||
| 1197 | printk(KERN_CRIT "Totlen for ref at %p (0x%08x-0x%08x) miscalculated as 0x%x instead of %x\n", | ||
| 1198 | ref, ref_offset(ref), ref_offset(ref)+ref->__totlen, | ||
| 1199 | ret, ref->__totlen); | ||
| 1200 | if (ref_next(ref)) { | ||
| 1201 | printk(KERN_CRIT "next %p (0x%08x-0x%08x)\n", ref_next(ref), ref_offset(ref_next(ref)), | ||
| 1202 | ref_offset(ref_next(ref))+ref->__totlen); | ||
| 1203 | } else | ||
| 1204 | printk(KERN_CRIT "No next ref. jeb->last_node is %p\n", jeb->last_node); | ||
| 1205 | |||
| 1206 | printk(KERN_CRIT "jeb->wasted_size %x, dirty_size %x, used_size %x, free_size %x\n", jeb->wasted_size, jeb->dirty_size, jeb->used_size, jeb->free_size); | ||
| 1207 | |||
| 1208 | #if defined(JFFS2_DBG_DUMPS) || defined(JFFS2_DBG_PARANOIA_CHECKS) | ||
| 1209 | __jffs2_dbg_dump_node_refs_nolock(c, jeb); | ||
| 1210 | #endif | ||
| 1211 | |||
| 1212 | WARN_ON(1); | ||
| 1213 | |||
| 1214 | ret = ref->__totlen; | ||
| 1215 | } | ||
| 1216 | #endif /* TEST_TOTLEN */ | ||
| 1217 | return ret; | ||
| 1218 | } | ||
diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h index 23a67bb3052f..b16c60bbcf6e 100644 --- a/fs/jffs2/nodelist.h +++ b/fs/jffs2/nodelist.h | |||
| @@ -18,8 +18,10 @@ | |||
| 18 | #include <linux/fs.h> | 18 | #include <linux/fs.h> |
| 19 | #include <linux/types.h> | 19 | #include <linux/types.h> |
| 20 | #include <linux/jffs2.h> | 20 | #include <linux/jffs2.h> |
| 21 | #include <linux/jffs2_fs_sb.h> | 21 | #include "jffs2_fs_sb.h" |
| 22 | #include <linux/jffs2_fs_i.h> | 22 | #include "jffs2_fs_i.h" |
| 23 | #include "xattr.h" | ||
| 24 | #include "acl.h" | ||
| 23 | #include "summary.h" | 25 | #include "summary.h" |
| 24 | 26 | ||
| 25 | #ifdef __ECOS | 27 | #ifdef __ECOS |
| @@ -75,14 +77,50 @@ | |||
| 75 | struct jffs2_raw_node_ref | 77 | struct jffs2_raw_node_ref |
| 76 | { | 78 | { |
| 77 | struct jffs2_raw_node_ref *next_in_ino; /* Points to the next raw_node_ref | 79 | struct jffs2_raw_node_ref *next_in_ino; /* Points to the next raw_node_ref |
| 78 | for this inode. If this is the last, it points to the inode_cache | 80 | for this object. If this _is_ the last, it points to the inode_cache, |
| 79 | for this inode instead. The inode_cache will have NULL in the first | 81 | xattr_ref or xattr_datum instead. The common part of those structures |
| 80 | word so you know when you've got there :) */ | 82 | has NULL in the first word. See jffs2_raw_ref_to_ic() below */ |
| 81 | struct jffs2_raw_node_ref *next_phys; | ||
| 82 | uint32_t flash_offset; | 83 | uint32_t flash_offset; |
| 84 | #define TEST_TOTLEN | ||
| 85 | #ifdef TEST_TOTLEN | ||
| 83 | uint32_t __totlen; /* This may die; use ref_totlen(c, jeb, ) below */ | 86 | uint32_t __totlen; /* This may die; use ref_totlen(c, jeb, ) below */ |
| 87 | #endif | ||
| 84 | }; | 88 | }; |
| 85 | 89 | ||
| 90 | #define REF_LINK_NODE ((int32_t)-1) | ||
| 91 | #define REF_EMPTY_NODE ((int32_t)-2) | ||
| 92 | |||
| 93 | /* Use blocks of about 256 bytes */ | ||
| 94 | #define REFS_PER_BLOCK ((255/sizeof(struct jffs2_raw_node_ref))-1) | ||
| 95 | |||
| 96 | static inline struct jffs2_raw_node_ref *ref_next(struct jffs2_raw_node_ref *ref) | ||
| 97 | { | ||
| 98 | ref++; | ||
| 99 | |||
| 100 | /* Link to another block of refs */ | ||
| 101 | if (ref->flash_offset == REF_LINK_NODE) { | ||
| 102 | ref = ref->next_in_ino; | ||
| 103 | if (!ref) | ||
| 104 | return ref; | ||
| 105 | } | ||
| 106 | |||
| 107 | /* End of chain */ | ||
| 108 | if (ref->flash_offset == REF_EMPTY_NODE) | ||
| 109 | return NULL; | ||
| 110 | |||
| 111 | return ref; | ||
| 112 | } | ||
| 113 | |||
| 114 | static inline struct jffs2_inode_cache *jffs2_raw_ref_to_ic(struct jffs2_raw_node_ref *raw) | ||
| 115 | { | ||
| 116 | while(raw->next_in_ino) | ||
| 117 | raw = raw->next_in_ino; | ||
| 118 | |||
| 119 | /* NB. This can be a jffs2_xattr_datum or jffs2_xattr_ref and | ||
| 120 | not actually a jffs2_inode_cache. Check ->class */ | ||
| 121 | return ((struct jffs2_inode_cache *)raw); | ||
| 122 | } | ||
| 123 | |||
| 86 | /* flash_offset & 3 always has to be zero, because nodes are | 124 | /* flash_offset & 3 always has to be zero, because nodes are |
| 87 | always aligned at 4 bytes. So we have a couple of extra bits | 125 | always aligned at 4 bytes. So we have a couple of extra bits |
| 88 | to play with, which indicate the node's status; see below: */ | 126 | to play with, which indicate the node's status; see below: */ |
| @@ -95,6 +133,11 @@ struct jffs2_raw_node_ref | |||
| 95 | #define ref_obsolete(ref) (((ref)->flash_offset & 3) == REF_OBSOLETE) | 133 | #define ref_obsolete(ref) (((ref)->flash_offset & 3) == REF_OBSOLETE) |
| 96 | #define mark_ref_normal(ref) do { (ref)->flash_offset = ref_offset(ref) | REF_NORMAL; } while(0) | 134 | #define mark_ref_normal(ref) do { (ref)->flash_offset = ref_offset(ref) | REF_NORMAL; } while(0) |
| 97 | 135 | ||
| 136 | /* NB: REF_PRISTINE for an inode-less node (ref->next_in_ino == NULL) indicates | ||
| 137 | it is an unknown node of type JFFS2_NODETYPE_RWCOMPAT_COPY, so it'll get | ||
| 138 | copied. If you need to do anything different to GC inode-less nodes, then | ||
| 139 | you need to modify gc.c accordingly. */ | ||
| 140 | |||
| 98 | /* For each inode in the filesystem, we need to keep a record of | 141 | /* For each inode in the filesystem, we need to keep a record of |
| 99 | nlink, because it would be a PITA to scan the whole directory tree | 142 | nlink, because it would be a PITA to scan the whole directory tree |
| 100 | at read_inode() time to calculate it, and to keep sufficient information | 143 | at read_inode() time to calculate it, and to keep sufficient information |
| @@ -103,15 +146,27 @@ struct jffs2_raw_node_ref | |||
| 103 | a pointer to the first physical node which is part of this inode, too. | 146 | a pointer to the first physical node which is part of this inode, too. |
| 104 | */ | 147 | */ |
| 105 | struct jffs2_inode_cache { | 148 | struct jffs2_inode_cache { |
| 149 | /* First part of structure is shared with other objects which | ||
| 150 | can terminate the raw node refs' next_in_ino list -- which | ||
| 151 | currently struct jffs2_xattr_datum and struct jffs2_xattr_ref. */ | ||
| 152 | |||
| 106 | struct jffs2_full_dirent *scan_dents; /* Used during scan to hold | 153 | struct jffs2_full_dirent *scan_dents; /* Used during scan to hold |
| 107 | temporary lists of dirents, and later must be set to | 154 | temporary lists of dirents, and later must be set to |
| 108 | NULL to mark the end of the raw_node_ref->next_in_ino | 155 | NULL to mark the end of the raw_node_ref->next_in_ino |
| 109 | chain. */ | 156 | chain. */ |
| 110 | struct jffs2_inode_cache *next; | ||
| 111 | struct jffs2_raw_node_ref *nodes; | 157 | struct jffs2_raw_node_ref *nodes; |
| 158 | uint8_t class; /* It's used for identification */ | ||
| 159 | |||
| 160 | /* end of shared structure */ | ||
| 161 | |||
| 162 | uint8_t flags; | ||
| 163 | uint16_t state; | ||
| 112 | uint32_t ino; | 164 | uint32_t ino; |
| 165 | struct jffs2_inode_cache *next; | ||
| 166 | #ifdef CONFIG_JFFS2_FS_XATTR | ||
| 167 | struct jffs2_xattr_ref *xref; | ||
| 168 | #endif | ||
| 113 | int nlink; | 169 | int nlink; |
| 114 | int state; | ||
| 115 | }; | 170 | }; |
| 116 | 171 | ||
| 117 | /* Inode states for 'state' above. We need the 'GC' state to prevent | 172 | /* Inode states for 'state' above. We need the 'GC' state to prevent |
| @@ -125,8 +180,16 @@ struct jffs2_inode_cache { | |||
| 125 | #define INO_STATE_READING 5 /* In read_inode() */ | 180 | #define INO_STATE_READING 5 /* In read_inode() */ |
| 126 | #define INO_STATE_CLEARING 6 /* In clear_inode() */ | 181 | #define INO_STATE_CLEARING 6 /* In clear_inode() */ |
| 127 | 182 | ||
| 183 | #define INO_FLAGS_XATTR_CHECKED 0x01 /* has no duplicate xattr_ref */ | ||
| 184 | |||
| 185 | #define RAWNODE_CLASS_INODE_CACHE 0 | ||
| 186 | #define RAWNODE_CLASS_XATTR_DATUM 1 | ||
| 187 | #define RAWNODE_CLASS_XATTR_REF 2 | ||
| 188 | |||
| 128 | #define INOCACHE_HASHSIZE 128 | 189 | #define INOCACHE_HASHSIZE 128 |
| 129 | 190 | ||
| 191 | #define write_ofs(c) ((c)->nextblock->offset + (c)->sector_size - (c)->nextblock->free_size) | ||
| 192 | |||
| 130 | /* | 193 | /* |
| 131 | Larger representation of a raw node, kept in-core only when the | 194 | Larger representation of a raw node, kept in-core only when the |
| 132 | struct inode for this particular ino is instantiated. | 195 | struct inode for this particular ino is instantiated. |
| @@ -192,6 +255,7 @@ struct jffs2_eraseblock | |||
| 192 | uint32_t wasted_size; | 255 | uint32_t wasted_size; |
| 193 | uint32_t free_size; /* Note that sector_size - free_size | 256 | uint32_t free_size; /* Note that sector_size - free_size |
| 194 | is the address of the first free space */ | 257 | is the address of the first free space */ |
| 258 | uint32_t allocated_refs; | ||
| 195 | struct jffs2_raw_node_ref *first_node; | 259 | struct jffs2_raw_node_ref *first_node; |
| 196 | struct jffs2_raw_node_ref *last_node; | 260 | struct jffs2_raw_node_ref *last_node; |
| 197 | 261 | ||
| @@ -203,57 +267,7 @@ static inline int jffs2_blocks_use_vmalloc(struct jffs2_sb_info *c) | |||
| 203 | return ((c->flash_size / c->sector_size) * sizeof (struct jffs2_eraseblock)) > (128 * 1024); | 267 | return ((c->flash_size / c->sector_size) * sizeof (struct jffs2_eraseblock)) > (128 * 1024); |
| 204 | } | 268 | } |
| 205 | 269 | ||
| 206 | /* Calculate totlen from surrounding nodes or eraseblock */ | 270 | #define ref_totlen(a, b, c) __jffs2_ref_totlen((a), (b), (c)) |
| 207 | static inline uint32_t __ref_totlen(struct jffs2_sb_info *c, | ||
| 208 | struct jffs2_eraseblock *jeb, | ||
| 209 | struct jffs2_raw_node_ref *ref) | ||
| 210 | { | ||
| 211 | uint32_t ref_end; | ||
| 212 | |||
| 213 | if (ref->next_phys) | ||
| 214 | ref_end = ref_offset(ref->next_phys); | ||
| 215 | else { | ||
| 216 | if (!jeb) | ||
| 217 | jeb = &c->blocks[ref->flash_offset / c->sector_size]; | ||
| 218 | |||
| 219 | /* Last node in block. Use free_space */ | ||
| 220 | BUG_ON(ref != jeb->last_node); | ||
| 221 | ref_end = jeb->offset + c->sector_size - jeb->free_size; | ||
| 222 | } | ||
| 223 | return ref_end - ref_offset(ref); | ||
| 224 | } | ||
| 225 | |||
| 226 | static inline uint32_t ref_totlen(struct jffs2_sb_info *c, | ||
| 227 | struct jffs2_eraseblock *jeb, | ||
| 228 | struct jffs2_raw_node_ref *ref) | ||
| 229 | { | ||
| 230 | uint32_t ret; | ||
| 231 | |||
| 232 | #if CONFIG_JFFS2_FS_DEBUG > 0 | ||
| 233 | if (jeb && jeb != &c->blocks[ref->flash_offset / c->sector_size]) { | ||
| 234 | printk(KERN_CRIT "ref_totlen called with wrong block -- at 0x%08x instead of 0x%08x; ref 0x%08x\n", | ||
| 235 | jeb->offset, c->blocks[ref->flash_offset / c->sector_size].offset, ref_offset(ref)); | ||
| 236 | BUG(); | ||
| 237 | } | ||
| 238 | #endif | ||
| 239 | |||
| 240 | #if 1 | ||
| 241 | ret = ref->__totlen; | ||
| 242 | #else | ||
| 243 | /* This doesn't actually work yet */ | ||
| 244 | ret = __ref_totlen(c, jeb, ref); | ||
| 245 | if (ret != ref->__totlen) { | ||
| 246 | printk(KERN_CRIT "Totlen for ref at %p (0x%08x-0x%08x) miscalculated as 0x%x instead of %x\n", | ||
| 247 | ref, ref_offset(ref), ref_offset(ref)+ref->__totlen, | ||
| 248 | ret, ref->__totlen); | ||
| 249 | if (!jeb) | ||
| 250 | jeb = &c->blocks[ref->flash_offset / c->sector_size]; | ||
| 251 | jffs2_dbg_dump_node_refs_nolock(c, jeb); | ||
| 252 | BUG(); | ||
| 253 | } | ||
| 254 | #endif | ||
| 255 | return ret; | ||
| 256 | } | ||
| 257 | 271 | ||
| 258 | #define ALLOC_NORMAL 0 /* Normal allocation */ | 272 | #define ALLOC_NORMAL 0 /* Normal allocation */ |
| 259 | #define ALLOC_DELETION 1 /* Deletion node. Best to allow it */ | 273 | #define ALLOC_DELETION 1 /* Deletion node. Best to allow it */ |
| @@ -268,13 +282,15 @@ static inline uint32_t ref_totlen(struct jffs2_sb_info *c, | |||
| 268 | 282 | ||
| 269 | #define PAD(x) (((x)+3)&~3) | 283 | #define PAD(x) (((x)+3)&~3) |
| 270 | 284 | ||
| 271 | static inline struct jffs2_inode_cache *jffs2_raw_ref_to_ic(struct jffs2_raw_node_ref *raw) | 285 | static inline int jffs2_encode_dev(union jffs2_device_node *jdev, dev_t rdev) |
| 272 | { | 286 | { |
| 273 | while(raw->next_in_ino) { | 287 | if (old_valid_dev(rdev)) { |
| 274 | raw = raw->next_in_ino; | 288 | jdev->old = cpu_to_je16(old_encode_dev(rdev)); |
| 289 | return sizeof(jdev->old); | ||
| 290 | } else { | ||
| 291 | jdev->new = cpu_to_je32(new_encode_dev(rdev)); | ||
| 292 | return sizeof(jdev->new); | ||
| 275 | } | 293 | } |
| 276 | |||
| 277 | return ((struct jffs2_inode_cache *)raw); | ||
| 278 | } | 294 | } |
| 279 | 295 | ||
| 280 | static inline struct jffs2_node_frag *frag_first(struct rb_root *root) | 296 | static inline struct jffs2_node_frag *frag_first(struct rb_root *root) |
| @@ -299,7 +315,6 @@ static inline struct jffs2_node_frag *frag_last(struct rb_root *root) | |||
| 299 | return rb_entry(node, struct jffs2_node_frag, rb); | 315 | return rb_entry(node, struct jffs2_node_frag, rb); |
| 300 | } | 316 | } |
| 301 | 317 | ||
| 302 | #define rb_parent(rb) ((rb)->rb_parent) | ||
| 303 | #define frag_next(frag) rb_entry(rb_next(&(frag)->rb), struct jffs2_node_frag, rb) | 318 | #define frag_next(frag) rb_entry(rb_next(&(frag)->rb), struct jffs2_node_frag, rb) |
| 304 | #define frag_prev(frag) rb_entry(rb_prev(&(frag)->rb), struct jffs2_node_frag, rb) | 319 | #define frag_prev(frag) rb_entry(rb_prev(&(frag)->rb), struct jffs2_node_frag, rb) |
| 305 | #define frag_parent(frag) rb_entry(rb_parent(&(frag)->rb), struct jffs2_node_frag, rb) | 320 | #define frag_parent(frag) rb_entry(rb_parent(&(frag)->rb), struct jffs2_node_frag, rb) |
| @@ -324,28 +339,44 @@ void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *t | |||
| 324 | int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn); | 339 | int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn); |
| 325 | void jffs2_truncate_fragtree (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size); | 340 | void jffs2_truncate_fragtree (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size); |
| 326 | int jffs2_add_older_frag_to_fragtree(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_tmp_dnode_info *tn); | 341 | int jffs2_add_older_frag_to_fragtree(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_tmp_dnode_info *tn); |
| 342 | struct jffs2_raw_node_ref *jffs2_link_node_ref(struct jffs2_sb_info *c, | ||
| 343 | struct jffs2_eraseblock *jeb, | ||
| 344 | uint32_t ofs, uint32_t len, | ||
| 345 | struct jffs2_inode_cache *ic); | ||
| 346 | extern uint32_t __jffs2_ref_totlen(struct jffs2_sb_info *c, | ||
| 347 | struct jffs2_eraseblock *jeb, | ||
| 348 | struct jffs2_raw_node_ref *ref); | ||
| 327 | 349 | ||
| 328 | /* nodemgmt.c */ | 350 | /* nodemgmt.c */ |
| 329 | int jffs2_thread_should_wake(struct jffs2_sb_info *c); | 351 | int jffs2_thread_should_wake(struct jffs2_sb_info *c); |
| 330 | int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, | 352 | int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, |
| 331 | uint32_t *len, int prio, uint32_t sumsize); | 353 | uint32_t *len, int prio, uint32_t sumsize); |
| 332 | int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, | 354 | int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, |
| 333 | uint32_t *len, uint32_t sumsize); | 355 | uint32_t *len, uint32_t sumsize); |
| 334 | int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new); | 356 | struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c, |
| 357 | uint32_t ofs, uint32_t len, | ||
| 358 | struct jffs2_inode_cache *ic); | ||
| 335 | void jffs2_complete_reservation(struct jffs2_sb_info *c); | 359 | void jffs2_complete_reservation(struct jffs2_sb_info *c); |
| 336 | void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *raw); | 360 | void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *raw); |
| 337 | 361 | ||
| 338 | /* write.c */ | 362 | /* write.c */ |
| 339 | int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint32_t mode, struct jffs2_raw_inode *ri); | 363 | int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint32_t mode, struct jffs2_raw_inode *ri); |
| 340 | 364 | ||
| 341 | struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const unsigned char *data, uint32_t datalen, uint32_t flash_ofs, int alloc_mode); | 365 | struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, |
| 342 | struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_dirent *rd, const unsigned char *name, uint32_t namelen, uint32_t flash_ofs, int alloc_mode); | 366 | struct jffs2_raw_inode *ri, const unsigned char *data, |
| 367 | uint32_t datalen, int alloc_mode); | ||
| 368 | struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | ||
| 369 | struct jffs2_raw_dirent *rd, const unsigned char *name, | ||
| 370 | uint32_t namelen, int alloc_mode); | ||
| 343 | int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | 371 | int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, |
| 344 | struct jffs2_raw_inode *ri, unsigned char *buf, | 372 | struct jffs2_raw_inode *ri, unsigned char *buf, |
| 345 | uint32_t offset, uint32_t writelen, uint32_t *retlen); | 373 | uint32_t offset, uint32_t writelen, uint32_t *retlen); |
| 346 | int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const char *name, int namelen); | 374 | int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, struct jffs2_inode_info *f, |
| 347 | int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, const char *name, int namelen, struct jffs2_inode_info *dead_f, uint32_t time); | 375 | struct jffs2_raw_inode *ri, const char *name, int namelen); |
| 348 | int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino, uint8_t type, const char *name, int namelen, uint32_t time); | 376 | int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, const char *name, |
| 377 | int namelen, struct jffs2_inode_info *dead_f, uint32_t time); | ||
| 378 | int jffs2_do_link(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino, | ||
| 379 | uint8_t type, const char *name, int namelen, uint32_t time); | ||
| 349 | 380 | ||
| 350 | 381 | ||
| 351 | /* readinode.c */ | 382 | /* readinode.c */ |
| @@ -368,12 +399,19 @@ struct jffs2_raw_inode *jffs2_alloc_raw_inode(void); | |||
| 368 | void jffs2_free_raw_inode(struct jffs2_raw_inode *); | 399 | void jffs2_free_raw_inode(struct jffs2_raw_inode *); |
| 369 | struct jffs2_tmp_dnode_info *jffs2_alloc_tmp_dnode_info(void); | 400 | struct jffs2_tmp_dnode_info *jffs2_alloc_tmp_dnode_info(void); |
| 370 | void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *); | 401 | void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *); |
| 371 | struct jffs2_raw_node_ref *jffs2_alloc_raw_node_ref(void); | 402 | int jffs2_prealloc_raw_node_refs(struct jffs2_sb_info *c, |
| 372 | void jffs2_free_raw_node_ref(struct jffs2_raw_node_ref *); | 403 | struct jffs2_eraseblock *jeb, int nr); |
| 404 | void jffs2_free_refblock(struct jffs2_raw_node_ref *); | ||
| 373 | struct jffs2_node_frag *jffs2_alloc_node_frag(void); | 405 | struct jffs2_node_frag *jffs2_alloc_node_frag(void); |
| 374 | void jffs2_free_node_frag(struct jffs2_node_frag *); | 406 | void jffs2_free_node_frag(struct jffs2_node_frag *); |
| 375 | struct jffs2_inode_cache *jffs2_alloc_inode_cache(void); | 407 | struct jffs2_inode_cache *jffs2_alloc_inode_cache(void); |
| 376 | void jffs2_free_inode_cache(struct jffs2_inode_cache *); | 408 | void jffs2_free_inode_cache(struct jffs2_inode_cache *); |
| 409 | #ifdef CONFIG_JFFS2_FS_XATTR | ||
| 410 | struct jffs2_xattr_datum *jffs2_alloc_xattr_datum(void); | ||
| 411 | void jffs2_free_xattr_datum(struct jffs2_xattr_datum *); | ||
| 412 | struct jffs2_xattr_ref *jffs2_alloc_xattr_ref(void); | ||
| 413 | void jffs2_free_xattr_ref(struct jffs2_xattr_ref *); | ||
| 414 | #endif | ||
| 377 | 415 | ||
| 378 | /* gc.c */ | 416 | /* gc.c */ |
| 379 | int jffs2_garbage_collect_pass(struct jffs2_sb_info *c); | 417 | int jffs2_garbage_collect_pass(struct jffs2_sb_info *c); |
| @@ -393,12 +431,14 @@ int jffs2_fill_scan_buf(struct jffs2_sb_info *c, void *buf, | |||
| 393 | uint32_t ofs, uint32_t len); | 431 | uint32_t ofs, uint32_t len); |
| 394 | struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino); | 432 | struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino); |
| 395 | int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); | 433 | int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); |
| 434 | int jffs2_scan_dirty_space(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t size); | ||
| 396 | 435 | ||
| 397 | /* build.c */ | 436 | /* build.c */ |
| 398 | int jffs2_do_mount_fs(struct jffs2_sb_info *c); | 437 | int jffs2_do_mount_fs(struct jffs2_sb_info *c); |
| 399 | 438 | ||
| 400 | /* erase.c */ | 439 | /* erase.c */ |
| 401 | void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count); | 440 | void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count); |
| 441 | void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); | ||
| 402 | 442 | ||
| 403 | #ifdef CONFIG_JFFS2_FS_WRITEBUFFER | 443 | #ifdef CONFIG_JFFS2_FS_WRITEBUFFER |
| 404 | /* wbuf.c */ | 444 | /* wbuf.c */ |
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c index 49127a1f0458..8bedfd2ff689 100644 --- a/fs/jffs2/nodemgmt.c +++ b/fs/jffs2/nodemgmt.c | |||
| @@ -23,13 +23,12 @@ | |||
| 23 | * jffs2_reserve_space - request physical space to write nodes to flash | 23 | * jffs2_reserve_space - request physical space to write nodes to flash |
| 24 | * @c: superblock info | 24 | * @c: superblock info |
| 25 | * @minsize: Minimum acceptable size of allocation | 25 | * @minsize: Minimum acceptable size of allocation |
| 26 | * @ofs: Returned value of node offset | ||
| 27 | * @len: Returned value of allocation length | 26 | * @len: Returned value of allocation length |
| 28 | * @prio: Allocation type - ALLOC_{NORMAL,DELETION} | 27 | * @prio: Allocation type - ALLOC_{NORMAL,DELETION} |
| 29 | * | 28 | * |
| 30 | * Requests a block of physical space on the flash. Returns zero for success | 29 | * Requests a block of physical space on the flash. Returns zero for success |
| 31 | * and puts 'ofs' and 'len' into the appriopriate place, or returns -ENOSPC | 30 | * and puts 'len' into the appropriate place, or returns -ENOSPC or other |
| 32 | or other error if appropriate. | 31 | error if appropriate. The node offset is no longer returned. |
| 33 | * | 32 | * |
| 34 | * If it returns zero, jffs2_reserve_space() also downs the per-filesystem | 33 | * If it returns zero, jffs2_reserve_space() also downs the per-filesystem |
| 35 | * allocation semaphore, to prevent more than one allocation from being | 34 | * allocation semaphore, to prevent more than one allocation from being |
| @@ -40,9 +39,9 @@ | |||
| 40 | */ | 39 | */ |
| 41 | 40 | ||
| 42 | static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, | 41 | static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, |
| 43 | uint32_t *ofs, uint32_t *len, uint32_t sumsize); | 42 | uint32_t *len, uint32_t sumsize); |
| 44 | 43 | ||
| 45 | int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, | 44 | int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, |
| 46 | uint32_t *len, int prio, uint32_t sumsize) | 45 | uint32_t *len, int prio, uint32_t sumsize) |
| 47 | { | 46 | { |
| 48 | int ret = -EAGAIN; | 47 | int ret = -EAGAIN; |
| @@ -132,19 +131,21 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs | |||
| 132 | spin_lock(&c->erase_completion_lock); | 131 | spin_lock(&c->erase_completion_lock); |
| 133 | } | 132 | } |
| 134 | 133 | ||
| 135 | ret = jffs2_do_reserve_space(c, minsize, ofs, len, sumsize); | 134 | ret = jffs2_do_reserve_space(c, minsize, len, sumsize); |
| 136 | if (ret) { | 135 | if (ret) { |
| 137 | D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret)); | 136 | D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret)); |
| 138 | } | 137 | } |
| 139 | } | 138 | } |
| 140 | spin_unlock(&c->erase_completion_lock); | 139 | spin_unlock(&c->erase_completion_lock); |
| 140 | if (!ret) | ||
| 141 | ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1); | ||
| 141 | if (ret) | 142 | if (ret) |
| 142 | up(&c->alloc_sem); | 143 | up(&c->alloc_sem); |
| 143 | return ret; | 144 | return ret; |
| 144 | } | 145 | } |
| 145 | 146 | ||
| 146 | int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, | 147 | int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, |
| 147 | uint32_t *len, uint32_t sumsize) | 148 | uint32_t *len, uint32_t sumsize) |
| 148 | { | 149 | { |
| 149 | int ret = -EAGAIN; | 150 | int ret = -EAGAIN; |
| 150 | minsize = PAD(minsize); | 151 | minsize = PAD(minsize); |
| @@ -153,12 +154,15 @@ int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t * | |||
| 153 | 154 | ||
| 154 | spin_lock(&c->erase_completion_lock); | 155 | spin_lock(&c->erase_completion_lock); |
| 155 | while(ret == -EAGAIN) { | 156 | while(ret == -EAGAIN) { |
| 156 | ret = jffs2_do_reserve_space(c, minsize, ofs, len, sumsize); | 157 | ret = jffs2_do_reserve_space(c, minsize, len, sumsize); |
| 157 | if (ret) { | 158 | if (ret) { |
| 158 | D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret)); | 159 | D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret)); |
| 159 | } | 160 | } |
| 160 | } | 161 | } |
| 161 | spin_unlock(&c->erase_completion_lock); | 162 | spin_unlock(&c->erase_completion_lock); |
| 163 | if (!ret) | ||
| 164 | ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1); | ||
| 165 | |||
| 162 | return ret; | 166 | return ret; |
| 163 | } | 167 | } |
| 164 | 168 | ||
| @@ -259,10 +263,11 @@ static int jffs2_find_nextblock(struct jffs2_sb_info *c) | |||
| 259 | } | 263 | } |
| 260 | 264 | ||
| 261 | /* Called with alloc sem _and_ erase_completion_lock */ | 265 | /* Called with alloc sem _and_ erase_completion_lock */ |
| 262 | static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, uint32_t sumsize) | 266 | static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, |
| 267 | uint32_t *len, uint32_t sumsize) | ||
| 263 | { | 268 | { |
| 264 | struct jffs2_eraseblock *jeb = c->nextblock; | 269 | struct jffs2_eraseblock *jeb = c->nextblock; |
| 265 | uint32_t reserved_size; /* for summary information at the end of the jeb */ | 270 | uint32_t reserved_size; /* for summary information at the end of the jeb */ |
| 266 | int ret; | 271 | int ret; |
| 267 | 272 | ||
| 268 | restart: | 273 | restart: |
| @@ -312,6 +317,8 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uin | |||
| 312 | } | 317 | } |
| 313 | } else { | 318 | } else { |
| 314 | if (jeb && minsize > jeb->free_size) { | 319 | if (jeb && minsize > jeb->free_size) { |
| 320 | uint32_t waste; | ||
| 321 | |||
| 315 | /* Skip the end of this block and file it as having some dirty space */ | 322 | /* Skip the end of this block and file it as having some dirty space */ |
| 316 | /* If there's a pending write to it, flush now */ | 323 | /* If there's a pending write to it, flush now */ |
| 317 | 324 | ||
| @@ -324,10 +331,26 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uin | |||
| 324 | goto restart; | 331 | goto restart; |
| 325 | } | 332 | } |
| 326 | 333 | ||
| 327 | c->wasted_size += jeb->free_size; | 334 | spin_unlock(&c->erase_completion_lock); |
| 328 | c->free_size -= jeb->free_size; | 335 | |
| 329 | jeb->wasted_size += jeb->free_size; | 336 | ret = jffs2_prealloc_raw_node_refs(c, jeb, 1); |
| 330 | jeb->free_size = 0; | 337 | if (ret) |
| 338 | return ret; | ||
| 339 | /* Just lock it again and continue. Nothing much can change because | ||
| 340 | we hold c->alloc_sem anyway. In fact, it's not entirely clear why | ||
| 341 | we hold c->erase_completion_lock in the majority of this function... | ||
| 342 | but that's a question for another (more caffeine-rich) day. */ | ||
| 343 | spin_lock(&c->erase_completion_lock); | ||
| 344 | |||
| 345 | waste = jeb->free_size; | ||
| 346 | jffs2_link_node_ref(c, jeb, | ||
| 347 | (jeb->offset + c->sector_size - waste) | REF_OBSOLETE, | ||
| 348 | waste, NULL); | ||
| 349 | /* FIXME: that made it count as dirty. Convert to wasted */ | ||
| 350 | jeb->dirty_size -= waste; | ||
| 351 | c->dirty_size -= waste; | ||
| 352 | jeb->wasted_size += waste; | ||
| 353 | c->wasted_size += waste; | ||
| 331 | 354 | ||
| 332 | jffs2_close_nextblock(c, jeb); | 355 | jffs2_close_nextblock(c, jeb); |
| 333 | jeb = NULL; | 356 | jeb = NULL; |
| @@ -349,7 +372,6 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uin | |||
| 349 | } | 372 | } |
| 350 | /* OK, jeb (==c->nextblock) is now pointing at a block which definitely has | 373 | /* OK, jeb (==c->nextblock) is now pointing at a block which definitely has |
| 351 | enough space */ | 374 | enough space */ |
| 352 | *ofs = jeb->offset + (c->sector_size - jeb->free_size); | ||
| 353 | *len = jeb->free_size - reserved_size; | 375 | *len = jeb->free_size - reserved_size; |
| 354 | 376 | ||
| 355 | if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size && | 377 | if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size && |
| @@ -365,7 +387,8 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uin | |||
| 365 | spin_lock(&c->erase_completion_lock); | 387 | spin_lock(&c->erase_completion_lock); |
| 366 | } | 388 | } |
| 367 | 389 | ||
| 368 | D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", *len, *ofs)); | 390 | D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", |
| 391 | *len, jeb->offset + (c->sector_size - jeb->free_size))); | ||
| 369 | return 0; | 392 | return 0; |
| 370 | } | 393 | } |
| 371 | 394 | ||
| @@ -374,7 +397,6 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uin | |||
| 374 | * @c: superblock info | 397 | * @c: superblock info |
| 375 | * @new: new node reference to add | 398 | * @new: new node reference to add |
| 376 | * @len: length of this physical node | 399 | * @len: length of this physical node |
| 377 | * @dirty: dirty flag for new node | ||
| 378 | * | 400 | * |
| 379 | * Should only be used to report nodes for which space has been allocated | 401 | * Should only be used to report nodes for which space has been allocated |
| 380 | * by jffs2_reserve_space. | 402 | * by jffs2_reserve_space. |
| @@ -382,42 +404,30 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uin | |||
| 382 | * Must be called with the alloc_sem held. | 404 | * Must be called with the alloc_sem held. |
| 383 | */ | 405 | */ |
| 384 | 406 | ||
| 385 | int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new) | 407 | struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c, |
| 408 | uint32_t ofs, uint32_t len, | ||
| 409 | struct jffs2_inode_cache *ic) | ||
| 386 | { | 410 | { |
| 387 | struct jffs2_eraseblock *jeb; | 411 | struct jffs2_eraseblock *jeb; |
| 388 | uint32_t len; | 412 | struct jffs2_raw_node_ref *new; |
| 389 | 413 | ||
| 390 | jeb = &c->blocks[new->flash_offset / c->sector_size]; | 414 | jeb = &c->blocks[ofs / c->sector_size]; |
| 391 | len = ref_totlen(c, jeb, new); | ||
| 392 | 415 | ||
| 393 | D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", ref_offset(new), ref_flags(new), len)); | 416 | D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", |
| 417 | ofs & ~3, ofs & 3, len)); | ||
| 394 | #if 1 | 418 | #if 1 |
| 395 | /* we could get some obsolete nodes after nextblock was refiled | 419 | /* Allow non-obsolete nodes only to be added at the end of c->nextblock, |
| 396 | in wbuf.c */ | 420 | if c->nextblock is set. Note that wbuf.c will file obsolete nodes |
| 397 | if ((c->nextblock || !ref_obsolete(new)) | 421 | even after refiling c->nextblock */ |
| 398 | &&(jeb != c->nextblock || ref_offset(new) != jeb->offset + (c->sector_size - jeb->free_size))) { | 422 | if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE)) |
| 423 | && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) { | ||
| 399 | printk(KERN_WARNING "argh. node added in wrong place\n"); | 424 | printk(KERN_WARNING "argh. node added in wrong place\n"); |
| 400 | jffs2_free_raw_node_ref(new); | 425 | return ERR_PTR(-EINVAL); |
| 401 | return -EINVAL; | ||
| 402 | } | 426 | } |
| 403 | #endif | 427 | #endif |
| 404 | spin_lock(&c->erase_completion_lock); | 428 | spin_lock(&c->erase_completion_lock); |
| 405 | 429 | ||
| 406 | if (!jeb->first_node) | 430 | new = jffs2_link_node_ref(c, jeb, ofs, len, ic); |
| 407 | jeb->first_node = new; | ||
| 408 | if (jeb->last_node) | ||
| 409 | jeb->last_node->next_phys = new; | ||
| 410 | jeb->last_node = new; | ||
| 411 | |||
| 412 | jeb->free_size -= len; | ||
| 413 | c->free_size -= len; | ||
| 414 | if (ref_obsolete(new)) { | ||
| 415 | jeb->dirty_size += len; | ||
| 416 | c->dirty_size += len; | ||
| 417 | } else { | ||
| 418 | jeb->used_size += len; | ||
| 419 | c->used_size += len; | ||
| 420 | } | ||
| 421 | 431 | ||
| 422 | if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) { | 432 | if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) { |
| 423 | /* If it lives on the dirty_list, jffs2_reserve_space will put it there */ | 433 | /* If it lives on the dirty_list, jffs2_reserve_space will put it there */ |
| @@ -438,7 +448,7 @@ int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_r | |||
| 438 | 448 | ||
| 439 | spin_unlock(&c->erase_completion_lock); | 449 | spin_unlock(&c->erase_completion_lock); |
| 440 | 450 | ||
| 441 | return 0; | 451 | return new; |
| 442 | } | 452 | } |
| 443 | 453 | ||
| 444 | 454 | ||
| @@ -470,8 +480,9 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
| 470 | struct jffs2_unknown_node n; | 480 | struct jffs2_unknown_node n; |
| 471 | int ret, addedsize; | 481 | int ret, addedsize; |
| 472 | size_t retlen; | 482 | size_t retlen; |
| 483 | uint32_t freed_len; | ||
| 473 | 484 | ||
| 474 | if(!ref) { | 485 | if(unlikely(!ref)) { |
| 475 | printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n"); | 486 | printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n"); |
| 476 | return; | 487 | return; |
| 477 | } | 488 | } |
| @@ -499,32 +510,34 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
| 499 | 510 | ||
| 500 | spin_lock(&c->erase_completion_lock); | 511 | spin_lock(&c->erase_completion_lock); |
| 501 | 512 | ||
| 513 | freed_len = ref_totlen(c, jeb, ref); | ||
| 514 | |||
| 502 | if (ref_flags(ref) == REF_UNCHECKED) { | 515 | if (ref_flags(ref) == REF_UNCHECKED) { |
| 503 | D1(if (unlikely(jeb->unchecked_size < ref_totlen(c, jeb, ref))) { | 516 | D1(if (unlikely(jeb->unchecked_size < freed_len)) { |
| 504 | printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n", | 517 | printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n", |
| 505 | ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size); | 518 | freed_len, blocknr, ref->flash_offset, jeb->used_size); |
| 506 | BUG(); | 519 | BUG(); |
| 507 | }) | 520 | }) |
| 508 | D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref))); | 521 | D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), freed_len)); |
| 509 | jeb->unchecked_size -= ref_totlen(c, jeb, ref); | 522 | jeb->unchecked_size -= freed_len; |
| 510 | c->unchecked_size -= ref_totlen(c, jeb, ref); | 523 | c->unchecked_size -= freed_len; |
| 511 | } else { | 524 | } else { |
| 512 | D1(if (unlikely(jeb->used_size < ref_totlen(c, jeb, ref))) { | 525 | D1(if (unlikely(jeb->used_size < freed_len)) { |
| 513 | printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n", | 526 | printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n", |
| 514 | ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size); | 527 | freed_len, blocknr, ref->flash_offset, jeb->used_size); |
| 515 | BUG(); | 528 | BUG(); |
| 516 | }) | 529 | }) |
| 517 | D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), ref_totlen(c, jeb, ref))); | 530 | D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), freed_len)); |
| 518 | jeb->used_size -= ref_totlen(c, jeb, ref); | 531 | jeb->used_size -= freed_len; |
| 519 | c->used_size -= ref_totlen(c, jeb, ref); | 532 | c->used_size -= freed_len; |
| 520 | } | 533 | } |
| 521 | 534 | ||
| 522 | // Take care, that wasted size is taken into concern | 535 | // Take care, that wasted size is taken into concern |
| 523 | if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + ref_totlen(c, jeb, ref))) && jeb != c->nextblock) { | 536 | if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) { |
| 524 | D1(printk(KERN_DEBUG "Dirtying\n")); | 537 | D1(printk("Dirtying\n")); |
| 525 | addedsize = ref_totlen(c, jeb, ref); | 538 | addedsize = freed_len; |
| 526 | jeb->dirty_size += ref_totlen(c, jeb, ref); | 539 | jeb->dirty_size += freed_len; |
| 527 | c->dirty_size += ref_totlen(c, jeb, ref); | 540 | c->dirty_size += freed_len; |
| 528 | 541 | ||
| 529 | /* Convert wasted space to dirty, if not a bad block */ | 542 | /* Convert wasted space to dirty, if not a bad block */ |
| 530 | if (jeb->wasted_size) { | 543 | if (jeb->wasted_size) { |
| @@ -543,10 +556,10 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
| 543 | } | 556 | } |
| 544 | } | 557 | } |
| 545 | } else { | 558 | } else { |
| 546 | D1(printk(KERN_DEBUG "Wasting\n")); | 559 | D1(printk("Wasting\n")); |
| 547 | addedsize = 0; | 560 | addedsize = 0; |
| 548 | jeb->wasted_size += ref_totlen(c, jeb, ref); | 561 | jeb->wasted_size += freed_len; |
| 549 | c->wasted_size += ref_totlen(c, jeb, ref); | 562 | c->wasted_size += freed_len; |
| 550 | } | 563 | } |
| 551 | ref->flash_offset = ref_offset(ref) | REF_OBSOLETE; | 564 | ref->flash_offset = ref_offset(ref) | REF_OBSOLETE; |
| 552 | 565 | ||
| @@ -622,7 +635,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
| 622 | /* The erase_free_sem is locked, and has been since before we marked the node obsolete | 635 | /* The erase_free_sem is locked, and has been since before we marked the node obsolete |
| 623 | and potentially put its eraseblock onto the erase_pending_list. Thus, we know that | 636 | and potentially put its eraseblock onto the erase_pending_list. Thus, we know that |
| 624 | the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet | 637 | the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet |
| 625 | by jffs2_free_all_node_refs() in erase.c. Which is nice. */ | 638 | by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */ |
| 626 | 639 | ||
| 627 | D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref))); | 640 | D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref))); |
| 628 | ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n); | 641 | ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n); |
| @@ -634,8 +647,8 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
| 634 | printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen); | 647 | printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen); |
| 635 | goto out_erase_sem; | 648 | goto out_erase_sem; |
| 636 | } | 649 | } |
| 637 | if (PAD(je32_to_cpu(n.totlen)) != PAD(ref_totlen(c, jeb, ref))) { | 650 | if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) { |
| 638 | printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), ref_totlen(c, jeb, ref)); | 651 | printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), freed_len); |
| 639 | goto out_erase_sem; | 652 | goto out_erase_sem; |
| 640 | } | 653 | } |
| 641 | if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) { | 654 | if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) { |
| @@ -671,6 +684,10 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
| 671 | spin_lock(&c->erase_completion_lock); | 684 | spin_lock(&c->erase_completion_lock); |
| 672 | 685 | ||
| 673 | ic = jffs2_raw_ref_to_ic(ref); | 686 | ic = jffs2_raw_ref_to_ic(ref); |
| 687 | /* It seems we should never call jffs2_mark_node_obsolete() for | ||
| 688 | XATTR nodes.... yet. Make sure we notice if/when we change | ||
| 689 | that :) */ | ||
| 690 | BUG_ON(ic->class != RAWNODE_CLASS_INODE_CACHE); | ||
| 674 | for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino)) | 691 | for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino)) |
| 675 | ; | 692 | ; |
| 676 | 693 | ||
| @@ -683,51 +700,6 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
| 683 | spin_unlock(&c->erase_completion_lock); | 700 | spin_unlock(&c->erase_completion_lock); |
| 684 | } | 701 | } |
| 685 | 702 | ||
| 686 | |||
| 687 | /* Merge with the next node in the physical list, if there is one | ||
| 688 | and if it's also obsolete and if it doesn't belong to any inode */ | ||
| 689 | if (ref->next_phys && ref_obsolete(ref->next_phys) && | ||
| 690 | !ref->next_phys->next_in_ino) { | ||
| 691 | struct jffs2_raw_node_ref *n = ref->next_phys; | ||
| 692 | |||
| 693 | spin_lock(&c->erase_completion_lock); | ||
| 694 | |||
| 695 | ref->__totlen += n->__totlen; | ||
| 696 | ref->next_phys = n->next_phys; | ||
| 697 | if (jeb->last_node == n) jeb->last_node = ref; | ||
| 698 | if (jeb->gc_node == n) { | ||
| 699 | /* gc will be happy continuing gc on this node */ | ||
| 700 | jeb->gc_node=ref; | ||
| 701 | } | ||
| 702 | spin_unlock(&c->erase_completion_lock); | ||
| 703 | |||
| 704 | jffs2_free_raw_node_ref(n); | ||
| 705 | } | ||
| 706 | |||
| 707 | /* Also merge with the previous node in the list, if there is one | ||
| 708 | and that one is obsolete */ | ||
| 709 | if (ref != jeb->first_node ) { | ||
| 710 | struct jffs2_raw_node_ref *p = jeb->first_node; | ||
| 711 | |||
| 712 | spin_lock(&c->erase_completion_lock); | ||
| 713 | |||
| 714 | while (p->next_phys != ref) | ||
| 715 | p = p->next_phys; | ||
| 716 | |||
| 717 | if (ref_obsolete(p) && !ref->next_in_ino) { | ||
| 718 | p->__totlen += ref->__totlen; | ||
| 719 | if (jeb->last_node == ref) { | ||
| 720 | jeb->last_node = p; | ||
| 721 | } | ||
| 722 | if (jeb->gc_node == ref) { | ||
| 723 | /* gc will be happy continuing gc on this node */ | ||
| 724 | jeb->gc_node=p; | ||
| 725 | } | ||
| 726 | p->next_phys = ref->next_phys; | ||
| 727 | jffs2_free_raw_node_ref(ref); | ||
| 728 | } | ||
| 729 | spin_unlock(&c->erase_completion_lock); | ||
| 730 | } | ||
| 731 | out_erase_sem: | 703 | out_erase_sem: |
| 732 | up(&c->erase_free_sem); | 704 | up(&c->erase_free_sem); |
| 733 | } | 705 | } |
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h index d307cf548625..cd4021bcb944 100644 --- a/fs/jffs2/os-linux.h +++ b/fs/jffs2/os-linux.h | |||
| @@ -31,9 +31,7 @@ struct kvec; | |||
| 31 | #define JFFS2_F_I_MODE(f) (OFNI_EDONI_2SFFJ(f)->i_mode) | 31 | #define JFFS2_F_I_MODE(f) (OFNI_EDONI_2SFFJ(f)->i_mode) |
| 32 | #define JFFS2_F_I_UID(f) (OFNI_EDONI_2SFFJ(f)->i_uid) | 32 | #define JFFS2_F_I_UID(f) (OFNI_EDONI_2SFFJ(f)->i_uid) |
| 33 | #define JFFS2_F_I_GID(f) (OFNI_EDONI_2SFFJ(f)->i_gid) | 33 | #define JFFS2_F_I_GID(f) (OFNI_EDONI_2SFFJ(f)->i_gid) |
| 34 | 34 | #define JFFS2_F_I_RDEV(f) (OFNI_EDONI_2SFFJ(f)->i_rdev) | |
| 35 | #define JFFS2_F_I_RDEV_MIN(f) (iminor(OFNI_EDONI_2SFFJ(f))) | ||
| 36 | #define JFFS2_F_I_RDEV_MAJ(f) (imajor(OFNI_EDONI_2SFFJ(f))) | ||
| 37 | 35 | ||
| 38 | #define ITIME(sec) ((struct timespec){sec, 0}) | 36 | #define ITIME(sec) ((struct timespec){sec, 0}) |
| 39 | #define I_SEC(tv) ((tv).tv_sec) | 37 | #define I_SEC(tv) ((tv).tv_sec) |
| @@ -60,6 +58,10 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f) | |||
| 60 | f->target = NULL; | 58 | f->target = NULL; |
| 61 | f->flags = 0; | 59 | f->flags = 0; |
| 62 | f->usercompr = 0; | 60 | f->usercompr = 0; |
| 61 | #ifdef CONFIG_JFFS2_FS_POSIX_ACL | ||
| 62 | f->i_acl_access = JFFS2_ACL_NOT_CACHED; | ||
| 63 | f->i_acl_default = JFFS2_ACL_NOT_CACHED; | ||
| 64 | #endif | ||
| 63 | } | 65 | } |
| 64 | 66 | ||
| 65 | 67 | ||
| @@ -90,13 +92,10 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f) | |||
| 90 | #define jffs2_flash_writev(a,b,c,d,e,f) jffs2_flash_direct_writev(a,b,c,d,e) | 92 | #define jffs2_flash_writev(a,b,c,d,e,f) jffs2_flash_direct_writev(a,b,c,d,e) |
| 91 | #define jffs2_wbuf_timeout NULL | 93 | #define jffs2_wbuf_timeout NULL |
| 92 | #define jffs2_wbuf_process NULL | 94 | #define jffs2_wbuf_process NULL |
| 93 | #define jffs2_nor_ecc(c) (0) | ||
| 94 | #define jffs2_dataflash(c) (0) | 95 | #define jffs2_dataflash(c) (0) |
| 95 | #define jffs2_nor_wbuf_flash(c) (0) | ||
| 96 | #define jffs2_nor_ecc_flash_setup(c) (0) | ||
| 97 | #define jffs2_nor_ecc_flash_cleanup(c) do {} while (0) | ||
| 98 | #define jffs2_dataflash_setup(c) (0) | 96 | #define jffs2_dataflash_setup(c) (0) |
| 99 | #define jffs2_dataflash_cleanup(c) do {} while (0) | 97 | #define jffs2_dataflash_cleanup(c) do {} while (0) |
| 98 | #define jffs2_nor_wbuf_flash(c) (0) | ||
| 100 | #define jffs2_nor_wbuf_flash_setup(c) (0) | 99 | #define jffs2_nor_wbuf_flash_setup(c) (0) |
| 101 | #define jffs2_nor_wbuf_flash_cleanup(c) do {} while (0) | 100 | #define jffs2_nor_wbuf_flash_cleanup(c) do {} while (0) |
| 102 | 101 | ||
| @@ -107,9 +106,7 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f) | |||
| 107 | #ifdef CONFIG_JFFS2_SUMMARY | 106 | #ifdef CONFIG_JFFS2_SUMMARY |
| 108 | #define jffs2_can_mark_obsolete(c) (0) | 107 | #define jffs2_can_mark_obsolete(c) (0) |
| 109 | #else | 108 | #else |
| 110 | #define jffs2_can_mark_obsolete(c) \ | 109 | #define jffs2_can_mark_obsolete(c) (c->mtd->flags & (MTD_BIT_WRITEABLE)) |
| 111 | ((c->mtd->type == MTD_NORFLASH && !(c->mtd->flags & (MTD_ECC|MTD_PROGRAM_REGIONS))) || \ | ||
| 112 | c->mtd->type == MTD_RAM) | ||
| 113 | #endif | 110 | #endif |
| 114 | 111 | ||
| 115 | #define jffs2_cleanmarker_oob(c) (c->mtd->type == MTD_NANDFLASH) | 112 | #define jffs2_cleanmarker_oob(c) (c->mtd->type == MTD_NANDFLASH) |
| @@ -133,15 +130,11 @@ int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c); | |||
| 133 | int jffs2_nand_flash_setup(struct jffs2_sb_info *c); | 130 | int jffs2_nand_flash_setup(struct jffs2_sb_info *c); |
| 134 | void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c); | 131 | void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c); |
| 135 | 132 | ||
| 136 | #define jffs2_nor_ecc(c) (c->mtd->type == MTD_NORFLASH && (c->mtd->flags & MTD_ECC)) | ||
| 137 | int jffs2_nor_ecc_flash_setup(struct jffs2_sb_info *c); | ||
| 138 | void jffs2_nor_ecc_flash_cleanup(struct jffs2_sb_info *c); | ||
| 139 | |||
| 140 | #define jffs2_dataflash(c) (c->mtd->type == MTD_DATAFLASH) | 133 | #define jffs2_dataflash(c) (c->mtd->type == MTD_DATAFLASH) |
| 141 | int jffs2_dataflash_setup(struct jffs2_sb_info *c); | 134 | int jffs2_dataflash_setup(struct jffs2_sb_info *c); |
| 142 | void jffs2_dataflash_cleanup(struct jffs2_sb_info *c); | 135 | void jffs2_dataflash_cleanup(struct jffs2_sb_info *c); |
| 143 | 136 | ||
| 144 | #define jffs2_nor_wbuf_flash(c) (c->mtd->type == MTD_NORFLASH && (c->mtd->flags & MTD_PROGRAM_REGIONS)) | 137 | #define jffs2_nor_wbuf_flash(c) (c->mtd->type == MTD_NORFLASH && ! (c->mtd->flags & MTD_BIT_WRITEABLE)) |
| 145 | int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c); | 138 | int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c); |
| 146 | void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c); | 139 | void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c); |
| 147 | 140 | ||
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c index f1695642d0f7..5fec012b02ed 100644 --- a/fs/jffs2/readinode.c +++ b/fs/jffs2/readinode.c | |||
| @@ -66,7 +66,7 @@ static void jffs2_free_tmp_dnode_info_list(struct rb_root *list) | |||
| 66 | jffs2_free_full_dnode(tn->fn); | 66 | jffs2_free_full_dnode(tn->fn); |
| 67 | jffs2_free_tmp_dnode_info(tn); | 67 | jffs2_free_tmp_dnode_info(tn); |
| 68 | 68 | ||
| 69 | this = this->rb_parent; | 69 | this = rb_parent(this); |
| 70 | if (!this) | 70 | if (!this) |
| 71 | break; | 71 | break; |
| 72 | 72 | ||
| @@ -116,19 +116,42 @@ static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_r | |||
| 116 | uint32_t *latest_mctime, uint32_t *mctime_ver) | 116 | uint32_t *latest_mctime, uint32_t *mctime_ver) |
| 117 | { | 117 | { |
| 118 | struct jffs2_full_dirent *fd; | 118 | struct jffs2_full_dirent *fd; |
| 119 | uint32_t crc; | ||
| 119 | 120 | ||
| 120 | /* The direntry nodes are checked during the flash scanning */ | ||
| 121 | BUG_ON(ref_flags(ref) == REF_UNCHECKED); | ||
| 122 | /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */ | 121 | /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */ |
| 123 | BUG_ON(ref_obsolete(ref)); | 122 | BUG_ON(ref_obsolete(ref)); |
| 124 | 123 | ||
| 125 | /* Sanity check */ | 124 | crc = crc32(0, rd, sizeof(*rd) - 8); |
| 126 | if (unlikely(PAD((rd->nsize + sizeof(*rd))) != PAD(je32_to_cpu(rd->totlen)))) { | 125 | if (unlikely(crc != je32_to_cpu(rd->node_crc))) { |
| 127 | JFFS2_ERROR("illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n", | 126 | JFFS2_NOTICE("header CRC failed on dirent node at %#08x: read %#08x, calculated %#08x\n", |
| 128 | ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen)); | 127 | ref_offset(ref), je32_to_cpu(rd->node_crc), crc); |
| 129 | return 1; | 128 | return 1; |
| 130 | } | 129 | } |
| 131 | 130 | ||
| 131 | /* If we've never checked the CRCs on this node, check them now */ | ||
| 132 | if (ref_flags(ref) == REF_UNCHECKED) { | ||
| 133 | struct jffs2_eraseblock *jeb; | ||
| 134 | int len; | ||
| 135 | |||
| 136 | /* Sanity check */ | ||
| 137 | if (unlikely(PAD((rd->nsize + sizeof(*rd))) != PAD(je32_to_cpu(rd->totlen)))) { | ||
| 138 | JFFS2_ERROR("illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n", | ||
| 139 | ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen)); | ||
| 140 | return 1; | ||
| 141 | } | ||
| 142 | |||
| 143 | jeb = &c->blocks[ref->flash_offset / c->sector_size]; | ||
| 144 | len = ref_totlen(c, jeb, ref); | ||
| 145 | |||
| 146 | spin_lock(&c->erase_completion_lock); | ||
| 147 | jeb->used_size += len; | ||
| 148 | jeb->unchecked_size -= len; | ||
| 149 | c->used_size += len; | ||
| 150 | c->unchecked_size -= len; | ||
| 151 | ref->flash_offset = ref_offset(ref) | REF_PRISTINE; | ||
| 152 | spin_unlock(&c->erase_completion_lock); | ||
| 153 | } | ||
| 154 | |||
| 132 | fd = jffs2_alloc_full_dirent(rd->nsize + 1); | 155 | fd = jffs2_alloc_full_dirent(rd->nsize + 1); |
| 133 | if (unlikely(!fd)) | 156 | if (unlikely(!fd)) |
| 134 | return -ENOMEM; | 157 | return -ENOMEM; |
| @@ -198,13 +221,21 @@ static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
| 198 | struct jffs2_tmp_dnode_info *tn; | 221 | struct jffs2_tmp_dnode_info *tn; |
| 199 | uint32_t len, csize; | 222 | uint32_t len, csize; |
| 200 | int ret = 1; | 223 | int ret = 1; |
| 224 | uint32_t crc; | ||
| 201 | 225 | ||
| 202 | /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */ | 226 | /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */ |
| 203 | BUG_ON(ref_obsolete(ref)); | 227 | BUG_ON(ref_obsolete(ref)); |
| 204 | 228 | ||
| 229 | crc = crc32(0, rd, sizeof(*rd) - 8); | ||
| 230 | if (unlikely(crc != je32_to_cpu(rd->node_crc))) { | ||
| 231 | JFFS2_NOTICE("node CRC failed on dnode at %#08x: read %#08x, calculated %#08x\n", | ||
| 232 | ref_offset(ref), je32_to_cpu(rd->node_crc), crc); | ||
| 233 | return 1; | ||
| 234 | } | ||
| 235 | |||
| 205 | tn = jffs2_alloc_tmp_dnode_info(); | 236 | tn = jffs2_alloc_tmp_dnode_info(); |
| 206 | if (!tn) { | 237 | if (!tn) { |
| 207 | JFFS2_ERROR("failed to allocate tn (%d bytes).\n", sizeof(*tn)); | 238 | JFFS2_ERROR("failed to allocate tn (%zu bytes).\n", sizeof(*tn)); |
| 208 | return -ENOMEM; | 239 | return -ENOMEM; |
| 209 | } | 240 | } |
| 210 | 241 | ||
| @@ -213,14 +244,6 @@ static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
| 213 | 244 | ||
| 214 | /* If we've never checked the CRCs on this node, check them now */ | 245 | /* If we've never checked the CRCs on this node, check them now */ |
| 215 | if (ref_flags(ref) == REF_UNCHECKED) { | 246 | if (ref_flags(ref) == REF_UNCHECKED) { |
| 216 | uint32_t crc; | ||
| 217 | |||
| 218 | crc = crc32(0, rd, sizeof(*rd) - 8); | ||
| 219 | if (unlikely(crc != je32_to_cpu(rd->node_crc))) { | ||
| 220 | JFFS2_NOTICE("header CRC failed on node at %#08x: read %#08x, calculated %#08x\n", | ||
| 221 | ref_offset(ref), je32_to_cpu(rd->node_crc), crc); | ||
| 222 | goto free_out; | ||
| 223 | } | ||
| 224 | 247 | ||
| 225 | /* Sanity checks */ | 248 | /* Sanity checks */ |
| 226 | if (unlikely(je32_to_cpu(rd->offset) > je32_to_cpu(rd->isize)) || | 249 | if (unlikely(je32_to_cpu(rd->offset) > je32_to_cpu(rd->isize)) || |
| @@ -343,7 +366,7 @@ free_out: | |||
| 343 | * Helper function for jffs2_get_inode_nodes(). | 366 | * Helper function for jffs2_get_inode_nodes(). |
| 344 | * It is called every time an unknown node is found. | 367 | * It is called every time an unknown node is found. |
| 345 | * | 368 | * |
| 346 | * Returns: 0 on succes; | 369 | * Returns: 0 on success; |
| 347 | * 1 if the node should be marked obsolete; | 370 | * 1 if the node should be marked obsolete; |
| 348 | * negative error code on failure. | 371 | * negative error code on failure. |
| 349 | */ | 372 | */ |
| @@ -354,37 +377,30 @@ static inline int read_unknown(struct jffs2_sb_info *c, struct jffs2_raw_node_re | |||
| 354 | 377 | ||
| 355 | un->nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(un->nodetype)); | 378 | un->nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(un->nodetype)); |
| 356 | 379 | ||
| 357 | if (crc32(0, un, sizeof(struct jffs2_unknown_node) - 4) != je32_to_cpu(un->hdr_crc)) { | 380 | switch(je16_to_cpu(un->nodetype) & JFFS2_COMPAT_MASK) { |
| 358 | /* Hmmm. This should have been caught at scan time. */ | ||
| 359 | JFFS2_NOTICE("node header CRC failed at %#08x. But it must have been OK earlier.\n", ref_offset(ref)); | ||
| 360 | jffs2_dbg_dump_node(c, ref_offset(ref)); | ||
| 361 | return 1; | ||
| 362 | } else { | ||
| 363 | switch(je16_to_cpu(un->nodetype) & JFFS2_COMPAT_MASK) { | ||
| 364 | 381 | ||
| 365 | case JFFS2_FEATURE_INCOMPAT: | 382 | case JFFS2_FEATURE_INCOMPAT: |
| 366 | JFFS2_ERROR("unknown INCOMPAT nodetype %#04X at %#08x\n", | 383 | JFFS2_ERROR("unknown INCOMPAT nodetype %#04X at %#08x\n", |
| 367 | je16_to_cpu(un->nodetype), ref_offset(ref)); | 384 | je16_to_cpu(un->nodetype), ref_offset(ref)); |
| 368 | /* EEP */ | 385 | /* EEP */ |
| 369 | BUG(); | 386 | BUG(); |
| 370 | break; | 387 | break; |
| 371 | 388 | ||
| 372 | case JFFS2_FEATURE_ROCOMPAT: | 389 | case JFFS2_FEATURE_ROCOMPAT: |
| 373 | JFFS2_ERROR("unknown ROCOMPAT nodetype %#04X at %#08x\n", | 390 | JFFS2_ERROR("unknown ROCOMPAT nodetype %#04X at %#08x\n", |
| 374 | je16_to_cpu(un->nodetype), ref_offset(ref)); | 391 | je16_to_cpu(un->nodetype), ref_offset(ref)); |
| 375 | BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO)); | 392 | BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO)); |
| 376 | break; | 393 | break; |
| 377 | 394 | ||
| 378 | case JFFS2_FEATURE_RWCOMPAT_COPY: | 395 | case JFFS2_FEATURE_RWCOMPAT_COPY: |
| 379 | JFFS2_NOTICE("unknown RWCOMPAT_COPY nodetype %#04X at %#08x\n", | 396 | JFFS2_NOTICE("unknown RWCOMPAT_COPY nodetype %#04X at %#08x\n", |
| 380 | je16_to_cpu(un->nodetype), ref_offset(ref)); | 397 | je16_to_cpu(un->nodetype), ref_offset(ref)); |
| 381 | break; | 398 | break; |
| 382 | 399 | ||
| 383 | case JFFS2_FEATURE_RWCOMPAT_DELETE: | 400 | case JFFS2_FEATURE_RWCOMPAT_DELETE: |
| 384 | JFFS2_NOTICE("unknown RWCOMPAT_DELETE nodetype %#04X at %#08x\n", | 401 | JFFS2_NOTICE("unknown RWCOMPAT_DELETE nodetype %#04X at %#08x\n", |
| 385 | je16_to_cpu(un->nodetype), ref_offset(ref)); | 402 | je16_to_cpu(un->nodetype), ref_offset(ref)); |
| 386 | return 1; | 403 | return 1; |
| 387 | } | ||
| 388 | } | 404 | } |
| 389 | 405 | ||
| 390 | return 0; | 406 | return 0; |
| @@ -434,7 +450,7 @@ static int read_more(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, | |||
| 434 | } | 450 | } |
| 435 | 451 | ||
| 436 | if (retlen < len) { | 452 | if (retlen < len) { |
| 437 | JFFS2_ERROR("short read at %#08x: %d instead of %d.\n", | 453 | JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n", |
| 438 | offs, retlen, len); | 454 | offs, retlen, len); |
| 439 | return -EIO; | 455 | return -EIO; |
| 440 | } | 456 | } |
| @@ -542,13 +558,25 @@ static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_inf | |||
| 542 | } | 558 | } |
| 543 | 559 | ||
| 544 | if (retlen < len) { | 560 | if (retlen < len) { |
| 545 | JFFS2_ERROR("short read at %#08x: %d instead of %d.\n", ref_offset(ref), retlen, len); | 561 | JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n", ref_offset(ref), retlen, len); |
| 546 | err = -EIO; | 562 | err = -EIO; |
| 547 | goto free_out; | 563 | goto free_out; |
| 548 | } | 564 | } |
| 549 | 565 | ||
| 550 | node = (union jffs2_node_union *)bufstart; | 566 | node = (union jffs2_node_union *)bufstart; |
| 551 | 567 | ||
| 568 | /* No need to mask in the valid bit; it shouldn't be invalid */ | ||
| 569 | if (je32_to_cpu(node->u.hdr_crc) != crc32(0, node, sizeof(node->u)-4)) { | ||
| 570 | JFFS2_NOTICE("Node header CRC failed at %#08x. {%04x,%04x,%08x,%08x}\n", | ||
| 571 | ref_offset(ref), je16_to_cpu(node->u.magic), | ||
| 572 | je16_to_cpu(node->u.nodetype), | ||
| 573 | je32_to_cpu(node->u.totlen), | ||
| 574 | je32_to_cpu(node->u.hdr_crc)); | ||
| 575 | jffs2_dbg_dump_node(c, ref_offset(ref)); | ||
| 576 | jffs2_mark_node_obsolete(c, ref); | ||
| 577 | goto cont; | ||
| 578 | } | ||
| 579 | |||
| 552 | switch (je16_to_cpu(node->u.nodetype)) { | 580 | switch (je16_to_cpu(node->u.nodetype)) { |
| 553 | 581 | ||
| 554 | case JFFS2_NODETYPE_DIRENT: | 582 | case JFFS2_NODETYPE_DIRENT: |
| @@ -606,6 +634,7 @@ static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_inf | |||
| 606 | goto free_out; | 634 | goto free_out; |
| 607 | 635 | ||
| 608 | } | 636 | } |
| 637 | cont: | ||
| 609 | spin_lock(&c->erase_completion_lock); | 638 | spin_lock(&c->erase_completion_lock); |
| 610 | } | 639 | } |
| 611 | 640 | ||
| @@ -679,12 +708,12 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | |||
| 679 | jffs2_mark_node_obsolete(c, fn->raw); | 708 | jffs2_mark_node_obsolete(c, fn->raw); |
| 680 | 709 | ||
| 681 | BUG_ON(rb->rb_left); | 710 | BUG_ON(rb->rb_left); |
| 682 | if (rb->rb_parent && rb->rb_parent->rb_left == rb) { | 711 | if (rb_parent(rb) && rb_parent(rb)->rb_left == rb) { |
| 683 | /* We were then left-hand child of our parent. We need | 712 | /* We were then left-hand child of our parent. We need |
| 684 | * to move our own right-hand child into our place. */ | 713 | * to move our own right-hand child into our place. */ |
| 685 | repl_rb = rb->rb_right; | 714 | repl_rb = rb->rb_right; |
| 686 | if (repl_rb) | 715 | if (repl_rb) |
| 687 | repl_rb->rb_parent = rb->rb_parent; | 716 | rb_set_parent(repl_rb, rb_parent(rb)); |
| 688 | } else | 717 | } else |
| 689 | repl_rb = NULL; | 718 | repl_rb = NULL; |
| 690 | 719 | ||
| @@ -692,14 +721,14 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, | |||
| 692 | 721 | ||
| 693 | /* Remove the spent tn from the tree; don't bother rebalancing | 722 | /* Remove the spent tn from the tree; don't bother rebalancing |
| 694 | * but put our right-hand child in our own place. */ | 723 | * but put our right-hand child in our own place. */ |
| 695 | if (tn->rb.rb_parent) { | 724 | if (rb_parent(&tn->rb)) { |
| 696 | if (tn->rb.rb_parent->rb_left == &tn->rb) | 725 | if (rb_parent(&tn->rb)->rb_left == &tn->rb) |
| 697 | tn->rb.rb_parent->rb_left = repl_rb; | 726 | rb_parent(&tn->rb)->rb_left = repl_rb; |
| 698 | else if (tn->rb.rb_parent->rb_right == &tn->rb) | 727 | else if (rb_parent(&tn->rb)->rb_right == &tn->rb) |
| 699 | tn->rb.rb_parent->rb_right = repl_rb; | 728 | rb_parent(&tn->rb)->rb_right = repl_rb; |
| 700 | else BUG(); | 729 | else BUG(); |
| 701 | } else if (tn->rb.rb_right) | 730 | } else if (tn->rb.rb_right) |
| 702 | tn->rb.rb_right->rb_parent = NULL; | 731 | rb_set_parent(tn->rb.rb_right, NULL); |
| 703 | 732 | ||
| 704 | jffs2_free_tmp_dnode_info(tn); | 733 | jffs2_free_tmp_dnode_info(tn); |
| 705 | if (ret) { | 734 | if (ret) { |
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c index cf55b221fc2b..61618080b86f 100644 --- a/fs/jffs2/scan.c +++ b/fs/jffs2/scan.c | |||
| @@ -65,6 +65,28 @@ static inline uint32_t EMPTY_SCAN_SIZE(uint32_t sector_size) { | |||
| 65 | return DEFAULT_EMPTY_SCAN_SIZE; | 65 | return DEFAULT_EMPTY_SCAN_SIZE; |
| 66 | } | 66 | } |
| 67 | 67 | ||
| 68 | static int file_dirty(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) | ||
| 69 | { | ||
| 70 | int ret; | ||
| 71 | |||
| 72 | if ((ret = jffs2_prealloc_raw_node_refs(c, jeb, 1))) | ||
| 73 | return ret; | ||
| 74 | if ((ret = jffs2_scan_dirty_space(c, jeb, jeb->free_size))) | ||
| 75 | return ret; | ||
| 76 | /* Turned wasted size into dirty, since we apparently | ||
| 77 | think it's recoverable now. */ | ||
| 78 | jeb->dirty_size += jeb->wasted_size; | ||
| 79 | c->dirty_size += jeb->wasted_size; | ||
| 80 | c->wasted_size -= jeb->wasted_size; | ||
| 81 | jeb->wasted_size = 0; | ||
| 82 | if (VERYDIRTY(c, jeb->dirty_size)) { | ||
| 83 | list_add(&jeb->list, &c->very_dirty_list); | ||
| 84 | } else { | ||
| 85 | list_add(&jeb->list, &c->dirty_list); | ||
| 86 | } | ||
| 87 | return 0; | ||
| 88 | } | ||
| 89 | |||
| 68 | int jffs2_scan_medium(struct jffs2_sb_info *c) | 90 | int jffs2_scan_medium(struct jffs2_sb_info *c) |
| 69 | { | 91 | { |
| 70 | int i, ret; | 92 | int i, ret; |
| @@ -170,34 +192,20 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
| 170 | (!c->nextblock || c->nextblock->free_size < jeb->free_size)) { | 192 | (!c->nextblock || c->nextblock->free_size < jeb->free_size)) { |
| 171 | /* Better candidate for the next writes to go to */ | 193 | /* Better candidate for the next writes to go to */ |
| 172 | if (c->nextblock) { | 194 | if (c->nextblock) { |
| 173 | c->nextblock->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size; | 195 | ret = file_dirty(c, c->nextblock); |
| 174 | c->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size; | 196 | if (ret) |
| 175 | c->free_size -= c->nextblock->free_size; | 197 | return ret; |
| 176 | c->wasted_size -= c->nextblock->wasted_size; | ||
| 177 | c->nextblock->free_size = c->nextblock->wasted_size = 0; | ||
| 178 | if (VERYDIRTY(c, c->nextblock->dirty_size)) { | ||
| 179 | list_add(&c->nextblock->list, &c->very_dirty_list); | ||
| 180 | } else { | ||
| 181 | list_add(&c->nextblock->list, &c->dirty_list); | ||
| 182 | } | ||
| 183 | /* deleting summary information of the old nextblock */ | 198 | /* deleting summary information of the old nextblock */ |
| 184 | jffs2_sum_reset_collected(c->summary); | 199 | jffs2_sum_reset_collected(c->summary); |
| 185 | } | 200 | } |
| 186 | /* update collected summary infromation for the current nextblock */ | 201 | /* update collected summary information for the current nextblock */ |
| 187 | jffs2_sum_move_collected(c, s); | 202 | jffs2_sum_move_collected(c, s); |
| 188 | D1(printk(KERN_DEBUG "jffs2_scan_medium(): new nextblock = 0x%08x\n", jeb->offset)); | 203 | D1(printk(KERN_DEBUG "jffs2_scan_medium(): new nextblock = 0x%08x\n", jeb->offset)); |
| 189 | c->nextblock = jeb; | 204 | c->nextblock = jeb; |
| 190 | } else { | 205 | } else { |
| 191 | jeb->dirty_size += jeb->free_size + jeb->wasted_size; | 206 | ret = file_dirty(c, jeb); |
| 192 | c->dirty_size += jeb->free_size + jeb->wasted_size; | 207 | if (ret) |
| 193 | c->free_size -= jeb->free_size; | 208 | return ret; |
| 194 | c->wasted_size -= jeb->wasted_size; | ||
| 195 | jeb->free_size = jeb->wasted_size = 0; | ||
| 196 | if (VERYDIRTY(c, jeb->dirty_size)) { | ||
| 197 | list_add(&jeb->list, &c->very_dirty_list); | ||
| 198 | } else { | ||
| 199 | list_add(&jeb->list, &c->dirty_list); | ||
| 200 | } | ||
| 201 | } | 209 | } |
| 202 | break; | 210 | break; |
| 203 | 211 | ||
| @@ -222,9 +230,6 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
| 222 | } | 230 | } |
| 223 | } | 231 | } |
| 224 | 232 | ||
| 225 | if (jffs2_sum_active() && s) | ||
| 226 | kfree(s); | ||
| 227 | |||
| 228 | /* Nextblock dirty is always seen as wasted, because we cannot recycle it now */ | 233 | /* Nextblock dirty is always seen as wasted, because we cannot recycle it now */ |
| 229 | if (c->nextblock && (c->nextblock->dirty_size)) { | 234 | if (c->nextblock && (c->nextblock->dirty_size)) { |
| 230 | c->nextblock->wasted_size += c->nextblock->dirty_size; | 235 | c->nextblock->wasted_size += c->nextblock->dirty_size; |
| @@ -242,11 +247,8 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
| 242 | 247 | ||
| 243 | D1(printk(KERN_DEBUG "jffs2_scan_medium(): Skipping %d bytes in nextblock to ensure page alignment\n", | 248 | D1(printk(KERN_DEBUG "jffs2_scan_medium(): Skipping %d bytes in nextblock to ensure page alignment\n", |
| 244 | skip)); | 249 | skip)); |
| 245 | c->nextblock->wasted_size += skip; | 250 | jffs2_prealloc_raw_node_refs(c, c->nextblock, 1); |
| 246 | c->wasted_size += skip; | 251 | jffs2_scan_dirty_space(c, c->nextblock, skip); |
| 247 | |||
| 248 | c->nextblock->free_size -= skip; | ||
| 249 | c->free_size -= skip; | ||
| 250 | } | 252 | } |
| 251 | #endif | 253 | #endif |
| 252 | if (c->nr_erasing_blocks) { | 254 | if (c->nr_erasing_blocks) { |
| @@ -266,6 +268,9 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
| 266 | else | 268 | else |
| 267 | c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size); | 269 | c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size); |
| 268 | #endif | 270 | #endif |
| 271 | if (s) | ||
| 272 | kfree(s); | ||
| 273 | |||
| 269 | return ret; | 274 | return ret; |
| 270 | } | 275 | } |
| 271 | 276 | ||
| @@ -290,7 +295,7 @@ int jffs2_fill_scan_buf (struct jffs2_sb_info *c, void *buf, | |||
| 290 | int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) | 295 | int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) |
| 291 | { | 296 | { |
| 292 | if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size | 297 | if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size |
| 293 | && (!jeb->first_node || !jeb->first_node->next_phys) ) | 298 | && (!jeb->first_node || !ref_next(jeb->first_node)) ) |
| 294 | return BLK_STATE_CLEANMARKER; | 299 | return BLK_STATE_CLEANMARKER; |
| 295 | 300 | ||
| 296 | /* move blocks with max 4 byte dirty space to cleanlist */ | 301 | /* move blocks with max 4 byte dirty space to cleanlist */ |
| @@ -306,11 +311,119 @@ int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *je | |||
| 306 | return BLK_STATE_ALLDIRTY; | 311 | return BLK_STATE_ALLDIRTY; |
| 307 | } | 312 | } |
| 308 | 313 | ||
| 314 | #ifdef CONFIG_JFFS2_FS_XATTR | ||
| 315 | static int jffs2_scan_xattr_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | ||
| 316 | struct jffs2_raw_xattr *rx, uint32_t ofs, | ||
| 317 | struct jffs2_summary *s) | ||
| 318 | { | ||
| 319 | struct jffs2_xattr_datum *xd; | ||
| 320 | uint32_t totlen, crc; | ||
| 321 | int err; | ||
| 322 | |||
| 323 | crc = crc32(0, rx, sizeof(struct jffs2_raw_xattr) - 4); | ||
| 324 | if (crc != je32_to_cpu(rx->node_crc)) { | ||
| 325 | if (je32_to_cpu(rx->node_crc) != 0xffffffff) | ||
| 326 | JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n", | ||
| 327 | ofs, je32_to_cpu(rx->node_crc), crc); | ||
| 328 | if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen)))) | ||
| 329 | return err; | ||
| 330 | return 0; | ||
| 331 | } | ||
| 332 | |||
| 333 | totlen = PAD(sizeof(*rx) + rx->name_len + 1 + je16_to_cpu(rx->value_len)); | ||
| 334 | if (totlen != je32_to_cpu(rx->totlen)) { | ||
| 335 | JFFS2_WARNING("node length mismatch at %#08x, read=%u, calc=%u\n", | ||
| 336 | ofs, je32_to_cpu(rx->totlen), totlen); | ||
| 337 | if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen)))) | ||
| 338 | return err; | ||
| 339 | return 0; | ||
| 340 | } | ||
| 341 | |||
| 342 | xd = jffs2_setup_xattr_datum(c, je32_to_cpu(rx->xid), je32_to_cpu(rx->version)); | ||
| 343 | if (IS_ERR(xd)) { | ||
| 344 | if (PTR_ERR(xd) == -EEXIST) { | ||
| 345 | if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rx->totlen))))) | ||
| 346 | return err; | ||
| 347 | return 0; | ||
| 348 | } | ||
| 349 | return PTR_ERR(xd); | ||
| 350 | } | ||
| 351 | xd->xprefix = rx->xprefix; | ||
| 352 | xd->name_len = rx->name_len; | ||
| 353 | xd->value_len = je16_to_cpu(rx->value_len); | ||
| 354 | xd->data_crc = je32_to_cpu(rx->data_crc); | ||
| 355 | |||
| 356 | xd->node = jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, NULL); | ||
| 357 | /* FIXME */ xd->node->next_in_ino = (void *)xd; | ||
| 358 | |||
| 359 | if (jffs2_sum_active()) | ||
| 360 | jffs2_sum_add_xattr_mem(s, rx, ofs - jeb->offset); | ||
| 361 | dbg_xattr("scaning xdatum at %#08x (xid=%u, version=%u)\n", | ||
| 362 | ofs, xd->xid, xd->version); | ||
| 363 | return 0; | ||
| 364 | } | ||
| 365 | |||
| 366 | static int jffs2_scan_xref_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | ||
| 367 | struct jffs2_raw_xref *rr, uint32_t ofs, | ||
| 368 | struct jffs2_summary *s) | ||
| 369 | { | ||
| 370 | struct jffs2_xattr_ref *ref; | ||
| 371 | uint32_t crc; | ||
| 372 | int err; | ||
| 373 | |||
| 374 | crc = crc32(0, rr, sizeof(*rr) - 4); | ||
| 375 | if (crc != je32_to_cpu(rr->node_crc)) { | ||
| 376 | if (je32_to_cpu(rr->node_crc) != 0xffffffff) | ||
| 377 | JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n", | ||
| 378 | ofs, je32_to_cpu(rr->node_crc), crc); | ||
| 379 | if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rr->totlen))))) | ||
| 380 | return err; | ||
| 381 | return 0; | ||
| 382 | } | ||
| 383 | |||
| 384 | if (PAD(sizeof(struct jffs2_raw_xref)) != je32_to_cpu(rr->totlen)) { | ||
| 385 | JFFS2_WARNING("node length mismatch at %#08x, read=%u, calc=%zd\n", | ||
| 386 | ofs, je32_to_cpu(rr->totlen), | ||
| 387 | PAD(sizeof(struct jffs2_raw_xref))); | ||
| 388 | if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rr->totlen)))) | ||
| 389 | return err; | ||
| 390 | return 0; | ||
| 391 | } | ||
| 392 | |||
| 393 | ref = jffs2_alloc_xattr_ref(); | ||
| 394 | if (!ref) | ||
| 395 | return -ENOMEM; | ||
| 396 | |||
| 397 | /* BEFORE jffs2_build_xattr_subsystem() called, | ||
| 398 | * ref->xid is used to store 32bit xid, xd is not used | ||
| 399 | * ref->ino is used to store 32bit inode-number, ic is not used | ||
| 400 | * Thoes variables are declared as union, thus using those | ||
| 401 | * are exclusive. In a similar way, ref->next is temporarily | ||
| 402 | * used to chain all xattr_ref object. It's re-chained to | ||
| 403 | * jffs2_inode_cache in jffs2_build_xattr_subsystem() correctly. | ||
| 404 | */ | ||
| 405 | ref->ino = je32_to_cpu(rr->ino); | ||
| 406 | ref->xid = je32_to_cpu(rr->xid); | ||
| 407 | ref->next = c->xref_temp; | ||
| 408 | c->xref_temp = ref; | ||
| 409 | |||
| 410 | ref->node = jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(rr->totlen)), NULL); | ||
| 411 | /* FIXME */ ref->node->next_in_ino = (void *)ref; | ||
| 412 | |||
| 413 | if (jffs2_sum_active()) | ||
| 414 | jffs2_sum_add_xref_mem(s, rr, ofs - jeb->offset); | ||
| 415 | dbg_xattr("scan xref at %#08x (xid=%u, ino=%u)\n", | ||
| 416 | ofs, ref->xid, ref->ino); | ||
| 417 | return 0; | ||
| 418 | } | ||
| 419 | #endif | ||
| 420 | |||
| 421 | /* Called with 'buf_size == 0' if buf is in fact a pointer _directly_ into | ||
| 422 | the flash, XIP-style */ | ||
| 309 | static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | 423 | static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, |
| 310 | unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s) { | 424 | unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s) { |
| 311 | struct jffs2_unknown_node *node; | 425 | struct jffs2_unknown_node *node; |
| 312 | struct jffs2_unknown_node crcnode; | 426 | struct jffs2_unknown_node crcnode; |
| 313 | struct jffs2_sum_marker *sm; | ||
| 314 | uint32_t ofs, prevofs; | 427 | uint32_t ofs, prevofs; |
| 315 | uint32_t hdr_crc, buf_ofs, buf_len; | 428 | uint32_t hdr_crc, buf_ofs, buf_len; |
| 316 | int err; | 429 | int err; |
| @@ -344,44 +457,75 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo | |||
| 344 | #endif | 457 | #endif |
| 345 | 458 | ||
| 346 | if (jffs2_sum_active()) { | 459 | if (jffs2_sum_active()) { |
| 347 | sm = kmalloc(sizeof(struct jffs2_sum_marker), GFP_KERNEL); | 460 | struct jffs2_sum_marker *sm; |
| 348 | if (!sm) { | 461 | void *sumptr = NULL; |
| 349 | return -ENOMEM; | 462 | uint32_t sumlen; |
| 350 | } | 463 | |
| 351 | 464 | if (!buf_size) { | |
| 352 | err = jffs2_fill_scan_buf(c, (unsigned char *) sm, jeb->offset + c->sector_size - | 465 | /* XIP case. Just look, point at the summary if it's there */ |
| 353 | sizeof(struct jffs2_sum_marker), sizeof(struct jffs2_sum_marker)); | 466 | sm = (void *)buf + c->sector_size - sizeof(*sm); |
| 354 | if (err) { | 467 | if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) { |
| 355 | kfree(sm); | 468 | sumptr = buf + je32_to_cpu(sm->offset); |
| 356 | return err; | 469 | sumlen = c->sector_size - je32_to_cpu(sm->offset); |
| 357 | } | 470 | } |
| 358 | 471 | } else { | |
| 359 | if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC ) { | 472 | /* If NAND flash, read a whole page of it. Else just the end */ |
| 360 | err = jffs2_sum_scan_sumnode(c, jeb, je32_to_cpu(sm->offset), &pseudo_random); | 473 | if (c->wbuf_pagesize) |
| 361 | if (err) { | 474 | buf_len = c->wbuf_pagesize; |
| 362 | kfree(sm); | 475 | else |
| 476 | buf_len = sizeof(*sm); | ||
| 477 | |||
| 478 | /* Read as much as we want into the _end_ of the preallocated buffer */ | ||
| 479 | err = jffs2_fill_scan_buf(c, buf + buf_size - buf_len, | ||
| 480 | jeb->offset + c->sector_size - buf_len, | ||
| 481 | buf_len); | ||
| 482 | if (err) | ||
| 363 | return err; | 483 | return err; |
| 484 | |||
| 485 | sm = (void *)buf + buf_size - sizeof(*sm); | ||
| 486 | if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) { | ||
| 487 | sumlen = c->sector_size - je32_to_cpu(sm->offset); | ||
| 488 | sumptr = buf + buf_size - sumlen; | ||
| 489 | |||
| 490 | /* Now, make sure the summary itself is available */ | ||
| 491 | if (sumlen > buf_size) { | ||
| 492 | /* Need to kmalloc for this. */ | ||
| 493 | sumptr = kmalloc(sumlen, GFP_KERNEL); | ||
| 494 | if (!sumptr) | ||
| 495 | return -ENOMEM; | ||
| 496 | memcpy(sumptr + sumlen - buf_len, buf + buf_size - buf_len, buf_len); | ||
| 497 | } | ||
| 498 | if (buf_len < sumlen) { | ||
| 499 | /* Need to read more so that the entire summary node is present */ | ||
| 500 | err = jffs2_fill_scan_buf(c, sumptr, | ||
| 501 | jeb->offset + c->sector_size - sumlen, | ||
| 502 | sumlen - buf_len); | ||
| 503 | if (err) | ||
| 504 | return err; | ||
| 505 | } | ||
| 364 | } | 506 | } |
| 507 | |||
| 365 | } | 508 | } |
| 366 | 509 | ||
| 367 | kfree(sm); | 510 | if (sumptr) { |
| 511 | err = jffs2_sum_scan_sumnode(c, jeb, sumptr, sumlen, &pseudo_random); | ||
| 368 | 512 | ||
| 369 | ofs = jeb->offset; | 513 | if (buf_size && sumlen > buf_size) |
| 370 | prevofs = jeb->offset - 1; | 514 | kfree(sumptr); |
| 515 | /* If it returns with a real error, bail. | ||
| 516 | If it returns positive, that's a block classification | ||
| 517 | (i.e. BLK_STATE_xxx) so return that too. | ||
| 518 | If it returns zero, fall through to full scan. */ | ||
| 519 | if (err) | ||
| 520 | return err; | ||
| 521 | } | ||
| 371 | } | 522 | } |
| 372 | 523 | ||
| 373 | buf_ofs = jeb->offset; | 524 | buf_ofs = jeb->offset; |
| 374 | 525 | ||
| 375 | if (!buf_size) { | 526 | if (!buf_size) { |
| 527 | /* This is the XIP case -- we're reading _directly_ from the flash chip */ | ||
| 376 | buf_len = c->sector_size; | 528 | buf_len = c->sector_size; |
| 377 | |||
| 378 | if (jffs2_sum_active()) { | ||
| 379 | /* must reread because of summary test */ | ||
| 380 | err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len); | ||
| 381 | if (err) | ||
| 382 | return err; | ||
| 383 | } | ||
| 384 | |||
| 385 | } else { | 529 | } else { |
| 386 | buf_len = EMPTY_SCAN_SIZE(c->sector_size); | 530 | buf_len = EMPTY_SCAN_SIZE(c->sector_size); |
| 387 | err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len); | 531 | err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len); |
| @@ -418,7 +562,10 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo | |||
| 418 | if (ofs) { | 562 | if (ofs) { |
| 419 | D1(printk(KERN_DEBUG "Free space at %08x ends at %08x\n", jeb->offset, | 563 | D1(printk(KERN_DEBUG "Free space at %08x ends at %08x\n", jeb->offset, |
| 420 | jeb->offset + ofs)); | 564 | jeb->offset + ofs)); |
| 421 | DIRTY_SPACE(ofs); | 565 | if ((err = jffs2_prealloc_raw_node_refs(c, jeb, 1))) |
| 566 | return err; | ||
| 567 | if ((err = jffs2_scan_dirty_space(c, jeb, ofs))) | ||
| 568 | return err; | ||
| 422 | } | 569 | } |
| 423 | 570 | ||
| 424 | /* Now ofs is a complete physical flash offset as it always was... */ | 571 | /* Now ofs is a complete physical flash offset as it always was... */ |
| @@ -433,6 +580,11 @@ scan_more: | |||
| 433 | 580 | ||
| 434 | jffs2_dbg_acct_paranoia_check_nolock(c, jeb); | 581 | jffs2_dbg_acct_paranoia_check_nolock(c, jeb); |
| 435 | 582 | ||
| 583 | /* Make sure there are node refs available for use */ | ||
| 584 | err = jffs2_prealloc_raw_node_refs(c, jeb, 2); | ||
| 585 | if (err) | ||
| 586 | return err; | ||
| 587 | |||
| 436 | cond_resched(); | 588 | cond_resched(); |
| 437 | 589 | ||
| 438 | if (ofs & 3) { | 590 | if (ofs & 3) { |
| @@ -442,7 +594,8 @@ scan_more: | |||
| 442 | } | 594 | } |
| 443 | if (ofs == prevofs) { | 595 | if (ofs == prevofs) { |
| 444 | printk(KERN_WARNING "ofs 0x%08x has already been seen. Skipping\n", ofs); | 596 | printk(KERN_WARNING "ofs 0x%08x has already been seen. Skipping\n", ofs); |
| 445 | DIRTY_SPACE(4); | 597 | if ((err = jffs2_scan_dirty_space(c, jeb, 4))) |
| 598 | return err; | ||
| 446 | ofs += 4; | 599 | ofs += 4; |
| 447 | continue; | 600 | continue; |
| 448 | } | 601 | } |
| @@ -451,7 +604,8 @@ scan_more: | |||
| 451 | if (jeb->offset + c->sector_size < ofs + sizeof(*node)) { | 604 | if (jeb->offset + c->sector_size < ofs + sizeof(*node)) { |
| 452 | D1(printk(KERN_DEBUG "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n", sizeof(struct jffs2_unknown_node), | 605 | D1(printk(KERN_DEBUG "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n", sizeof(struct jffs2_unknown_node), |
| 453 | jeb->offset, c->sector_size, ofs, sizeof(*node))); | 606 | jeb->offset, c->sector_size, ofs, sizeof(*node))); |
| 454 | DIRTY_SPACE((jeb->offset + c->sector_size)-ofs); | 607 | if ((err = jffs2_scan_dirty_space(c, jeb, (jeb->offset + c->sector_size)-ofs))) |
| 608 | return err; | ||
| 455 | break; | 609 | break; |
| 456 | } | 610 | } |
| 457 | 611 | ||
| @@ -481,7 +635,8 @@ scan_more: | |||
| 481 | if (*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff) { | 635 | if (*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff) { |
| 482 | printk(KERN_WARNING "Empty flash at 0x%08x ends at 0x%08x\n", | 636 | printk(KERN_WARNING "Empty flash at 0x%08x ends at 0x%08x\n", |
| 483 | empty_start, ofs); | 637 | empty_start, ofs); |
| 484 | DIRTY_SPACE(ofs-empty_start); | 638 | if ((err = jffs2_scan_dirty_space(c, jeb, ofs-empty_start))) |
| 639 | return err; | ||
| 485 | goto scan_more; | 640 | goto scan_more; |
| 486 | } | 641 | } |
| 487 | 642 | ||
| @@ -494,7 +649,7 @@ scan_more: | |||
| 494 | /* If we're only checking the beginning of a block with a cleanmarker, | 649 | /* If we're only checking the beginning of a block with a cleanmarker, |
| 495 | bail now */ | 650 | bail now */ |
| 496 | if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) && | 651 | if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) && |
| 497 | c->cleanmarker_size && !jeb->dirty_size && !jeb->first_node->next_phys) { | 652 | c->cleanmarker_size && !jeb->dirty_size && !ref_next(jeb->first_node)) { |
| 498 | D1(printk(KERN_DEBUG "%d bytes at start of block seems clean... assuming all clean\n", EMPTY_SCAN_SIZE(c->sector_size))); | 653 | D1(printk(KERN_DEBUG "%d bytes at start of block seems clean... assuming all clean\n", EMPTY_SCAN_SIZE(c->sector_size))); |
| 499 | return BLK_STATE_CLEANMARKER; | 654 | return BLK_STATE_CLEANMARKER; |
| 500 | } | 655 | } |
| @@ -518,20 +673,23 @@ scan_more: | |||
| 518 | 673 | ||
| 519 | if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) { | 674 | if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) { |
| 520 | printk(KERN_WARNING "Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n", ofs); | 675 | printk(KERN_WARNING "Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n", ofs); |
| 521 | DIRTY_SPACE(4); | 676 | if ((err = jffs2_scan_dirty_space(c, jeb, 4))) |
| 677 | return err; | ||
| 522 | ofs += 4; | 678 | ofs += 4; |
| 523 | continue; | 679 | continue; |
| 524 | } | 680 | } |
| 525 | if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) { | 681 | if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) { |
| 526 | D1(printk(KERN_DEBUG "Dirty bitmask at 0x%08x\n", ofs)); | 682 | D1(printk(KERN_DEBUG "Dirty bitmask at 0x%08x\n", ofs)); |
| 527 | DIRTY_SPACE(4); | 683 | if ((err = jffs2_scan_dirty_space(c, jeb, 4))) |
| 684 | return err; | ||
| 528 | ofs += 4; | 685 | ofs += 4; |
| 529 | continue; | 686 | continue; |
| 530 | } | 687 | } |
| 531 | if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) { | 688 | if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) { |
| 532 | printk(KERN_WARNING "Old JFFS2 bitmask found at 0x%08x\n", ofs); | 689 | printk(KERN_WARNING "Old JFFS2 bitmask found at 0x%08x\n", ofs); |
| 533 | printk(KERN_WARNING "You cannot use older JFFS2 filesystems with newer kernels\n"); | 690 | printk(KERN_WARNING "You cannot use older JFFS2 filesystems with newer kernels\n"); |
| 534 | DIRTY_SPACE(4); | 691 | if ((err = jffs2_scan_dirty_space(c, jeb, 4))) |
| 692 | return err; | ||
| 535 | ofs += 4; | 693 | ofs += 4; |
| 536 | continue; | 694 | continue; |
| 537 | } | 695 | } |
| @@ -540,7 +698,8 @@ scan_more: | |||
| 540 | noisy_printk(&noise, "jffs2_scan_eraseblock(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n", | 698 | noisy_printk(&noise, "jffs2_scan_eraseblock(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n", |
| 541 | JFFS2_MAGIC_BITMASK, ofs, | 699 | JFFS2_MAGIC_BITMASK, ofs, |
| 542 | je16_to_cpu(node->magic)); | 700 | je16_to_cpu(node->magic)); |
| 543 | DIRTY_SPACE(4); | 701 | if ((err = jffs2_scan_dirty_space(c, jeb, 4))) |
| 702 | return err; | ||
| 544 | ofs += 4; | 703 | ofs += 4; |
| 545 | continue; | 704 | continue; |
| 546 | } | 705 | } |
| @@ -557,7 +716,8 @@ scan_more: | |||
| 557 | je32_to_cpu(node->totlen), | 716 | je32_to_cpu(node->totlen), |
| 558 | je32_to_cpu(node->hdr_crc), | 717 | je32_to_cpu(node->hdr_crc), |
| 559 | hdr_crc); | 718 | hdr_crc); |
| 560 | DIRTY_SPACE(4); | 719 | if ((err = jffs2_scan_dirty_space(c, jeb, 4))) |
| 720 | return err; | ||
| 561 | ofs += 4; | 721 | ofs += 4; |
| 562 | continue; | 722 | continue; |
| 563 | } | 723 | } |
| @@ -568,7 +728,8 @@ scan_more: | |||
| 568 | printk(KERN_WARNING "Node at 0x%08x with length 0x%08x would run over the end of the erase block\n", | 728 | printk(KERN_WARNING "Node at 0x%08x with length 0x%08x would run over the end of the erase block\n", |
| 569 | ofs, je32_to_cpu(node->totlen)); | 729 | ofs, je32_to_cpu(node->totlen)); |
| 570 | printk(KERN_WARNING "Perhaps the file system was created with the wrong erase size?\n"); | 730 | printk(KERN_WARNING "Perhaps the file system was created with the wrong erase size?\n"); |
| 571 | DIRTY_SPACE(4); | 731 | if ((err = jffs2_scan_dirty_space(c, jeb, 4))) |
| 732 | return err; | ||
| 572 | ofs += 4; | 733 | ofs += 4; |
| 573 | continue; | 734 | continue; |
| 574 | } | 735 | } |
| @@ -576,7 +737,8 @@ scan_more: | |||
| 576 | if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) { | 737 | if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) { |
| 577 | /* Wheee. This is an obsoleted node */ | 738 | /* Wheee. This is an obsoleted node */ |
| 578 | D2(printk(KERN_DEBUG "Node at 0x%08x is obsolete. Skipping\n", ofs)); | 739 | D2(printk(KERN_DEBUG "Node at 0x%08x is obsolete. Skipping\n", ofs)); |
| 579 | DIRTY_SPACE(PAD(je32_to_cpu(node->totlen))); | 740 | if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen))))) |
| 741 | return err; | ||
| 580 | ofs += PAD(je32_to_cpu(node->totlen)); | 742 | ofs += PAD(je32_to_cpu(node->totlen)); |
| 581 | continue; | 743 | continue; |
| 582 | } | 744 | } |
| @@ -614,30 +776,59 @@ scan_more: | |||
| 614 | ofs += PAD(je32_to_cpu(node->totlen)); | 776 | ofs += PAD(je32_to_cpu(node->totlen)); |
| 615 | break; | 777 | break; |
| 616 | 778 | ||
| 779 | #ifdef CONFIG_JFFS2_FS_XATTR | ||
| 780 | case JFFS2_NODETYPE_XATTR: | ||
| 781 | if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) { | ||
| 782 | buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); | ||
| 783 | D1(printk(KERN_DEBUG "Fewer than %d bytes (xattr node)" | ||
| 784 | " left to end of buf. Reading 0x%x at 0x%08x\n", | ||
| 785 | je32_to_cpu(node->totlen), buf_len, ofs)); | ||
| 786 | err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); | ||
| 787 | if (err) | ||
| 788 | return err; | ||
| 789 | buf_ofs = ofs; | ||
| 790 | node = (void *)buf; | ||
| 791 | } | ||
| 792 | err = jffs2_scan_xattr_node(c, jeb, (void *)node, ofs, s); | ||
| 793 | if (err) | ||
| 794 | return err; | ||
| 795 | ofs += PAD(je32_to_cpu(node->totlen)); | ||
| 796 | break; | ||
| 797 | case JFFS2_NODETYPE_XREF: | ||
| 798 | if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) { | ||
| 799 | buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); | ||
| 800 | D1(printk(KERN_DEBUG "Fewer than %d bytes (xref node)" | ||
| 801 | " left to end of buf. Reading 0x%x at 0x%08x\n", | ||
| 802 | je32_to_cpu(node->totlen), buf_len, ofs)); | ||
| 803 | err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); | ||
| 804 | if (err) | ||
| 805 | return err; | ||
| 806 | buf_ofs = ofs; | ||
| 807 | node = (void *)buf; | ||
| 808 | } | ||
| 809 | err = jffs2_scan_xref_node(c, jeb, (void *)node, ofs, s); | ||
| 810 | if (err) | ||
| 811 | return err; | ||
| 812 | ofs += PAD(je32_to_cpu(node->totlen)); | ||
| 813 | break; | ||
| 814 | #endif /* CONFIG_JFFS2_FS_XATTR */ | ||
| 815 | |||
| 617 | case JFFS2_NODETYPE_CLEANMARKER: | 816 | case JFFS2_NODETYPE_CLEANMARKER: |
| 618 | D1(printk(KERN_DEBUG "CLEANMARKER node found at 0x%08x\n", ofs)); | 817 | D1(printk(KERN_DEBUG "CLEANMARKER node found at 0x%08x\n", ofs)); |
| 619 | if (je32_to_cpu(node->totlen) != c->cleanmarker_size) { | 818 | if (je32_to_cpu(node->totlen) != c->cleanmarker_size) { |
| 620 | printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n", | 819 | printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n", |
| 621 | ofs, je32_to_cpu(node->totlen), c->cleanmarker_size); | 820 | ofs, je32_to_cpu(node->totlen), c->cleanmarker_size); |
| 622 | DIRTY_SPACE(PAD(sizeof(struct jffs2_unknown_node))); | 821 | if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node))))) |
| 822 | return err; | ||
| 623 | ofs += PAD(sizeof(struct jffs2_unknown_node)); | 823 | ofs += PAD(sizeof(struct jffs2_unknown_node)); |
| 624 | } else if (jeb->first_node) { | 824 | } else if (jeb->first_node) { |
| 625 | printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n", ofs, jeb->offset); | 825 | printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n", ofs, jeb->offset); |
| 626 | DIRTY_SPACE(PAD(sizeof(struct jffs2_unknown_node))); | 826 | if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node))))) |
| 827 | return err; | ||
| 627 | ofs += PAD(sizeof(struct jffs2_unknown_node)); | 828 | ofs += PAD(sizeof(struct jffs2_unknown_node)); |
| 628 | } else { | 829 | } else { |
| 629 | struct jffs2_raw_node_ref *marker_ref = jffs2_alloc_raw_node_ref(); | 830 | jffs2_link_node_ref(c, jeb, ofs | REF_NORMAL, c->cleanmarker_size, NULL); |
| 630 | if (!marker_ref) { | ||
| 631 | printk(KERN_NOTICE "Failed to allocate node ref for clean marker\n"); | ||
| 632 | return -ENOMEM; | ||
| 633 | } | ||
| 634 | marker_ref->next_in_ino = NULL; | ||
| 635 | marker_ref->next_phys = NULL; | ||
| 636 | marker_ref->flash_offset = ofs | REF_NORMAL; | ||
| 637 | marker_ref->__totlen = c->cleanmarker_size; | ||
| 638 | jeb->first_node = jeb->last_node = marker_ref; | ||
| 639 | 831 | ||
| 640 | USED_SPACE(PAD(c->cleanmarker_size)); | ||
| 641 | ofs += PAD(c->cleanmarker_size); | 832 | ofs += PAD(c->cleanmarker_size); |
| 642 | } | 833 | } |
| 643 | break; | 834 | break; |
| @@ -645,7 +836,8 @@ scan_more: | |||
| 645 | case JFFS2_NODETYPE_PADDING: | 836 | case JFFS2_NODETYPE_PADDING: |
| 646 | if (jffs2_sum_active()) | 837 | if (jffs2_sum_active()) |
| 647 | jffs2_sum_add_padding_mem(s, je32_to_cpu(node->totlen)); | 838 | jffs2_sum_add_padding_mem(s, je32_to_cpu(node->totlen)); |
| 648 | DIRTY_SPACE(PAD(je32_to_cpu(node->totlen))); | 839 | if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen))))) |
| 840 | return err; | ||
| 649 | ofs += PAD(je32_to_cpu(node->totlen)); | 841 | ofs += PAD(je32_to_cpu(node->totlen)); |
| 650 | break; | 842 | break; |
| 651 | 843 | ||
| @@ -656,7 +848,8 @@ scan_more: | |||
| 656 | c->flags |= JFFS2_SB_FLAG_RO; | 848 | c->flags |= JFFS2_SB_FLAG_RO; |
| 657 | if (!(jffs2_is_readonly(c))) | 849 | if (!(jffs2_is_readonly(c))) |
| 658 | return -EROFS; | 850 | return -EROFS; |
| 659 | DIRTY_SPACE(PAD(je32_to_cpu(node->totlen))); | 851 | if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen))))) |
| 852 | return err; | ||
| 660 | ofs += PAD(je32_to_cpu(node->totlen)); | 853 | ofs += PAD(je32_to_cpu(node->totlen)); |
| 661 | break; | 854 | break; |
| 662 | 855 | ||
| @@ -666,15 +859,21 @@ scan_more: | |||
| 666 | 859 | ||
| 667 | case JFFS2_FEATURE_RWCOMPAT_DELETE: | 860 | case JFFS2_FEATURE_RWCOMPAT_DELETE: |
| 668 | D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs)); | 861 | D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs)); |
| 669 | DIRTY_SPACE(PAD(je32_to_cpu(node->totlen))); | 862 | if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen))))) |
| 863 | return err; | ||
| 670 | ofs += PAD(je32_to_cpu(node->totlen)); | 864 | ofs += PAD(je32_to_cpu(node->totlen)); |
| 671 | break; | 865 | break; |
| 672 | 866 | ||
| 673 | case JFFS2_FEATURE_RWCOMPAT_COPY: | 867 | case JFFS2_FEATURE_RWCOMPAT_COPY: { |
| 674 | D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs)); | 868 | D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs)); |
| 675 | USED_SPACE(PAD(je32_to_cpu(node->totlen))); | 869 | |
| 870 | jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(node->totlen)), NULL); | ||
| 871 | |||
| 872 | /* We can't summarise nodes we don't grok */ | ||
| 873 | jffs2_sum_disable_collecting(s); | ||
| 676 | ofs += PAD(je32_to_cpu(node->totlen)); | 874 | ofs += PAD(je32_to_cpu(node->totlen)); |
| 677 | break; | 875 | break; |
| 876 | } | ||
| 678 | } | 877 | } |
| 679 | } | 878 | } |
| 680 | } | 879 | } |
| @@ -687,9 +886,9 @@ scan_more: | |||
| 687 | } | 886 | } |
| 688 | } | 887 | } |
| 689 | 888 | ||
| 690 | D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x\n", jeb->offset, | 889 | D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x, wasted 0x%08x\n", |
| 691 | jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size)); | 890 | jeb->offset,jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size, jeb->wasted_size)); |
| 692 | 891 | ||
| 693 | /* mark_node_obsolete can add to wasted !! */ | 892 | /* mark_node_obsolete can add to wasted !! */ |
| 694 | if (jeb->wasted_size) { | 893 | if (jeb->wasted_size) { |
| 695 | jeb->dirty_size += jeb->wasted_size; | 894 | jeb->dirty_size += jeb->wasted_size; |
| @@ -730,9 +929,9 @@ struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uin | |||
| 730 | static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | 929 | static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, |
| 731 | struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s) | 930 | struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s) |
| 732 | { | 931 | { |
| 733 | struct jffs2_raw_node_ref *raw; | ||
| 734 | struct jffs2_inode_cache *ic; | 932 | struct jffs2_inode_cache *ic; |
| 735 | uint32_t ino = je32_to_cpu(ri->ino); | 933 | uint32_t ino = je32_to_cpu(ri->ino); |
| 934 | int err; | ||
| 736 | 935 | ||
| 737 | D1(printk(KERN_DEBUG "jffs2_scan_inode_node(): Node at 0x%08x\n", ofs)); | 936 | D1(printk(KERN_DEBUG "jffs2_scan_inode_node(): Node at 0x%08x\n", ofs)); |
| 738 | 937 | ||
| @@ -745,12 +944,6 @@ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_erasebloc | |||
| 745 | Which means that the _full_ amount of time to get to proper write mode with GC | 944 | Which means that the _full_ amount of time to get to proper write mode with GC |
| 746 | operational may actually be _longer_ than before. Sucks to be me. */ | 945 | operational may actually be _longer_ than before. Sucks to be me. */ |
| 747 | 946 | ||
| 748 | raw = jffs2_alloc_raw_node_ref(); | ||
| 749 | if (!raw) { | ||
| 750 | printk(KERN_NOTICE "jffs2_scan_inode_node(): allocation of node reference failed\n"); | ||
| 751 | return -ENOMEM; | ||
| 752 | } | ||
| 753 | |||
| 754 | ic = jffs2_get_ino_cache(c, ino); | 947 | ic = jffs2_get_ino_cache(c, ino); |
| 755 | if (!ic) { | 948 | if (!ic) { |
| 756 | /* Inocache get failed. Either we read a bogus ino# or it's just genuinely the | 949 | /* Inocache get failed. Either we read a bogus ino# or it's just genuinely the |
| @@ -762,30 +955,17 @@ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_erasebloc | |||
| 762 | printk(KERN_NOTICE "jffs2_scan_inode_node(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", | 955 | printk(KERN_NOTICE "jffs2_scan_inode_node(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", |
| 763 | ofs, je32_to_cpu(ri->node_crc), crc); | 956 | ofs, je32_to_cpu(ri->node_crc), crc); |
| 764 | /* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */ | 957 | /* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */ |
| 765 | DIRTY_SPACE(PAD(je32_to_cpu(ri->totlen))); | 958 | if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(ri->totlen))))) |
| 766 | jffs2_free_raw_node_ref(raw); | 959 | return err; |
| 767 | return 0; | 960 | return 0; |
| 768 | } | 961 | } |
| 769 | ic = jffs2_scan_make_ino_cache(c, ino); | 962 | ic = jffs2_scan_make_ino_cache(c, ino); |
| 770 | if (!ic) { | 963 | if (!ic) |
| 771 | jffs2_free_raw_node_ref(raw); | ||
| 772 | return -ENOMEM; | 964 | return -ENOMEM; |
| 773 | } | ||
| 774 | } | 965 | } |
| 775 | 966 | ||
| 776 | /* Wheee. It worked */ | 967 | /* Wheee. It worked */ |
| 777 | 968 | jffs2_link_node_ref(c, jeb, ofs | REF_UNCHECKED, PAD(je32_to_cpu(ri->totlen)), ic); | |
| 778 | raw->flash_offset = ofs | REF_UNCHECKED; | ||
| 779 | raw->__totlen = PAD(je32_to_cpu(ri->totlen)); | ||
| 780 | raw->next_phys = NULL; | ||
| 781 | raw->next_in_ino = ic->nodes; | ||
| 782 | |||
| 783 | ic->nodes = raw; | ||
| 784 | if (!jeb->first_node) | ||
| 785 | jeb->first_node = raw; | ||
| 786 | if (jeb->last_node) | ||
| 787 | jeb->last_node->next_phys = raw; | ||
| 788 | jeb->last_node = raw; | ||
| 789 | 969 | ||
| 790 | D1(printk(KERN_DEBUG "Node is ino #%u, version %d. Range 0x%x-0x%x\n", | 970 | D1(printk(KERN_DEBUG "Node is ino #%u, version %d. Range 0x%x-0x%x\n", |
| 791 | je32_to_cpu(ri->ino), je32_to_cpu(ri->version), | 971 | je32_to_cpu(ri->ino), je32_to_cpu(ri->version), |
| @@ -794,8 +974,6 @@ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_erasebloc | |||
| 794 | 974 | ||
| 795 | pseudo_random += je32_to_cpu(ri->version); | 975 | pseudo_random += je32_to_cpu(ri->version); |
| 796 | 976 | ||
| 797 | UNCHECKED_SPACE(PAD(je32_to_cpu(ri->totlen))); | ||
| 798 | |||
| 799 | if (jffs2_sum_active()) { | 977 | if (jffs2_sum_active()) { |
| 800 | jffs2_sum_add_inode_mem(s, ri, ofs - jeb->offset); | 978 | jffs2_sum_add_inode_mem(s, ri, ofs - jeb->offset); |
| 801 | } | 979 | } |
| @@ -806,10 +984,10 @@ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_erasebloc | |||
| 806 | static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | 984 | static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, |
| 807 | struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s) | 985 | struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s) |
| 808 | { | 986 | { |
| 809 | struct jffs2_raw_node_ref *raw; | ||
| 810 | struct jffs2_full_dirent *fd; | 987 | struct jffs2_full_dirent *fd; |
| 811 | struct jffs2_inode_cache *ic; | 988 | struct jffs2_inode_cache *ic; |
| 812 | uint32_t crc; | 989 | uint32_t crc; |
| 990 | int err; | ||
| 813 | 991 | ||
| 814 | D1(printk(KERN_DEBUG "jffs2_scan_dirent_node(): Node at 0x%08x\n", ofs)); | 992 | D1(printk(KERN_DEBUG "jffs2_scan_dirent_node(): Node at 0x%08x\n", ofs)); |
| 815 | 993 | ||
| @@ -821,7 +999,8 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo | |||
| 821 | printk(KERN_NOTICE "jffs2_scan_dirent_node(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", | 999 | printk(KERN_NOTICE "jffs2_scan_dirent_node(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", |
| 822 | ofs, je32_to_cpu(rd->node_crc), crc); | 1000 | ofs, je32_to_cpu(rd->node_crc), crc); |
| 823 | /* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */ | 1001 | /* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */ |
| 824 | DIRTY_SPACE(PAD(je32_to_cpu(rd->totlen))); | 1002 | if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen))))) |
| 1003 | return err; | ||
| 825 | return 0; | 1004 | return 0; |
| 826 | } | 1005 | } |
| 827 | 1006 | ||
| @@ -842,40 +1021,23 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo | |||
| 842 | jffs2_free_full_dirent(fd); | 1021 | jffs2_free_full_dirent(fd); |
| 843 | /* FIXME: Why do we believe totlen? */ | 1022 | /* FIXME: Why do we believe totlen? */ |
| 844 | /* We believe totlen because the CRC on the node _header_ was OK, just the name failed. */ | 1023 | /* We believe totlen because the CRC on the node _header_ was OK, just the name failed. */ |
| 845 | DIRTY_SPACE(PAD(je32_to_cpu(rd->totlen))); | 1024 | if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen))))) |
| 1025 | return err; | ||
| 846 | return 0; | 1026 | return 0; |
| 847 | } | 1027 | } |
| 848 | raw = jffs2_alloc_raw_node_ref(); | ||
| 849 | if (!raw) { | ||
| 850 | jffs2_free_full_dirent(fd); | ||
| 851 | printk(KERN_NOTICE "jffs2_scan_dirent_node(): allocation of node reference failed\n"); | ||
| 852 | return -ENOMEM; | ||
| 853 | } | ||
| 854 | ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(rd->pino)); | 1028 | ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(rd->pino)); |
| 855 | if (!ic) { | 1029 | if (!ic) { |
| 856 | jffs2_free_full_dirent(fd); | 1030 | jffs2_free_full_dirent(fd); |
| 857 | jffs2_free_raw_node_ref(raw); | ||
| 858 | return -ENOMEM; | 1031 | return -ENOMEM; |
| 859 | } | 1032 | } |
| 860 | 1033 | ||
| 861 | raw->__totlen = PAD(je32_to_cpu(rd->totlen)); | 1034 | fd->raw = jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(rd->totlen)), ic); |
| 862 | raw->flash_offset = ofs | REF_PRISTINE; | ||
| 863 | raw->next_phys = NULL; | ||
| 864 | raw->next_in_ino = ic->nodes; | ||
| 865 | ic->nodes = raw; | ||
| 866 | if (!jeb->first_node) | ||
| 867 | jeb->first_node = raw; | ||
| 868 | if (jeb->last_node) | ||
| 869 | jeb->last_node->next_phys = raw; | ||
| 870 | jeb->last_node = raw; | ||
| 871 | 1035 | ||
| 872 | fd->raw = raw; | ||
| 873 | fd->next = NULL; | 1036 | fd->next = NULL; |
| 874 | fd->version = je32_to_cpu(rd->version); | 1037 | fd->version = je32_to_cpu(rd->version); |
| 875 | fd->ino = je32_to_cpu(rd->ino); | 1038 | fd->ino = je32_to_cpu(rd->ino); |
| 876 | fd->nhash = full_name_hash(fd->name, rd->nsize); | 1039 | fd->nhash = full_name_hash(fd->name, rd->nsize); |
| 877 | fd->type = rd->type; | 1040 | fd->type = rd->type; |
| 878 | USED_SPACE(PAD(je32_to_cpu(rd->totlen))); | ||
| 879 | jffs2_add_fd_to_list(c, fd, &ic->scan_dents); | 1041 | jffs2_add_fd_to_list(c, fd, &ic->scan_dents); |
| 880 | 1042 | ||
| 881 | if (jffs2_sum_active()) { | 1043 | if (jffs2_sum_active()) { |
diff --git a/fs/jffs2/security.c b/fs/jffs2/security.c new file mode 100644 index 000000000000..52a9894a6364 --- /dev/null +++ b/fs/jffs2/security.c | |||
| @@ -0,0 +1,82 @@ | |||
| 1 | /* | ||
| 2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
| 3 | * | ||
| 4 | * Copyright (C) 2006 NEC Corporation | ||
| 5 | * | ||
| 6 | * Created by KaiGai Kohei <kaigai@ak.jp.nec.com> | ||
| 7 | * | ||
| 8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
| 9 | * | ||
| 10 | */ | ||
| 11 | #include <linux/kernel.h> | ||
| 12 | #include <linux/slab.h> | ||
| 13 | #include <linux/fs.h> | ||
| 14 | #include <linux/time.h> | ||
| 15 | #include <linux/pagemap.h> | ||
| 16 | #include <linux/highmem.h> | ||
| 17 | #include <linux/crc32.h> | ||
| 18 | #include <linux/jffs2.h> | ||
| 19 | #include <linux/xattr.h> | ||
| 20 | #include <linux/mtd/mtd.h> | ||
| 21 | #include <linux/security.h> | ||
| 22 | #include "nodelist.h" | ||
| 23 | |||
| 24 | /* ---- Initial Security Label Attachment -------------- */ | ||
| 25 | int jffs2_init_security(struct inode *inode, struct inode *dir) | ||
| 26 | { | ||
| 27 | int rc; | ||
| 28 | size_t len; | ||
| 29 | void *value; | ||
| 30 | char *name; | ||
| 31 | |||
| 32 | rc = security_inode_init_security(inode, dir, &name, &value, &len); | ||
| 33 | if (rc) { | ||
| 34 | if (rc == -EOPNOTSUPP) | ||
| 35 | return 0; | ||
| 36 | return rc; | ||
| 37 | } | ||
| 38 | rc = do_jffs2_setxattr(inode, JFFS2_XPREFIX_SECURITY, name, value, len, 0); | ||
| 39 | |||
| 40 | kfree(name); | ||
| 41 | kfree(value); | ||
| 42 | return rc; | ||
| 43 | } | ||
| 44 | |||
| 45 | /* ---- XATTR Handler for "security.*" ----------------- */ | ||
| 46 | static int jffs2_security_getxattr(struct inode *inode, const char *name, | ||
| 47 | void *buffer, size_t size) | ||
| 48 | { | ||
| 49 | if (!strcmp(name, "")) | ||
| 50 | return -EINVAL; | ||
| 51 | |||
| 52 | return do_jffs2_getxattr(inode, JFFS2_XPREFIX_SECURITY, name, buffer, size); | ||
| 53 | } | ||
| 54 | |||
| 55 | static int jffs2_security_setxattr(struct inode *inode, const char *name, const void *buffer, | ||
| 56 | size_t size, int flags) | ||
| 57 | { | ||
| 58 | if (!strcmp(name, "")) | ||
| 59 | return -EINVAL; | ||
| 60 | |||
| 61 | return do_jffs2_setxattr(inode, JFFS2_XPREFIX_SECURITY, name, buffer, size, flags); | ||
| 62 | } | ||
| 63 | |||
| 64 | static size_t jffs2_security_listxattr(struct inode *inode, char *list, size_t list_size, | ||
| 65 | const char *name, size_t name_len) | ||
| 66 | { | ||
| 67 | size_t retlen = XATTR_SECURITY_PREFIX_LEN + name_len + 1; | ||
| 68 | |||
| 69 | if (list && retlen <= list_size) { | ||
| 70 | strcpy(list, XATTR_SECURITY_PREFIX); | ||
| 71 | strcpy(list + XATTR_SECURITY_PREFIX_LEN, name); | ||
| 72 | } | ||
| 73 | |||
| 74 | return retlen; | ||
| 75 | } | ||
| 76 | |||
| 77 | struct xattr_handler jffs2_security_xattr_handler = { | ||
| 78 | .prefix = XATTR_SECURITY_PREFIX, | ||
| 79 | .list = jffs2_security_listxattr, | ||
| 80 | .set = jffs2_security_setxattr, | ||
| 81 | .get = jffs2_security_getxattr | ||
| 82 | }; | ||
diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c index fb9cec61fcf2..0b02fc79e4d1 100644 --- a/fs/jffs2/summary.c +++ b/fs/jffs2/summary.c | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | * Zoltan Sogor <weth@inf.u-szeged.hu>, | 5 | * Zoltan Sogor <weth@inf.u-szeged.hu>, |
| 6 | * Patrik Kluba <pajko@halom.u-szeged.hu>, | 6 | * Patrik Kluba <pajko@halom.u-szeged.hu>, |
| 7 | * University of Szeged, Hungary | 7 | * University of Szeged, Hungary |
| 8 | * 2005 KaiGai Kohei <kaigai@ak.jp.nec.com> | ||
| 8 | * | 9 | * |
| 9 | * For licensing information, see the file 'LICENCE' in this directory. | 10 | * For licensing information, see the file 'LICENCE' in this directory. |
| 10 | * | 11 | * |
| @@ -81,6 +82,19 @@ static int jffs2_sum_add_mem(struct jffs2_summary *s, union jffs2_sum_mem *item) | |||
| 81 | dbg_summary("dirent (%u) added to summary\n", | 82 | dbg_summary("dirent (%u) added to summary\n", |
| 82 | je32_to_cpu(item->d.ino)); | 83 | je32_to_cpu(item->d.ino)); |
| 83 | break; | 84 | break; |
| 85 | #ifdef CONFIG_JFFS2_FS_XATTR | ||
| 86 | case JFFS2_NODETYPE_XATTR: | ||
| 87 | s->sum_size += JFFS2_SUMMARY_XATTR_SIZE; | ||
| 88 | s->sum_num++; | ||
| 89 | dbg_summary("xattr (xid=%u, version=%u) added to summary\n", | ||
| 90 | je32_to_cpu(item->x.xid), je32_to_cpu(item->x.version)); | ||
| 91 | break; | ||
| 92 | case JFFS2_NODETYPE_XREF: | ||
| 93 | s->sum_size += JFFS2_SUMMARY_XREF_SIZE; | ||
| 94 | s->sum_num++; | ||
| 95 | dbg_summary("xref added to summary\n"); | ||
| 96 | break; | ||
| 97 | #endif | ||
| 84 | default: | 98 | default: |
| 85 | JFFS2_WARNING("UNKNOWN node type %u\n", | 99 | JFFS2_WARNING("UNKNOWN node type %u\n", |
| 86 | je16_to_cpu(item->u.nodetype)); | 100 | je16_to_cpu(item->u.nodetype)); |
| @@ -141,6 +155,40 @@ int jffs2_sum_add_dirent_mem(struct jffs2_summary *s, struct jffs2_raw_dirent *r | |||
| 141 | return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp); | 155 | return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp); |
| 142 | } | 156 | } |
| 143 | 157 | ||
| 158 | #ifdef CONFIG_JFFS2_FS_XATTR | ||
| 159 | int jffs2_sum_add_xattr_mem(struct jffs2_summary *s, struct jffs2_raw_xattr *rx, uint32_t ofs) | ||
| 160 | { | ||
| 161 | struct jffs2_sum_xattr_mem *temp; | ||
| 162 | |||
| 163 | temp = kmalloc(sizeof(struct jffs2_sum_xattr_mem), GFP_KERNEL); | ||
| 164 | if (!temp) | ||
| 165 | return -ENOMEM; | ||
| 166 | |||
| 167 | temp->nodetype = rx->nodetype; | ||
| 168 | temp->xid = rx->xid; | ||
| 169 | temp->version = rx->version; | ||
| 170 | temp->offset = cpu_to_je32(ofs); | ||
| 171 | temp->totlen = rx->totlen; | ||
| 172 | temp->next = NULL; | ||
| 173 | |||
| 174 | return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp); | ||
| 175 | } | ||
| 176 | |||
| 177 | int jffs2_sum_add_xref_mem(struct jffs2_summary *s, struct jffs2_raw_xref *rr, uint32_t ofs) | ||
| 178 | { | ||
| 179 | struct jffs2_sum_xref_mem *temp; | ||
| 180 | |||
| 181 | temp = kmalloc(sizeof(struct jffs2_sum_xref_mem), GFP_KERNEL); | ||
| 182 | if (!temp) | ||
| 183 | return -ENOMEM; | ||
| 184 | |||
| 185 | temp->nodetype = rr->nodetype; | ||
| 186 | temp->offset = cpu_to_je32(ofs); | ||
| 187 | temp->next = NULL; | ||
| 188 | |||
| 189 | return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp); | ||
| 190 | } | ||
| 191 | #endif | ||
| 144 | /* Cleanup every collected summary information */ | 192 | /* Cleanup every collected summary information */ |
| 145 | 193 | ||
| 146 | static void jffs2_sum_clean_collected(struct jffs2_summary *s) | 194 | static void jffs2_sum_clean_collected(struct jffs2_summary *s) |
| @@ -259,7 +307,40 @@ int jffs2_sum_add_kvec(struct jffs2_sb_info *c, const struct kvec *invecs, | |||
| 259 | 307 | ||
| 260 | return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp); | 308 | return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp); |
| 261 | } | 309 | } |
| 310 | #ifdef CONFIG_JFFS2_FS_XATTR | ||
| 311 | case JFFS2_NODETYPE_XATTR: { | ||
| 312 | struct jffs2_sum_xattr_mem *temp; | ||
| 313 | if (je32_to_cpu(node->x.version) == 0xffffffff) | ||
| 314 | return 0; | ||
| 315 | temp = kmalloc(sizeof(struct jffs2_sum_xattr_mem), GFP_KERNEL); | ||
| 316 | if (!temp) | ||
| 317 | goto no_mem; | ||
| 318 | |||
| 319 | temp->nodetype = node->x.nodetype; | ||
| 320 | temp->xid = node->x.xid; | ||
| 321 | temp->version = node->x.version; | ||
| 322 | temp->totlen = node->x.totlen; | ||
| 323 | temp->offset = cpu_to_je32(ofs); | ||
| 324 | temp->next = NULL; | ||
| 325 | |||
| 326 | return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp); | ||
| 327 | } | ||
| 328 | case JFFS2_NODETYPE_XREF: { | ||
| 329 | struct jffs2_sum_xref_mem *temp; | ||
| 330 | |||
| 331 | if (je32_to_cpu(node->r.ino) == 0xffffffff | ||
| 332 | && je32_to_cpu(node->r.xid) == 0xffffffff) | ||
| 333 | return 0; | ||
| 334 | temp = kmalloc(sizeof(struct jffs2_sum_xref_mem), GFP_KERNEL); | ||
| 335 | if (!temp) | ||
| 336 | goto no_mem; | ||
| 337 | temp->nodetype = node->r.nodetype; | ||
| 338 | temp->offset = cpu_to_je32(ofs); | ||
| 339 | temp->next = NULL; | ||
| 262 | 340 | ||
| 341 | return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp); | ||
| 342 | } | ||
| 343 | #endif | ||
| 263 | case JFFS2_NODETYPE_PADDING: | 344 | case JFFS2_NODETYPE_PADDING: |
| 264 | dbg_summary("node PADDING\n"); | 345 | dbg_summary("node PADDING\n"); |
| 265 | c->summary->sum_padded += je32_to_cpu(node->u.totlen); | 346 | c->summary->sum_padded += je32_to_cpu(node->u.totlen); |
| @@ -288,23 +369,41 @@ no_mem: | |||
| 288 | return -ENOMEM; | 369 | return -ENOMEM; |
| 289 | } | 370 | } |
| 290 | 371 | ||
| 372 | static struct jffs2_raw_node_ref *sum_link_node_ref(struct jffs2_sb_info *c, | ||
| 373 | struct jffs2_eraseblock *jeb, | ||
| 374 | uint32_t ofs, uint32_t len, | ||
| 375 | struct jffs2_inode_cache *ic) | ||
| 376 | { | ||
| 377 | /* If there was a gap, mark it dirty */ | ||
| 378 | if ((ofs & ~3) > c->sector_size - jeb->free_size) { | ||
| 379 | /* Ew. Summary doesn't actually tell us explicitly about dirty space */ | ||
| 380 | jffs2_scan_dirty_space(c, jeb, (ofs & ~3) - (c->sector_size - jeb->free_size)); | ||
| 381 | } | ||
| 382 | |||
| 383 | return jffs2_link_node_ref(c, jeb, jeb->offset + ofs, len, ic); | ||
| 384 | } | ||
| 291 | 385 | ||
| 292 | /* Process the stored summary information - helper function for jffs2_sum_scan_sumnode() */ | 386 | /* Process the stored summary information - helper function for jffs2_sum_scan_sumnode() */ |
| 293 | 387 | ||
| 294 | static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | 388 | static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, |
| 295 | struct jffs2_raw_summary *summary, uint32_t *pseudo_random) | 389 | struct jffs2_raw_summary *summary, uint32_t *pseudo_random) |
| 296 | { | 390 | { |
| 297 | struct jffs2_raw_node_ref *raw; | ||
| 298 | struct jffs2_inode_cache *ic; | 391 | struct jffs2_inode_cache *ic; |
| 299 | struct jffs2_full_dirent *fd; | 392 | struct jffs2_full_dirent *fd; |
| 300 | void *sp; | 393 | void *sp; |
| 301 | int i, ino; | 394 | int i, ino; |
| 395 | int err; | ||
| 302 | 396 | ||
| 303 | sp = summary->sum; | 397 | sp = summary->sum; |
| 304 | 398 | ||
| 305 | for (i=0; i<je32_to_cpu(summary->sum_num); i++) { | 399 | for (i=0; i<je32_to_cpu(summary->sum_num); i++) { |
| 306 | dbg_summary("processing summary index %d\n", i); | 400 | dbg_summary("processing summary index %d\n", i); |
| 307 | 401 | ||
| 402 | /* Make sure there's a spare ref for dirty space */ | ||
| 403 | err = jffs2_prealloc_raw_node_refs(c, jeb, 2); | ||
| 404 | if (err) | ||
| 405 | return err; | ||
| 406 | |||
| 308 | switch (je16_to_cpu(((struct jffs2_sum_unknown_flash *)sp)->nodetype)) { | 407 | switch (je16_to_cpu(((struct jffs2_sum_unknown_flash *)sp)->nodetype)) { |
| 309 | case JFFS2_NODETYPE_INODE: { | 408 | case JFFS2_NODETYPE_INODE: { |
| 310 | struct jffs2_sum_inode_flash *spi; | 409 | struct jffs2_sum_inode_flash *spi; |
| @@ -312,38 +411,20 @@ static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eras | |||
| 312 | 411 | ||
| 313 | ino = je32_to_cpu(spi->inode); | 412 | ino = je32_to_cpu(spi->inode); |
| 314 | 413 | ||
| 315 | dbg_summary("Inode at 0x%08x\n", | 414 | dbg_summary("Inode at 0x%08x-0x%08x\n", |
| 316 | jeb->offset + je32_to_cpu(spi->offset)); | 415 | jeb->offset + je32_to_cpu(spi->offset), |
| 317 | 416 | jeb->offset + je32_to_cpu(spi->offset) + je32_to_cpu(spi->totlen)); | |
| 318 | raw = jffs2_alloc_raw_node_ref(); | ||
| 319 | if (!raw) { | ||
| 320 | JFFS2_NOTICE("allocation of node reference failed\n"); | ||
| 321 | kfree(summary); | ||
| 322 | return -ENOMEM; | ||
| 323 | } | ||
| 324 | 417 | ||
| 325 | ic = jffs2_scan_make_ino_cache(c, ino); | 418 | ic = jffs2_scan_make_ino_cache(c, ino); |
| 326 | if (!ic) { | 419 | if (!ic) { |
| 327 | JFFS2_NOTICE("scan_make_ino_cache failed\n"); | 420 | JFFS2_NOTICE("scan_make_ino_cache failed\n"); |
| 328 | jffs2_free_raw_node_ref(raw); | ||
| 329 | kfree(summary); | ||
| 330 | return -ENOMEM; | 421 | return -ENOMEM; |
| 331 | } | 422 | } |
| 332 | 423 | ||
| 333 | raw->flash_offset = (jeb->offset + je32_to_cpu(spi->offset)) | REF_UNCHECKED; | 424 | sum_link_node_ref(c, jeb, je32_to_cpu(spi->offset) | REF_UNCHECKED, |
| 334 | raw->__totlen = PAD(je32_to_cpu(spi->totlen)); | 425 | PAD(je32_to_cpu(spi->totlen)), ic); |
| 335 | raw->next_phys = NULL; | ||
| 336 | raw->next_in_ino = ic->nodes; | ||
| 337 | |||
| 338 | ic->nodes = raw; | ||
| 339 | if (!jeb->first_node) | ||
| 340 | jeb->first_node = raw; | ||
| 341 | if (jeb->last_node) | ||
| 342 | jeb->last_node->next_phys = raw; | ||
| 343 | jeb->last_node = raw; | ||
| 344 | *pseudo_random += je32_to_cpu(spi->version); | ||
| 345 | 426 | ||
| 346 | UNCHECKED_SPACE(PAD(je32_to_cpu(spi->totlen))); | 427 | *pseudo_random += je32_to_cpu(spi->version); |
| 347 | 428 | ||
| 348 | sp += JFFS2_SUMMARY_INODE_SIZE; | 429 | sp += JFFS2_SUMMARY_INODE_SIZE; |
| 349 | 430 | ||
| @@ -354,52 +435,33 @@ static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eras | |||
| 354 | struct jffs2_sum_dirent_flash *spd; | 435 | struct jffs2_sum_dirent_flash *spd; |
| 355 | spd = sp; | 436 | spd = sp; |
| 356 | 437 | ||
| 357 | dbg_summary("Dirent at 0x%08x\n", | 438 | dbg_summary("Dirent at 0x%08x-0x%08x\n", |
| 358 | jeb->offset + je32_to_cpu(spd->offset)); | 439 | jeb->offset + je32_to_cpu(spd->offset), |
| 440 | jeb->offset + je32_to_cpu(spd->offset) + je32_to_cpu(spd->totlen)); | ||
| 441 | |||
| 359 | 442 | ||
| 360 | fd = jffs2_alloc_full_dirent(spd->nsize+1); | 443 | fd = jffs2_alloc_full_dirent(spd->nsize+1); |
| 361 | if (!fd) { | 444 | if (!fd) |
| 362 | kfree(summary); | ||
| 363 | return -ENOMEM; | 445 | return -ENOMEM; |
| 364 | } | ||
| 365 | 446 | ||
| 366 | memcpy(&fd->name, spd->name, spd->nsize); | 447 | memcpy(&fd->name, spd->name, spd->nsize); |
| 367 | fd->name[spd->nsize] = 0; | 448 | fd->name[spd->nsize] = 0; |
| 368 | 449 | ||
| 369 | raw = jffs2_alloc_raw_node_ref(); | ||
| 370 | if (!raw) { | ||
| 371 | jffs2_free_full_dirent(fd); | ||
| 372 | JFFS2_NOTICE("allocation of node reference failed\n"); | ||
| 373 | kfree(summary); | ||
| 374 | return -ENOMEM; | ||
| 375 | } | ||
| 376 | |||
| 377 | ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(spd->pino)); | 450 | ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(spd->pino)); |
| 378 | if (!ic) { | 451 | if (!ic) { |
| 379 | jffs2_free_full_dirent(fd); | 452 | jffs2_free_full_dirent(fd); |
| 380 | jffs2_free_raw_node_ref(raw); | ||
| 381 | kfree(summary); | ||
| 382 | return -ENOMEM; | 453 | return -ENOMEM; |
| 383 | } | 454 | } |
| 384 | 455 | ||
| 385 | raw->__totlen = PAD(je32_to_cpu(spd->totlen)); | 456 | fd->raw = sum_link_node_ref(c, jeb, je32_to_cpu(spd->offset) | REF_UNCHECKED, |
| 386 | raw->flash_offset = (jeb->offset + je32_to_cpu(spd->offset)) | REF_PRISTINE; | 457 | PAD(je32_to_cpu(spd->totlen)), ic); |
| 387 | raw->next_phys = NULL; | 458 | |
| 388 | raw->next_in_ino = ic->nodes; | ||
| 389 | ic->nodes = raw; | ||
| 390 | if (!jeb->first_node) | ||
| 391 | jeb->first_node = raw; | ||
| 392 | if (jeb->last_node) | ||
| 393 | jeb->last_node->next_phys = raw; | ||
| 394 | jeb->last_node = raw; | ||
| 395 | |||
| 396 | fd->raw = raw; | ||
| 397 | fd->next = NULL; | 459 | fd->next = NULL; |
| 398 | fd->version = je32_to_cpu(spd->version); | 460 | fd->version = je32_to_cpu(spd->version); |
| 399 | fd->ino = je32_to_cpu(spd->ino); | 461 | fd->ino = je32_to_cpu(spd->ino); |
| 400 | fd->nhash = full_name_hash(fd->name, spd->nsize); | 462 | fd->nhash = full_name_hash(fd->name, spd->nsize); |
| 401 | fd->type = spd->type; | 463 | fd->type = spd->type; |
| 402 | USED_SPACE(PAD(je32_to_cpu(spd->totlen))); | 464 | |
| 403 | jffs2_add_fd_to_list(c, fd, &ic->scan_dents); | 465 | jffs2_add_fd_to_list(c, fd, &ic->scan_dents); |
| 404 | 466 | ||
| 405 | *pseudo_random += je32_to_cpu(spd->version); | 467 | *pseudo_random += je32_to_cpu(spd->version); |
| @@ -408,48 +470,105 @@ static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eras | |||
| 408 | 470 | ||
| 409 | break; | 471 | break; |
| 410 | } | 472 | } |
| 473 | #ifdef CONFIG_JFFS2_FS_XATTR | ||
| 474 | case JFFS2_NODETYPE_XATTR: { | ||
| 475 | struct jffs2_xattr_datum *xd; | ||
| 476 | struct jffs2_sum_xattr_flash *spx; | ||
| 477 | |||
| 478 | spx = (struct jffs2_sum_xattr_flash *)sp; | ||
| 479 | dbg_summary("xattr at %#08x-%#08x (xid=%u, version=%u)\n", | ||
| 480 | jeb->offset + je32_to_cpu(spx->offset), | ||
| 481 | jeb->offset + je32_to_cpu(spx->offset) + je32_to_cpu(spx->totlen), | ||
| 482 | je32_to_cpu(spx->xid), je32_to_cpu(spx->version)); | ||
| 483 | |||
| 484 | xd = jffs2_setup_xattr_datum(c, je32_to_cpu(spx->xid), | ||
| 485 | je32_to_cpu(spx->version)); | ||
| 486 | if (IS_ERR(xd)) { | ||
| 487 | if (PTR_ERR(xd) == -EEXIST) { | ||
| 488 | /* a newer version of xd exists */ | ||
| 489 | if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(spx->totlen)))) | ||
| 490 | return err; | ||
| 491 | sp += JFFS2_SUMMARY_XATTR_SIZE; | ||
| 492 | break; | ||
| 493 | } | ||
| 494 | JFFS2_NOTICE("allocation of xattr_datum failed\n"); | ||
| 495 | return PTR_ERR(xd); | ||
| 496 | } | ||
| 497 | |||
| 498 | xd->node = sum_link_node_ref(c, jeb, je32_to_cpu(spx->offset) | REF_UNCHECKED, | ||
| 499 | PAD(je32_to_cpu(spx->totlen)), NULL); | ||
| 500 | /* FIXME */ xd->node->next_in_ino = (void *)xd; | ||
| 501 | |||
| 502 | *pseudo_random += je32_to_cpu(spx->xid); | ||
| 503 | sp += JFFS2_SUMMARY_XATTR_SIZE; | ||
| 504 | |||
| 505 | break; | ||
| 506 | } | ||
| 507 | case JFFS2_NODETYPE_XREF: { | ||
| 508 | struct jffs2_xattr_ref *ref; | ||
| 509 | struct jffs2_sum_xref_flash *spr; | ||
| 510 | |||
| 511 | spr = (struct jffs2_sum_xref_flash *)sp; | ||
| 512 | dbg_summary("xref at %#08x-%#08x\n", | ||
| 513 | jeb->offset + je32_to_cpu(spr->offset), | ||
| 514 | jeb->offset + je32_to_cpu(spr->offset) + | ||
| 515 | (uint32_t)PAD(sizeof(struct jffs2_raw_xref))); | ||
| 516 | |||
| 517 | ref = jffs2_alloc_xattr_ref(); | ||
| 518 | if (!ref) { | ||
| 519 | JFFS2_NOTICE("allocation of xattr_datum failed\n"); | ||
| 520 | return -ENOMEM; | ||
| 521 | } | ||
| 522 | ref->ino = 0xfffffffe; | ||
| 523 | ref->xid = 0xfffffffd; | ||
| 524 | ref->next = c->xref_temp; | ||
| 525 | c->xref_temp = ref; | ||
| 411 | 526 | ||
| 527 | ref->node = sum_link_node_ref(c, jeb, je32_to_cpu(spr->offset) | REF_UNCHECKED, | ||
| 528 | PAD(sizeof(struct jffs2_raw_xref)), NULL); | ||
| 529 | /* FIXME */ ref->node->next_in_ino = (void *)ref; | ||
| 530 | |||
| 531 | *pseudo_random += ref->node->flash_offset; | ||
| 532 | sp += JFFS2_SUMMARY_XREF_SIZE; | ||
| 533 | |||
| 534 | break; | ||
| 535 | } | ||
| 536 | #endif | ||
| 412 | default : { | 537 | default : { |
| 413 | JFFS2_WARNING("Unsupported node type found in summary! Exiting..."); | 538 | uint16_t nodetype = je16_to_cpu(((struct jffs2_sum_unknown_flash *)sp)->nodetype); |
| 414 | kfree(summary); | 539 | JFFS2_WARNING("Unsupported node type %x found in summary! Exiting...\n", nodetype); |
| 415 | return -EIO; | 540 | if ((nodetype & JFFS2_COMPAT_MASK) == JFFS2_FEATURE_INCOMPAT) |
| 541 | return -EIO; | ||
| 542 | |||
| 543 | /* For compatible node types, just fall back to the full scan */ | ||
| 544 | c->wasted_size -= jeb->wasted_size; | ||
| 545 | c->free_size += c->sector_size - jeb->free_size; | ||
| 546 | c->used_size -= jeb->used_size; | ||
| 547 | c->dirty_size -= jeb->dirty_size; | ||
| 548 | jeb->wasted_size = jeb->used_size = jeb->dirty_size = 0; | ||
| 549 | jeb->free_size = c->sector_size; | ||
| 550 | |||
| 551 | jffs2_free_jeb_node_refs(c, jeb); | ||
| 552 | return -ENOTRECOVERABLE; | ||
| 416 | } | 553 | } |
| 417 | } | 554 | } |
| 418 | } | 555 | } |
| 419 | |||
| 420 | kfree(summary); | ||
| 421 | return 0; | 556 | return 0; |
| 422 | } | 557 | } |
| 423 | 558 | ||
| 424 | /* Process the summary node - called from jffs2_scan_eraseblock() */ | 559 | /* Process the summary node - called from jffs2_scan_eraseblock() */ |
| 425 | |||
| 426 | int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | 560 | int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, |
| 427 | uint32_t ofs, uint32_t *pseudo_random) | 561 | struct jffs2_raw_summary *summary, uint32_t sumsize, |
| 562 | uint32_t *pseudo_random) | ||
| 428 | { | 563 | { |
| 429 | struct jffs2_unknown_node crcnode; | 564 | struct jffs2_unknown_node crcnode; |
| 430 | struct jffs2_raw_node_ref *cache_ref; | 565 | int ret, ofs; |
| 431 | struct jffs2_raw_summary *summary; | ||
| 432 | int ret, sumsize; | ||
| 433 | uint32_t crc; | 566 | uint32_t crc; |
| 434 | 567 | ||
| 435 | sumsize = c->sector_size - ofs; | 568 | ofs = c->sector_size - sumsize; |
| 436 | ofs += jeb->offset; | ||
| 437 | 569 | ||
| 438 | dbg_summary("summary found for 0x%08x at 0x%08x (0x%x bytes)\n", | 570 | dbg_summary("summary found for 0x%08x at 0x%08x (0x%x bytes)\n", |
| 439 | jeb->offset, ofs, sumsize); | 571 | jeb->offset, jeb->offset + ofs, sumsize); |
| 440 | |||
| 441 | summary = kmalloc(sumsize, GFP_KERNEL); | ||
| 442 | |||
| 443 | if (!summary) { | ||
| 444 | return -ENOMEM; | ||
| 445 | } | ||
| 446 | |||
| 447 | ret = jffs2_fill_scan_buf(c, (unsigned char *)summary, ofs, sumsize); | ||
| 448 | |||
| 449 | if (ret) { | ||
| 450 | kfree(summary); | ||
| 451 | return ret; | ||
| 452 | } | ||
| 453 | 572 | ||
| 454 | /* OK, now check for node validity and CRC */ | 573 | /* OK, now check for node validity and CRC */ |
| 455 | crcnode.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); | 574 | crcnode.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); |
| @@ -486,66 +605,49 @@ int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb | |||
| 486 | 605 | ||
| 487 | dbg_summary("Summary : CLEANMARKER node \n"); | 606 | dbg_summary("Summary : CLEANMARKER node \n"); |
| 488 | 607 | ||
| 608 | ret = jffs2_prealloc_raw_node_refs(c, jeb, 1); | ||
| 609 | if (ret) | ||
| 610 | return ret; | ||
| 611 | |||
| 489 | if (je32_to_cpu(summary->cln_mkr) != c->cleanmarker_size) { | 612 | if (je32_to_cpu(summary->cln_mkr) != c->cleanmarker_size) { |
| 490 | dbg_summary("CLEANMARKER node has totlen 0x%x != normal 0x%x\n", | 613 | dbg_summary("CLEANMARKER node has totlen 0x%x != normal 0x%x\n", |
| 491 | je32_to_cpu(summary->cln_mkr), c->cleanmarker_size); | 614 | je32_to_cpu(summary->cln_mkr), c->cleanmarker_size); |
| 492 | UNCHECKED_SPACE(PAD(je32_to_cpu(summary->cln_mkr))); | 615 | if ((ret = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(summary->cln_mkr))))) |
| 616 | return ret; | ||
| 493 | } else if (jeb->first_node) { | 617 | } else if (jeb->first_node) { |
| 494 | dbg_summary("CLEANMARKER node not first node in block " | 618 | dbg_summary("CLEANMARKER node not first node in block " |
| 495 | "(0x%08x)\n", jeb->offset); | 619 | "(0x%08x)\n", jeb->offset); |
| 496 | UNCHECKED_SPACE(PAD(je32_to_cpu(summary->cln_mkr))); | 620 | if ((ret = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(summary->cln_mkr))))) |
| 621 | return ret; | ||
| 497 | } else { | 622 | } else { |
| 498 | struct jffs2_raw_node_ref *marker_ref = jffs2_alloc_raw_node_ref(); | 623 | jffs2_link_node_ref(c, jeb, jeb->offset | REF_NORMAL, |
| 499 | 624 | je32_to_cpu(summary->cln_mkr), NULL); | |
| 500 | if (!marker_ref) { | ||
| 501 | JFFS2_NOTICE("Failed to allocate node ref for clean marker\n"); | ||
| 502 | kfree(summary); | ||
| 503 | return -ENOMEM; | ||
| 504 | } | ||
| 505 | |||
| 506 | marker_ref->next_in_ino = NULL; | ||
| 507 | marker_ref->next_phys = NULL; | ||
| 508 | marker_ref->flash_offset = jeb->offset | REF_NORMAL; | ||
| 509 | marker_ref->__totlen = je32_to_cpu(summary->cln_mkr); | ||
| 510 | jeb->first_node = jeb->last_node = marker_ref; | ||
| 511 | |||
| 512 | USED_SPACE( PAD(je32_to_cpu(summary->cln_mkr)) ); | ||
| 513 | } | 625 | } |
| 514 | } | 626 | } |
| 515 | 627 | ||
| 516 | if (je32_to_cpu(summary->padded)) { | ||
| 517 | DIRTY_SPACE(je32_to_cpu(summary->padded)); | ||
| 518 | } | ||
| 519 | |||
| 520 | ret = jffs2_sum_process_sum_data(c, jeb, summary, pseudo_random); | 628 | ret = jffs2_sum_process_sum_data(c, jeb, summary, pseudo_random); |
| 629 | /* -ENOTRECOVERABLE isn't a fatal error -- it means we should do a full | ||
| 630 | scan of this eraseblock. So return zero */ | ||
| 631 | if (ret == -ENOTRECOVERABLE) | ||
| 632 | return 0; | ||
| 521 | if (ret) | 633 | if (ret) |
| 522 | return ret; | 634 | return ret; /* real error */ |
| 523 | 635 | ||
| 524 | /* for PARANOIA_CHECK */ | 636 | /* for PARANOIA_CHECK */ |
| 525 | cache_ref = jffs2_alloc_raw_node_ref(); | 637 | ret = jffs2_prealloc_raw_node_refs(c, jeb, 2); |
| 526 | 638 | if (ret) | |
| 527 | if (!cache_ref) { | 639 | return ret; |
| 528 | JFFS2_NOTICE("Failed to allocate node ref for cache\n"); | ||
| 529 | return -ENOMEM; | ||
| 530 | } | ||
| 531 | |||
| 532 | cache_ref->next_in_ino = NULL; | ||
| 533 | cache_ref->next_phys = NULL; | ||
| 534 | cache_ref->flash_offset = ofs | REF_NORMAL; | ||
| 535 | cache_ref->__totlen = sumsize; | ||
| 536 | |||
| 537 | if (!jeb->first_node) | ||
| 538 | jeb->first_node = cache_ref; | ||
| 539 | if (jeb->last_node) | ||
| 540 | jeb->last_node->next_phys = cache_ref; | ||
| 541 | jeb->last_node = cache_ref; | ||
| 542 | 640 | ||
| 543 | USED_SPACE(sumsize); | 641 | sum_link_node_ref(c, jeb, ofs | REF_NORMAL, sumsize, NULL); |
| 544 | 642 | ||
| 545 | jeb->wasted_size += jeb->free_size; | 643 | if (unlikely(jeb->free_size)) { |
| 546 | c->wasted_size += jeb->free_size; | 644 | JFFS2_WARNING("Free size 0x%x bytes in eraseblock @0x%08x with summary?\n", |
| 547 | c->free_size -= jeb->free_size; | 645 | jeb->free_size, jeb->offset); |
| 548 | jeb->free_size = 0; | 646 | jeb->wasted_size += jeb->free_size; |
| 647 | c->wasted_size += jeb->free_size; | ||
| 648 | c->free_size -= jeb->free_size; | ||
| 649 | jeb->free_size = 0; | ||
| 650 | } | ||
| 549 | 651 | ||
| 550 | return jffs2_scan_classify_jeb(c, jeb); | 652 | return jffs2_scan_classify_jeb(c, jeb); |
| 551 | 653 | ||
| @@ -564,6 +666,7 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock | |||
| 564 | union jffs2_sum_mem *temp; | 666 | union jffs2_sum_mem *temp; |
| 565 | struct jffs2_sum_marker *sm; | 667 | struct jffs2_sum_marker *sm; |
| 566 | struct kvec vecs[2]; | 668 | struct kvec vecs[2]; |
| 669 | uint32_t sum_ofs; | ||
| 567 | void *wpage; | 670 | void *wpage; |
| 568 | int ret; | 671 | int ret; |
| 569 | size_t retlen; | 672 | size_t retlen; |
| @@ -581,16 +684,17 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock | |||
| 581 | wpage = c->summary->sum_buf; | 684 | wpage = c->summary->sum_buf; |
| 582 | 685 | ||
| 583 | while (c->summary->sum_num) { | 686 | while (c->summary->sum_num) { |
| 687 | temp = c->summary->sum_list_head; | ||
| 584 | 688 | ||
| 585 | switch (je16_to_cpu(c->summary->sum_list_head->u.nodetype)) { | 689 | switch (je16_to_cpu(temp->u.nodetype)) { |
| 586 | case JFFS2_NODETYPE_INODE: { | 690 | case JFFS2_NODETYPE_INODE: { |
| 587 | struct jffs2_sum_inode_flash *sino_ptr = wpage; | 691 | struct jffs2_sum_inode_flash *sino_ptr = wpage; |
| 588 | 692 | ||
| 589 | sino_ptr->nodetype = c->summary->sum_list_head->i.nodetype; | 693 | sino_ptr->nodetype = temp->i.nodetype; |
| 590 | sino_ptr->inode = c->summary->sum_list_head->i.inode; | 694 | sino_ptr->inode = temp->i.inode; |
| 591 | sino_ptr->version = c->summary->sum_list_head->i.version; | 695 | sino_ptr->version = temp->i.version; |
| 592 | sino_ptr->offset = c->summary->sum_list_head->i.offset; | 696 | sino_ptr->offset = temp->i.offset; |
| 593 | sino_ptr->totlen = c->summary->sum_list_head->i.totlen; | 697 | sino_ptr->totlen = temp->i.totlen; |
| 594 | 698 | ||
| 595 | wpage += JFFS2_SUMMARY_INODE_SIZE; | 699 | wpage += JFFS2_SUMMARY_INODE_SIZE; |
| 596 | 700 | ||
| @@ -600,30 +704,60 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock | |||
| 600 | case JFFS2_NODETYPE_DIRENT: { | 704 | case JFFS2_NODETYPE_DIRENT: { |
| 601 | struct jffs2_sum_dirent_flash *sdrnt_ptr = wpage; | 705 | struct jffs2_sum_dirent_flash *sdrnt_ptr = wpage; |
| 602 | 706 | ||
| 603 | sdrnt_ptr->nodetype = c->summary->sum_list_head->d.nodetype; | 707 | sdrnt_ptr->nodetype = temp->d.nodetype; |
| 604 | sdrnt_ptr->totlen = c->summary->sum_list_head->d.totlen; | 708 | sdrnt_ptr->totlen = temp->d.totlen; |
| 605 | sdrnt_ptr->offset = c->summary->sum_list_head->d.offset; | 709 | sdrnt_ptr->offset = temp->d.offset; |
| 606 | sdrnt_ptr->pino = c->summary->sum_list_head->d.pino; | 710 | sdrnt_ptr->pino = temp->d.pino; |
| 607 | sdrnt_ptr->version = c->summary->sum_list_head->d.version; | 711 | sdrnt_ptr->version = temp->d.version; |
| 608 | sdrnt_ptr->ino = c->summary->sum_list_head->d.ino; | 712 | sdrnt_ptr->ino = temp->d.ino; |
| 609 | sdrnt_ptr->nsize = c->summary->sum_list_head->d.nsize; | 713 | sdrnt_ptr->nsize = temp->d.nsize; |
| 610 | sdrnt_ptr->type = c->summary->sum_list_head->d.type; | 714 | sdrnt_ptr->type = temp->d.type; |
| 611 | 715 | ||
| 612 | memcpy(sdrnt_ptr->name, c->summary->sum_list_head->d.name, | 716 | memcpy(sdrnt_ptr->name, temp->d.name, |
| 613 | c->summary->sum_list_head->d.nsize); | 717 | temp->d.nsize); |
| 614 | 718 | ||
| 615 | wpage += JFFS2_SUMMARY_DIRENT_SIZE(c->summary->sum_list_head->d.nsize); | 719 | wpage += JFFS2_SUMMARY_DIRENT_SIZE(temp->d.nsize); |
| 616 | 720 | ||
| 617 | break; | 721 | break; |
| 618 | } | 722 | } |
| 723 | #ifdef CONFIG_JFFS2_FS_XATTR | ||
| 724 | case JFFS2_NODETYPE_XATTR: { | ||
| 725 | struct jffs2_sum_xattr_flash *sxattr_ptr = wpage; | ||
| 726 | |||
| 727 | temp = c->summary->sum_list_head; | ||
| 728 | sxattr_ptr->nodetype = temp->x.nodetype; | ||
| 729 | sxattr_ptr->xid = temp->x.xid; | ||
| 730 | sxattr_ptr->version = temp->x.version; | ||
| 731 | sxattr_ptr->offset = temp->x.offset; | ||
| 732 | sxattr_ptr->totlen = temp->x.totlen; | ||
| 733 | |||
| 734 | wpage += JFFS2_SUMMARY_XATTR_SIZE; | ||
| 735 | break; | ||
| 736 | } | ||
| 737 | case JFFS2_NODETYPE_XREF: { | ||
| 738 | struct jffs2_sum_xref_flash *sxref_ptr = wpage; | ||
| 619 | 739 | ||
| 740 | temp = c->summary->sum_list_head; | ||
| 741 | sxref_ptr->nodetype = temp->r.nodetype; | ||
| 742 | sxref_ptr->offset = temp->r.offset; | ||
| 743 | |||
| 744 | wpage += JFFS2_SUMMARY_XREF_SIZE; | ||
| 745 | break; | ||
| 746 | } | ||
| 747 | #endif | ||
| 620 | default : { | 748 | default : { |
| 621 | BUG(); /* unknown node in summary information */ | 749 | if ((je16_to_cpu(temp->u.nodetype) & JFFS2_COMPAT_MASK) |
| 750 | == JFFS2_FEATURE_RWCOMPAT_COPY) { | ||
| 751 | dbg_summary("Writing unknown RWCOMPAT_COPY node type %x\n", | ||
| 752 | je16_to_cpu(temp->u.nodetype)); | ||
| 753 | jffs2_sum_disable_collecting(c->summary); | ||
| 754 | } else { | ||
| 755 | BUG(); /* unknown node in summary information */ | ||
| 756 | } | ||
| 622 | } | 757 | } |
| 623 | } | 758 | } |
| 624 | 759 | ||
| 625 | temp = c->summary->sum_list_head; | 760 | c->summary->sum_list_head = temp->u.next; |
| 626 | c->summary->sum_list_head = c->summary->sum_list_head->u.next; | ||
| 627 | kfree(temp); | 761 | kfree(temp); |
| 628 | 762 | ||
| 629 | c->summary->sum_num--; | 763 | c->summary->sum_num--; |
| @@ -645,25 +779,34 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock | |||
| 645 | vecs[1].iov_base = c->summary->sum_buf; | 779 | vecs[1].iov_base = c->summary->sum_buf; |
| 646 | vecs[1].iov_len = datasize; | 780 | vecs[1].iov_len = datasize; |
| 647 | 781 | ||
| 648 | dbg_summary("JFFS2: writing out data to flash to pos : 0x%08x\n", | 782 | sum_ofs = jeb->offset + c->sector_size - jeb->free_size; |
| 649 | jeb->offset + c->sector_size - jeb->free_size); | ||
| 650 | 783 | ||
| 651 | spin_unlock(&c->erase_completion_lock); | 784 | dbg_summary("JFFS2: writing out data to flash to pos : 0x%08x\n", |
| 652 | ret = jffs2_flash_writev(c, vecs, 2, jeb->offset + c->sector_size - | 785 | sum_ofs); |
| 653 | jeb->free_size, &retlen, 0); | ||
| 654 | spin_lock(&c->erase_completion_lock); | ||
| 655 | 786 | ||
| 787 | ret = jffs2_flash_writev(c, vecs, 2, sum_ofs, &retlen, 0); | ||
| 656 | 788 | ||
| 657 | if (ret || (retlen != infosize)) { | 789 | if (ret || (retlen != infosize)) { |
| 658 | JFFS2_WARNING("Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n", | 790 | |
| 659 | infosize, jeb->offset + c->sector_size - jeb->free_size, ret, retlen); | 791 | JFFS2_WARNING("Write of %u bytes at 0x%08x failed. returned %d, retlen %zd\n", |
| 792 | infosize, sum_ofs, ret, retlen); | ||
| 793 | |||
| 794 | if (retlen) { | ||
| 795 | /* Waste remaining space */ | ||
| 796 | spin_lock(&c->erase_completion_lock); | ||
| 797 | jffs2_link_node_ref(c, jeb, sum_ofs | REF_OBSOLETE, infosize, NULL); | ||
| 798 | spin_unlock(&c->erase_completion_lock); | ||
| 799 | } | ||
| 660 | 800 | ||
| 661 | c->summary->sum_size = JFFS2_SUMMARY_NOSUM_SIZE; | 801 | c->summary->sum_size = JFFS2_SUMMARY_NOSUM_SIZE; |
| 662 | WASTED_SPACE(infosize); | ||
| 663 | 802 | ||
| 664 | return 1; | 803 | return 0; |
| 665 | } | 804 | } |
| 666 | 805 | ||
| 806 | spin_lock(&c->erase_completion_lock); | ||
| 807 | jffs2_link_node_ref(c, jeb, sum_ofs | REF_NORMAL, infosize, NULL); | ||
| 808 | spin_unlock(&c->erase_completion_lock); | ||
| 809 | |||
| 667 | return 0; | 810 | return 0; |
| 668 | } | 811 | } |
| 669 | 812 | ||
| @@ -671,13 +814,16 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock | |||
| 671 | 814 | ||
| 672 | int jffs2_sum_write_sumnode(struct jffs2_sb_info *c) | 815 | int jffs2_sum_write_sumnode(struct jffs2_sb_info *c) |
| 673 | { | 816 | { |
| 674 | struct jffs2_raw_node_ref *summary_ref; | 817 | int datasize, infosize, padsize; |
| 675 | int datasize, infosize, padsize, ret; | ||
| 676 | struct jffs2_eraseblock *jeb; | 818 | struct jffs2_eraseblock *jeb; |
| 819 | int ret; | ||
| 677 | 820 | ||
| 678 | dbg_summary("called\n"); | 821 | dbg_summary("called\n"); |
| 679 | 822 | ||
| 823 | spin_unlock(&c->erase_completion_lock); | ||
| 824 | |||
| 680 | jeb = c->nextblock; | 825 | jeb = c->nextblock; |
| 826 | jffs2_prealloc_raw_node_refs(c, jeb, 1); | ||
| 681 | 827 | ||
| 682 | if (!c->summary->sum_num || !c->summary->sum_list_head) { | 828 | if (!c->summary->sum_num || !c->summary->sum_list_head) { |
| 683 | JFFS2_WARNING("Empty summary info!!!\n"); | 829 | JFFS2_WARNING("Empty summary info!!!\n"); |
| @@ -696,35 +842,11 @@ int jffs2_sum_write_sumnode(struct jffs2_sb_info *c) | |||
| 696 | jffs2_sum_disable_collecting(c->summary); | 842 | jffs2_sum_disable_collecting(c->summary); |
| 697 | 843 | ||
| 698 | JFFS2_WARNING("Not enough space for summary, padsize = %d\n", padsize); | 844 | JFFS2_WARNING("Not enough space for summary, padsize = %d\n", padsize); |
| 845 | spin_lock(&c->erase_completion_lock); | ||
| 699 | return 0; | 846 | return 0; |
| 700 | } | 847 | } |
| 701 | 848 | ||
| 702 | ret = jffs2_sum_write_data(c, jeb, infosize, datasize, padsize); | 849 | ret = jffs2_sum_write_data(c, jeb, infosize, datasize, padsize); |
| 703 | if (ret) | ||
| 704 | return 0; /* can't write out summary, block is marked as NOSUM_SIZE */ | ||
| 705 | |||
| 706 | /* for ACCT_PARANOIA_CHECK */ | ||
| 707 | spin_unlock(&c->erase_completion_lock); | ||
| 708 | summary_ref = jffs2_alloc_raw_node_ref(); | ||
| 709 | spin_lock(&c->erase_completion_lock); | 850 | spin_lock(&c->erase_completion_lock); |
| 710 | 851 | return ret; | |
| 711 | if (!summary_ref) { | ||
| 712 | JFFS2_NOTICE("Failed to allocate node ref for summary\n"); | ||
| 713 | return -ENOMEM; | ||
| 714 | } | ||
| 715 | |||
| 716 | summary_ref->next_in_ino = NULL; | ||
| 717 | summary_ref->next_phys = NULL; | ||
| 718 | summary_ref->flash_offset = (jeb->offset + c->sector_size - jeb->free_size) | REF_NORMAL; | ||
| 719 | summary_ref->__totlen = infosize; | ||
| 720 | |||
| 721 | if (!jeb->first_node) | ||
| 722 | jeb->first_node = summary_ref; | ||
| 723 | if (jeb->last_node) | ||
| 724 | jeb->last_node->next_phys = summary_ref; | ||
| 725 | jeb->last_node = summary_ref; | ||
| 726 | |||
| 727 | USED_SPACE(infosize); | ||
| 728 | |||
| 729 | return 0; | ||
| 730 | } | 852 | } |
diff --git a/fs/jffs2/summary.h b/fs/jffs2/summary.h index b7a678be1709..6bf1f6aa4552 100644 --- a/fs/jffs2/summary.h +++ b/fs/jffs2/summary.h | |||
| @@ -18,23 +18,6 @@ | |||
| 18 | #include <linux/uio.h> | 18 | #include <linux/uio.h> |
| 19 | #include <linux/jffs2.h> | 19 | #include <linux/jffs2.h> |
| 20 | 20 | ||
| 21 | #define DIRTY_SPACE(x) do { typeof(x) _x = (x); \ | ||
| 22 | c->free_size -= _x; c->dirty_size += _x; \ | ||
| 23 | jeb->free_size -= _x ; jeb->dirty_size += _x; \ | ||
| 24 | }while(0) | ||
| 25 | #define USED_SPACE(x) do { typeof(x) _x = (x); \ | ||
| 26 | c->free_size -= _x; c->used_size += _x; \ | ||
| 27 | jeb->free_size -= _x ; jeb->used_size += _x; \ | ||
| 28 | }while(0) | ||
| 29 | #define WASTED_SPACE(x) do { typeof(x) _x = (x); \ | ||
| 30 | c->free_size -= _x; c->wasted_size += _x; \ | ||
| 31 | jeb->free_size -= _x ; jeb->wasted_size += _x; \ | ||
| 32 | }while(0) | ||
| 33 | #define UNCHECKED_SPACE(x) do { typeof(x) _x = (x); \ | ||
| 34 | c->free_size -= _x; c->unchecked_size += _x; \ | ||
| 35 | jeb->free_size -= _x ; jeb->unchecked_size += _x; \ | ||
| 36 | }while(0) | ||
| 37 | |||
| 38 | #define BLK_STATE_ALLFF 0 | 21 | #define BLK_STATE_ALLFF 0 |
| 39 | #define BLK_STATE_CLEAN 1 | 22 | #define BLK_STATE_CLEAN 1 |
| 40 | #define BLK_STATE_PARTDIRTY 2 | 23 | #define BLK_STATE_PARTDIRTY 2 |
| @@ -45,6 +28,8 @@ | |||
| 45 | #define JFFS2_SUMMARY_NOSUM_SIZE 0xffffffff | 28 | #define JFFS2_SUMMARY_NOSUM_SIZE 0xffffffff |
| 46 | #define JFFS2_SUMMARY_INODE_SIZE (sizeof(struct jffs2_sum_inode_flash)) | 29 | #define JFFS2_SUMMARY_INODE_SIZE (sizeof(struct jffs2_sum_inode_flash)) |
| 47 | #define JFFS2_SUMMARY_DIRENT_SIZE(x) (sizeof(struct jffs2_sum_dirent_flash) + (x)) | 30 | #define JFFS2_SUMMARY_DIRENT_SIZE(x) (sizeof(struct jffs2_sum_dirent_flash) + (x)) |
| 31 | #define JFFS2_SUMMARY_XATTR_SIZE (sizeof(struct jffs2_sum_xattr_flash)) | ||
| 32 | #define JFFS2_SUMMARY_XREF_SIZE (sizeof(struct jffs2_sum_xref_flash)) | ||
| 48 | 33 | ||
| 49 | /* Summary structures used on flash */ | 34 | /* Summary structures used on flash */ |
| 50 | 35 | ||
| @@ -75,11 +60,28 @@ struct jffs2_sum_dirent_flash | |||
| 75 | uint8_t name[0]; /* dirent name */ | 60 | uint8_t name[0]; /* dirent name */ |
| 76 | } __attribute__((packed)); | 61 | } __attribute__((packed)); |
| 77 | 62 | ||
| 63 | struct jffs2_sum_xattr_flash | ||
| 64 | { | ||
| 65 | jint16_t nodetype; /* == JFFS2_NODETYPE_XATR */ | ||
| 66 | jint32_t xid; /* xattr identifier */ | ||
| 67 | jint32_t version; /* version number */ | ||
| 68 | jint32_t offset; /* offset on jeb */ | ||
| 69 | jint32_t totlen; /* node length */ | ||
| 70 | } __attribute__((packed)); | ||
| 71 | |||
| 72 | struct jffs2_sum_xref_flash | ||
| 73 | { | ||
| 74 | jint16_t nodetype; /* == JFFS2_NODETYPE_XREF */ | ||
| 75 | jint32_t offset; /* offset on jeb */ | ||
| 76 | } __attribute__((packed)); | ||
| 77 | |||
| 78 | union jffs2_sum_flash | 78 | union jffs2_sum_flash |
| 79 | { | 79 | { |
| 80 | struct jffs2_sum_unknown_flash u; | 80 | struct jffs2_sum_unknown_flash u; |
| 81 | struct jffs2_sum_inode_flash i; | 81 | struct jffs2_sum_inode_flash i; |
| 82 | struct jffs2_sum_dirent_flash d; | 82 | struct jffs2_sum_dirent_flash d; |
| 83 | struct jffs2_sum_xattr_flash x; | ||
| 84 | struct jffs2_sum_xref_flash r; | ||
| 83 | }; | 85 | }; |
| 84 | 86 | ||
| 85 | /* Summary structures used in the memory */ | 87 | /* Summary structures used in the memory */ |
| @@ -114,11 +116,30 @@ struct jffs2_sum_dirent_mem | |||
| 114 | uint8_t name[0]; /* dirent name */ | 116 | uint8_t name[0]; /* dirent name */ |
| 115 | } __attribute__((packed)); | 117 | } __attribute__((packed)); |
| 116 | 118 | ||
| 119 | struct jffs2_sum_xattr_mem | ||
| 120 | { | ||
| 121 | union jffs2_sum_mem *next; | ||
| 122 | jint16_t nodetype; | ||
| 123 | jint32_t xid; | ||
| 124 | jint32_t version; | ||
| 125 | jint32_t offset; | ||
| 126 | jint32_t totlen; | ||
| 127 | } __attribute__((packed)); | ||
| 128 | |||
| 129 | struct jffs2_sum_xref_mem | ||
| 130 | { | ||
| 131 | union jffs2_sum_mem *next; | ||
| 132 | jint16_t nodetype; | ||
| 133 | jint32_t offset; | ||
| 134 | } __attribute__((packed)); | ||
| 135 | |||
| 117 | union jffs2_sum_mem | 136 | union jffs2_sum_mem |
| 118 | { | 137 | { |
| 119 | struct jffs2_sum_unknown_mem u; | 138 | struct jffs2_sum_unknown_mem u; |
| 120 | struct jffs2_sum_inode_mem i; | 139 | struct jffs2_sum_inode_mem i; |
| 121 | struct jffs2_sum_dirent_mem d; | 140 | struct jffs2_sum_dirent_mem d; |
| 141 | struct jffs2_sum_xattr_mem x; | ||
| 142 | struct jffs2_sum_xref_mem r; | ||
| 122 | }; | 143 | }; |
| 123 | 144 | ||
| 124 | /* Summary related information stored in superblock */ | 145 | /* Summary related information stored in superblock */ |
| @@ -159,8 +180,11 @@ int jffs2_sum_write_sumnode(struct jffs2_sb_info *c); | |||
| 159 | int jffs2_sum_add_padding_mem(struct jffs2_summary *s, uint32_t size); | 180 | int jffs2_sum_add_padding_mem(struct jffs2_summary *s, uint32_t size); |
| 160 | int jffs2_sum_add_inode_mem(struct jffs2_summary *s, struct jffs2_raw_inode *ri, uint32_t ofs); | 181 | int jffs2_sum_add_inode_mem(struct jffs2_summary *s, struct jffs2_raw_inode *ri, uint32_t ofs); |
| 161 | int jffs2_sum_add_dirent_mem(struct jffs2_summary *s, struct jffs2_raw_dirent *rd, uint32_t ofs); | 182 | int jffs2_sum_add_dirent_mem(struct jffs2_summary *s, struct jffs2_raw_dirent *rd, uint32_t ofs); |
| 183 | int jffs2_sum_add_xattr_mem(struct jffs2_summary *s, struct jffs2_raw_xattr *rx, uint32_t ofs); | ||
| 184 | int jffs2_sum_add_xref_mem(struct jffs2_summary *s, struct jffs2_raw_xref *rr, uint32_t ofs); | ||
| 162 | int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, | 185 | int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, |
| 163 | uint32_t ofs, uint32_t *pseudo_random); | 186 | struct jffs2_raw_summary *summary, uint32_t sumlen, |
| 187 | uint32_t *pseudo_random); | ||
| 164 | 188 | ||
| 165 | #else /* SUMMARY DISABLED */ | 189 | #else /* SUMMARY DISABLED */ |
| 166 | 190 | ||
| @@ -176,7 +200,9 @@ int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb | |||
| 176 | #define jffs2_sum_add_padding_mem(a,b) | 200 | #define jffs2_sum_add_padding_mem(a,b) |
| 177 | #define jffs2_sum_add_inode_mem(a,b,c) | 201 | #define jffs2_sum_add_inode_mem(a,b,c) |
| 178 | #define jffs2_sum_add_dirent_mem(a,b,c) | 202 | #define jffs2_sum_add_dirent_mem(a,b,c) |
| 179 | #define jffs2_sum_scan_sumnode(a,b,c,d) (0) | 203 | #define jffs2_sum_add_xattr_mem(a,b,c) |
| 204 | #define jffs2_sum_add_xref_mem(a,b,c) | ||
| 205 | #define jffs2_sum_scan_sumnode(a,b,c,d,e) (0) | ||
| 180 | 206 | ||
| 181 | #endif /* CONFIG_JFFS2_SUMMARY */ | 207 | #endif /* CONFIG_JFFS2_SUMMARY */ |
| 182 | 208 | ||
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c index ffd8e84b22cc..9d0521451f59 100644 --- a/fs/jffs2/super.c +++ b/fs/jffs2/super.c | |||
| @@ -151,7 +151,10 @@ static struct super_block *jffs2_get_sb_mtd(struct file_system_type *fs_type, | |||
| 151 | 151 | ||
| 152 | sb->s_op = &jffs2_super_operations; | 152 | sb->s_op = &jffs2_super_operations; |
| 153 | sb->s_flags = flags | MS_NOATIME; | 153 | sb->s_flags = flags | MS_NOATIME; |
| 154 | 154 | sb->s_xattr = jffs2_xattr_handlers; | |
| 155 | #ifdef CONFIG_JFFS2_FS_POSIX_ACL | ||
| 156 | sb->s_flags |= MS_POSIXACL; | ||
| 157 | #endif | ||
| 155 | ret = jffs2_do_fill_super(sb, data, flags & MS_SILENT ? 1 : 0); | 158 | ret = jffs2_do_fill_super(sb, data, flags & MS_SILENT ? 1 : 0); |
| 156 | 159 | ||
| 157 | if (ret) { | 160 | if (ret) { |
| @@ -293,6 +296,7 @@ static void jffs2_put_super (struct super_block *sb) | |||
| 293 | kfree(c->blocks); | 296 | kfree(c->blocks); |
| 294 | jffs2_flash_cleanup(c); | 297 | jffs2_flash_cleanup(c); |
| 295 | kfree(c->inocache_list); | 298 | kfree(c->inocache_list); |
| 299 | jffs2_clear_xattr_subsystem(c); | ||
| 296 | if (c->mtd->sync) | 300 | if (c->mtd->sync) |
| 297 | c->mtd->sync(c->mtd); | 301 | c->mtd->sync(c->mtd); |
| 298 | 302 | ||
| @@ -320,6 +324,18 @@ static int __init init_jffs2_fs(void) | |||
| 320 | { | 324 | { |
| 321 | int ret; | 325 | int ret; |
| 322 | 326 | ||
| 327 | /* Paranoia checks for on-medium structures. If we ask GCC | ||
| 328 | to pack them with __attribute__((packed)) then it _also_ | ||
| 329 | assumes that they're not aligned -- so it emits crappy | ||
| 330 | code on some architectures. Ideally we want an attribute | ||
| 331 | which means just 'no padding', without the alignment | ||
| 332 | thing. But GCC doesn't have that -- we have to just | ||
| 333 | hope the structs are the right sizes, instead. */ | ||
| 334 | BUG_ON(sizeof(struct jffs2_unknown_node) != 12); | ||
| 335 | BUG_ON(sizeof(struct jffs2_raw_dirent) != 40); | ||
| 336 | BUG_ON(sizeof(struct jffs2_raw_inode) != 68); | ||
| 337 | BUG_ON(sizeof(struct jffs2_raw_summary) != 32); | ||
| 338 | |||
| 323 | printk(KERN_INFO "JFFS2 version 2.2." | 339 | printk(KERN_INFO "JFFS2 version 2.2." |
| 324 | #ifdef CONFIG_JFFS2_FS_WRITEBUFFER | 340 | #ifdef CONFIG_JFFS2_FS_WRITEBUFFER |
| 325 | " (NAND)" | 341 | " (NAND)" |
| @@ -327,7 +343,7 @@ static int __init init_jffs2_fs(void) | |||
| 327 | #ifdef CONFIG_JFFS2_SUMMARY | 343 | #ifdef CONFIG_JFFS2_SUMMARY |
| 328 | " (SUMMARY) " | 344 | " (SUMMARY) " |
| 329 | #endif | 345 | #endif |
| 330 | " (C) 2001-2003 Red Hat, Inc.\n"); | 346 | " (C) 2001-2006 Red Hat, Inc.\n"); |
| 331 | 347 | ||
| 332 | jffs2_inode_cachep = kmem_cache_create("jffs2_i", | 348 | jffs2_inode_cachep = kmem_cache_create("jffs2_i", |
| 333 | sizeof(struct jffs2_inode_info), | 349 | sizeof(struct jffs2_inode_info), |
diff --git a/fs/jffs2/symlink.c b/fs/jffs2/symlink.c index d55754fe8925..fc211b6e9b03 100644 --- a/fs/jffs2/symlink.c +++ b/fs/jffs2/symlink.c | |||
| @@ -24,7 +24,12 @@ struct inode_operations jffs2_symlink_inode_operations = | |||
| 24 | { | 24 | { |
| 25 | .readlink = generic_readlink, | 25 | .readlink = generic_readlink, |
| 26 | .follow_link = jffs2_follow_link, | 26 | .follow_link = jffs2_follow_link, |
| 27 | .setattr = jffs2_setattr | 27 | .permission = jffs2_permission, |
| 28 | .setattr = jffs2_setattr, | ||
| 29 | .setxattr = jffs2_setxattr, | ||
| 30 | .getxattr = jffs2_getxattr, | ||
| 31 | .listxattr = jffs2_listxattr, | ||
| 32 | .removexattr = jffs2_removexattr | ||
| 28 | }; | 33 | }; |
| 29 | 34 | ||
| 30 | static void *jffs2_follow_link(struct dentry *dentry, struct nameidata *nd) | 35 | static void *jffs2_follow_link(struct dentry *dentry, struct nameidata *nd) |
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c index 4cebf0e57c46..a7f153f79ecb 100644 --- a/fs/jffs2/wbuf.c +++ b/fs/jffs2/wbuf.c | |||
| @@ -156,69 +156,130 @@ static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock | |||
| 156 | jffs2_erase_pending_trigger(c); | 156 | jffs2_erase_pending_trigger(c); |
| 157 | } | 157 | } |
| 158 | 158 | ||
| 159 | /* Adjust its size counts accordingly */ | 159 | if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) { |
| 160 | c->wasted_size += jeb->free_size; | 160 | uint32_t oldfree = jeb->free_size; |
| 161 | c->free_size -= jeb->free_size; | 161 | |
| 162 | jeb->wasted_size += jeb->free_size; | 162 | jffs2_link_node_ref(c, jeb, |
| 163 | jeb->free_size = 0; | 163 | (jeb->offset+c->sector_size-oldfree) | REF_OBSOLETE, |
| 164 | oldfree, NULL); | ||
| 165 | /* convert to wasted */ | ||
| 166 | c->wasted_size += oldfree; | ||
| 167 | jeb->wasted_size += oldfree; | ||
| 168 | c->dirty_size -= oldfree; | ||
| 169 | jeb->dirty_size -= oldfree; | ||
| 170 | } | ||
| 164 | 171 | ||
| 165 | jffs2_dbg_dump_block_lists_nolock(c); | 172 | jffs2_dbg_dump_block_lists_nolock(c); |
| 166 | jffs2_dbg_acct_sanity_check_nolock(c,jeb); | 173 | jffs2_dbg_acct_sanity_check_nolock(c,jeb); |
| 167 | jffs2_dbg_acct_paranoia_check_nolock(c, jeb); | 174 | jffs2_dbg_acct_paranoia_check_nolock(c, jeb); |
| 168 | } | 175 | } |
| 169 | 176 | ||
| 177 | static struct jffs2_raw_node_ref **jffs2_incore_replace_raw(struct jffs2_sb_info *c, | ||
| 178 | struct jffs2_inode_info *f, | ||
| 179 | struct jffs2_raw_node_ref *raw, | ||
| 180 | union jffs2_node_union *node) | ||
| 181 | { | ||
| 182 | struct jffs2_node_frag *frag; | ||
| 183 | struct jffs2_full_dirent *fd; | ||
| 184 | |||
| 185 | dbg_noderef("incore_replace_raw: node at %p is {%04x,%04x}\n", | ||
| 186 | node, je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype)); | ||
| 187 | |||
| 188 | BUG_ON(je16_to_cpu(node->u.magic) != 0x1985 && | ||
| 189 | je16_to_cpu(node->u.magic) != 0); | ||
| 190 | |||
| 191 | switch (je16_to_cpu(node->u.nodetype)) { | ||
| 192 | case JFFS2_NODETYPE_INODE: | ||
| 193 | if (f->metadata && f->metadata->raw == raw) { | ||
| 194 | dbg_noderef("Will replace ->raw in f->metadata at %p\n", f->metadata); | ||
| 195 | return &f->metadata->raw; | ||
| 196 | } | ||
| 197 | frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset)); | ||
| 198 | BUG_ON(!frag); | ||
| 199 | /* Find a frag which refers to the full_dnode we want to modify */ | ||
| 200 | while (!frag->node || frag->node->raw != raw) { | ||
| 201 | frag = frag_next(frag); | ||
| 202 | BUG_ON(!frag); | ||
| 203 | } | ||
| 204 | dbg_noderef("Will replace ->raw in full_dnode at %p\n", frag->node); | ||
| 205 | return &frag->node->raw; | ||
| 206 | |||
| 207 | case JFFS2_NODETYPE_DIRENT: | ||
| 208 | for (fd = f->dents; fd; fd = fd->next) { | ||
| 209 | if (fd->raw == raw) { | ||
| 210 | dbg_noderef("Will replace ->raw in full_dirent at %p\n", fd); | ||
| 211 | return &fd->raw; | ||
| 212 | } | ||
| 213 | } | ||
| 214 | BUG(); | ||
| 215 | |||
| 216 | default: | ||
| 217 | dbg_noderef("Don't care about replacing raw for nodetype %x\n", | ||
| 218 | je16_to_cpu(node->u.nodetype)); | ||
| 219 | break; | ||
| 220 | } | ||
| 221 | return NULL; | ||
| 222 | } | ||
| 223 | |||
| 170 | /* Recover from failure to write wbuf. Recover the nodes up to the | 224 | /* Recover from failure to write wbuf. Recover the nodes up to the |
| 171 | * wbuf, not the one which we were starting to try to write. */ | 225 | * wbuf, not the one which we were starting to try to write. */ |
| 172 | 226 | ||
| 173 | static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | 227 | static void jffs2_wbuf_recover(struct jffs2_sb_info *c) |
| 174 | { | 228 | { |
| 175 | struct jffs2_eraseblock *jeb, *new_jeb; | 229 | struct jffs2_eraseblock *jeb, *new_jeb; |
| 176 | struct jffs2_raw_node_ref **first_raw, **raw; | 230 | struct jffs2_raw_node_ref *raw, *next, *first_raw = NULL; |
| 177 | size_t retlen; | 231 | size_t retlen; |
| 178 | int ret; | 232 | int ret; |
| 233 | int nr_refile = 0; | ||
| 179 | unsigned char *buf; | 234 | unsigned char *buf; |
| 180 | uint32_t start, end, ofs, len; | 235 | uint32_t start, end, ofs, len; |
| 181 | 236 | ||
| 182 | spin_lock(&c->erase_completion_lock); | ||
| 183 | |||
| 184 | jeb = &c->blocks[c->wbuf_ofs / c->sector_size]; | 237 | jeb = &c->blocks[c->wbuf_ofs / c->sector_size]; |
| 185 | 238 | ||
| 239 | spin_lock(&c->erase_completion_lock); | ||
| 186 | jffs2_block_refile(c, jeb, REFILE_NOTEMPTY); | 240 | jffs2_block_refile(c, jeb, REFILE_NOTEMPTY); |
| 241 | spin_unlock(&c->erase_completion_lock); | ||
| 242 | |||
| 243 | BUG_ON(!ref_obsolete(jeb->last_node)); | ||
| 187 | 244 | ||
| 188 | /* Find the first node to be recovered, by skipping over every | 245 | /* Find the first node to be recovered, by skipping over every |
| 189 | node which ends before the wbuf starts, or which is obsolete. */ | 246 | node which ends before the wbuf starts, or which is obsolete. */ |
| 190 | first_raw = &jeb->first_node; | 247 | for (next = raw = jeb->first_node; next; raw = next) { |
| 191 | while (*first_raw && | 248 | next = ref_next(raw); |
| 192 | (ref_obsolete(*first_raw) || | 249 | |
| 193 | (ref_offset(*first_raw)+ref_totlen(c, jeb, *first_raw)) < c->wbuf_ofs)) { | 250 | if (ref_obsolete(raw) || |
| 194 | D1(printk(KERN_DEBUG "Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n", | 251 | (next && ref_offset(next) <= c->wbuf_ofs)) { |
| 195 | ref_offset(*first_raw), ref_flags(*first_raw), | 252 | dbg_noderef("Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n", |
| 196 | (ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw)), | 253 | ref_offset(raw), ref_flags(raw), |
| 197 | c->wbuf_ofs)); | 254 | (ref_offset(raw) + ref_totlen(c, jeb, raw)), |
| 198 | first_raw = &(*first_raw)->next_phys; | 255 | c->wbuf_ofs); |
| 256 | continue; | ||
| 257 | } | ||
| 258 | dbg_noderef("First node to be recovered is at 0x%08x(%d)-0x%08x\n", | ||
| 259 | ref_offset(raw), ref_flags(raw), | ||
| 260 | (ref_offset(raw) + ref_totlen(c, jeb, raw))); | ||
| 261 | |||
| 262 | first_raw = raw; | ||
| 263 | break; | ||
| 199 | } | 264 | } |
| 200 | 265 | ||
| 201 | if (!*first_raw) { | 266 | if (!first_raw) { |
| 202 | /* All nodes were obsolete. Nothing to recover. */ | 267 | /* All nodes were obsolete. Nothing to recover. */ |
| 203 | D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n")); | 268 | D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n")); |
| 204 | spin_unlock(&c->erase_completion_lock); | 269 | c->wbuf_len = 0; |
| 205 | return; | 270 | return; |
| 206 | } | 271 | } |
| 207 | 272 | ||
| 208 | start = ref_offset(*first_raw); | 273 | start = ref_offset(first_raw); |
| 209 | end = ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw); | 274 | end = ref_offset(jeb->last_node); |
| 210 | 275 | nr_refile = 1; | |
| 211 | /* Find the last node to be recovered */ | ||
| 212 | raw = first_raw; | ||
| 213 | while ((*raw)) { | ||
| 214 | if (!ref_obsolete(*raw)) | ||
| 215 | end = ref_offset(*raw) + ref_totlen(c, jeb, *raw); | ||
| 216 | 276 | ||
| 217 | raw = &(*raw)->next_phys; | 277 | /* Count the number of refs which need to be copied */ |
| 218 | } | 278 | while ((raw = ref_next(raw)) != jeb->last_node) |
| 219 | spin_unlock(&c->erase_completion_lock); | 279 | nr_refile++; |
| 220 | 280 | ||
| 221 | D1(printk(KERN_DEBUG "wbuf recover %08x-%08x\n", start, end)); | 281 | dbg_noderef("wbuf recover %08x-%08x (%d bytes in %d nodes)\n", |
| 282 | start, end, end - start, nr_refile); | ||
| 222 | 283 | ||
| 223 | buf = NULL; | 284 | buf = NULL; |
| 224 | if (start < c->wbuf_ofs) { | 285 | if (start < c->wbuf_ofs) { |
| @@ -233,28 +294,37 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
| 233 | } | 294 | } |
| 234 | 295 | ||
| 235 | /* Do the read... */ | 296 | /* Do the read... */ |
| 236 | if (jffs2_cleanmarker_oob(c)) | 297 | ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf); |
| 237 | ret = c->mtd->read_ecc(c->mtd, start, c->wbuf_ofs - start, &retlen, buf, NULL, c->oobinfo); | ||
| 238 | else | ||
| 239 | ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf); | ||
| 240 | 298 | ||
| 241 | if (ret == -EBADMSG && retlen == c->wbuf_ofs - start) { | 299 | /* ECC recovered ? */ |
| 242 | /* ECC recovered */ | 300 | if ((ret == -EUCLEAN || ret == -EBADMSG) && |
| 301 | (retlen == c->wbuf_ofs - start)) | ||
| 243 | ret = 0; | 302 | ret = 0; |
| 244 | } | 303 | |
| 245 | if (ret || retlen != c->wbuf_ofs - start) { | 304 | if (ret || retlen != c->wbuf_ofs - start) { |
| 246 | printk(KERN_CRIT "Old data are already lost in wbuf recovery. Data loss ensues.\n"); | 305 | printk(KERN_CRIT "Old data are already lost in wbuf recovery. Data loss ensues.\n"); |
| 247 | 306 | ||
| 248 | kfree(buf); | 307 | kfree(buf); |
| 249 | buf = NULL; | 308 | buf = NULL; |
| 250 | read_failed: | 309 | read_failed: |
| 251 | first_raw = &(*first_raw)->next_phys; | 310 | first_raw = ref_next(first_raw); |
| 311 | nr_refile--; | ||
| 312 | while (first_raw && ref_obsolete(first_raw)) { | ||
| 313 | first_raw = ref_next(first_raw); | ||
| 314 | nr_refile--; | ||
| 315 | } | ||
| 316 | |||
| 252 | /* If this was the only node to be recovered, give up */ | 317 | /* If this was the only node to be recovered, give up */ |
| 253 | if (!(*first_raw)) | 318 | if (!first_raw) { |
| 319 | c->wbuf_len = 0; | ||
| 254 | return; | 320 | return; |
| 321 | } | ||
| 255 | 322 | ||
| 256 | /* It wasn't. Go on and try to recover nodes complete in the wbuf */ | 323 | /* It wasn't. Go on and try to recover nodes complete in the wbuf */ |
| 257 | start = ref_offset(*first_raw); | 324 | start = ref_offset(first_raw); |
| 325 | dbg_noderef("wbuf now recover %08x-%08x (%d bytes in %d nodes)\n", | ||
| 326 | start, end, end - start, nr_refile); | ||
| 327 | |||
| 258 | } else { | 328 | } else { |
| 259 | /* Read succeeded. Copy the remaining data from the wbuf */ | 329 | /* Read succeeded. Copy the remaining data from the wbuf */ |
| 260 | memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs); | 330 | memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs); |
| @@ -263,14 +333,23 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
| 263 | /* OK... we're to rewrite (end-start) bytes of data from first_raw onwards. | 333 | /* OK... we're to rewrite (end-start) bytes of data from first_raw onwards. |
| 264 | Either 'buf' contains the data, or we find it in the wbuf */ | 334 | Either 'buf' contains the data, or we find it in the wbuf */ |
| 265 | 335 | ||
| 266 | |||
| 267 | /* ... and get an allocation of space from a shiny new block instead */ | 336 | /* ... and get an allocation of space from a shiny new block instead */ |
| 268 | ret = jffs2_reserve_space_gc(c, end-start, &ofs, &len, JFFS2_SUMMARY_NOSUM_SIZE); | 337 | ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE); |
| 269 | if (ret) { | 338 | if (ret) { |
| 270 | printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n"); | 339 | printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n"); |
| 271 | kfree(buf); | 340 | kfree(buf); |
| 272 | return; | 341 | return; |
| 273 | } | 342 | } |
| 343 | |||
| 344 | ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile); | ||
| 345 | if (ret) { | ||
| 346 | printk(KERN_WARNING "Failed to allocate node refs for wbuf recovery. Data loss ensues.\n"); | ||
| 347 | kfree(buf); | ||
| 348 | return; | ||
| 349 | } | ||
| 350 | |||
| 351 | ofs = write_ofs(c); | ||
| 352 | |||
| 274 | if (end-start >= c->wbuf_pagesize) { | 353 | if (end-start >= c->wbuf_pagesize) { |
| 275 | /* Need to do another write immediately, but it's possible | 354 | /* Need to do another write immediately, but it's possible |
| 276 | that this is just because the wbuf itself is completely | 355 | that this is just because the wbuf itself is completely |
| @@ -288,36 +367,22 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
| 288 | if (breakme++ == 20) { | 367 | if (breakme++ == 20) { |
| 289 | printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs); | 368 | printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs); |
| 290 | breakme = 0; | 369 | breakme = 0; |
| 291 | c->mtd->write_ecc(c->mtd, ofs, towrite, &retlen, | 370 | c->mtd->write(c->mtd, ofs, towrite, &retlen, |
| 292 | brokenbuf, NULL, c->oobinfo); | 371 | brokenbuf); |
| 293 | ret = -EIO; | 372 | ret = -EIO; |
| 294 | } else | 373 | } else |
| 295 | #endif | 374 | #endif |
| 296 | if (jffs2_cleanmarker_oob(c)) | 375 | ret = c->mtd->write(c->mtd, ofs, towrite, &retlen, |
| 297 | ret = c->mtd->write_ecc(c->mtd, ofs, towrite, &retlen, | 376 | rewrite_buf); |
| 298 | rewrite_buf, NULL, c->oobinfo); | ||
| 299 | else | ||
| 300 | ret = c->mtd->write(c->mtd, ofs, towrite, &retlen, rewrite_buf); | ||
| 301 | 377 | ||
| 302 | if (ret || retlen != towrite) { | 378 | if (ret || retlen != towrite) { |
| 303 | /* Argh. We tried. Really we did. */ | 379 | /* Argh. We tried. Really we did. */ |
| 304 | printk(KERN_CRIT "Recovery of wbuf failed due to a second write error\n"); | 380 | printk(KERN_CRIT "Recovery of wbuf failed due to a second write error\n"); |
| 305 | kfree(buf); | 381 | kfree(buf); |
| 306 | 382 | ||
| 307 | if (retlen) { | 383 | if (retlen) |
| 308 | struct jffs2_raw_node_ref *raw2; | 384 | jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, first_raw), NULL); |
| 309 | |||
| 310 | raw2 = jffs2_alloc_raw_node_ref(); | ||
| 311 | if (!raw2) | ||
| 312 | return; | ||
| 313 | 385 | ||
| 314 | raw2->flash_offset = ofs | REF_OBSOLETE; | ||
| 315 | raw2->__totlen = ref_totlen(c, jeb, *first_raw); | ||
| 316 | raw2->next_phys = NULL; | ||
| 317 | raw2->next_in_ino = NULL; | ||
| 318 | |||
| 319 | jffs2_add_physical_node_ref(c, raw2); | ||
| 320 | } | ||
| 321 | return; | 386 | return; |
| 322 | } | 387 | } |
| 323 | printk(KERN_NOTICE "Recovery of wbuf succeeded to %08x\n", ofs); | 388 | printk(KERN_NOTICE "Recovery of wbuf succeeded to %08x\n", ofs); |
| @@ -326,12 +391,10 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
| 326 | c->wbuf_ofs = ofs + towrite; | 391 | c->wbuf_ofs = ofs + towrite; |
| 327 | memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len); | 392 | memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len); |
| 328 | /* Don't muck about with c->wbuf_inodes. False positives are harmless. */ | 393 | /* Don't muck about with c->wbuf_inodes. False positives are harmless. */ |
| 329 | kfree(buf); | ||
| 330 | } else { | 394 | } else { |
| 331 | /* OK, now we're left with the dregs in whichever buffer we're using */ | 395 | /* OK, now we're left with the dregs in whichever buffer we're using */ |
| 332 | if (buf) { | 396 | if (buf) { |
| 333 | memcpy(c->wbuf, buf, end-start); | 397 | memcpy(c->wbuf, buf, end-start); |
| 334 | kfree(buf); | ||
| 335 | } else { | 398 | } else { |
| 336 | memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start); | 399 | memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start); |
| 337 | } | 400 | } |
| @@ -343,62 +406,111 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
| 343 | new_jeb = &c->blocks[ofs / c->sector_size]; | 406 | new_jeb = &c->blocks[ofs / c->sector_size]; |
| 344 | 407 | ||
| 345 | spin_lock(&c->erase_completion_lock); | 408 | spin_lock(&c->erase_completion_lock); |
| 346 | if (new_jeb->first_node) { | 409 | for (raw = first_raw; raw != jeb->last_node; raw = ref_next(raw)) { |
| 347 | /* Odd, but possible with ST flash later maybe */ | 410 | uint32_t rawlen = ref_totlen(c, jeb, raw); |
| 348 | new_jeb->last_node->next_phys = *first_raw; | 411 | struct jffs2_inode_cache *ic; |
| 349 | } else { | 412 | struct jffs2_raw_node_ref *new_ref; |
| 350 | new_jeb->first_node = *first_raw; | 413 | struct jffs2_raw_node_ref **adjust_ref = NULL; |
| 351 | } | 414 | struct jffs2_inode_info *f = NULL; |
| 352 | |||
| 353 | raw = first_raw; | ||
| 354 | while (*raw) { | ||
| 355 | uint32_t rawlen = ref_totlen(c, jeb, *raw); | ||
| 356 | 415 | ||
| 357 | D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n", | 416 | D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n", |
| 358 | rawlen, ref_offset(*raw), ref_flags(*raw), ofs)); | 417 | rawlen, ref_offset(raw), ref_flags(raw), ofs)); |
| 418 | |||
| 419 | ic = jffs2_raw_ref_to_ic(raw); | ||
| 420 | |||
| 421 | /* Ick. This XATTR mess should be fixed shortly... */ | ||
| 422 | if (ic && ic->class == RAWNODE_CLASS_XATTR_DATUM) { | ||
| 423 | struct jffs2_xattr_datum *xd = (void *)ic; | ||
| 424 | BUG_ON(xd->node != raw); | ||
| 425 | adjust_ref = &xd->node; | ||
| 426 | raw->next_in_ino = NULL; | ||
| 427 | ic = NULL; | ||
| 428 | } else if (ic && ic->class == RAWNODE_CLASS_XATTR_REF) { | ||
| 429 | struct jffs2_xattr_datum *xr = (void *)ic; | ||
| 430 | BUG_ON(xr->node != raw); | ||
| 431 | adjust_ref = &xr->node; | ||
| 432 | raw->next_in_ino = NULL; | ||
| 433 | ic = NULL; | ||
| 434 | } else if (ic && ic->class == RAWNODE_CLASS_INODE_CACHE) { | ||
| 435 | struct jffs2_raw_node_ref **p = &ic->nodes; | ||
| 436 | |||
| 437 | /* Remove the old node from the per-inode list */ | ||
| 438 | while (*p && *p != (void *)ic) { | ||
| 439 | if (*p == raw) { | ||
| 440 | (*p) = (raw->next_in_ino); | ||
| 441 | raw->next_in_ino = NULL; | ||
| 442 | break; | ||
| 443 | } | ||
| 444 | p = &((*p)->next_in_ino); | ||
| 445 | } | ||
| 359 | 446 | ||
| 360 | if (ref_obsolete(*raw)) { | 447 | if (ic->state == INO_STATE_PRESENT && !ref_obsolete(raw)) { |
| 361 | /* Shouldn't really happen much */ | 448 | /* If it's an in-core inode, then we have to adjust any |
| 362 | new_jeb->dirty_size += rawlen; | 449 | full_dirent or full_dnode structure to point to the |
| 363 | new_jeb->free_size -= rawlen; | 450 | new version instead of the old */ |
| 364 | c->dirty_size += rawlen; | 451 | f = jffs2_gc_fetch_inode(c, ic->ino, ic->nlink); |
| 365 | } else { | 452 | if (IS_ERR(f)) { |
| 366 | new_jeb->used_size += rawlen; | 453 | /* Should never happen; it _must_ be present */ |
| 367 | new_jeb->free_size -= rawlen; | 454 | JFFS2_ERROR("Failed to iget() ino #%u, err %ld\n", |
| 455 | ic->ino, PTR_ERR(f)); | ||
| 456 | BUG(); | ||
| 457 | } | ||
| 458 | /* We don't lock f->sem. There's a number of ways we could | ||
| 459 | end up in here with it already being locked, and nobody's | ||
| 460 | going to modify it on us anyway because we hold the | ||
| 461 | alloc_sem. We're only changing one ->raw pointer too, | ||
| 462 | which we can get away with without upsetting readers. */ | ||
| 463 | adjust_ref = jffs2_incore_replace_raw(c, f, raw, | ||
| 464 | (void *)(buf?:c->wbuf) + (ref_offset(raw) - start)); | ||
| 465 | } else if (unlikely(ic->state != INO_STATE_PRESENT && | ||
| 466 | ic->state != INO_STATE_CHECKEDABSENT && | ||
| 467 | ic->state != INO_STATE_GC)) { | ||
| 468 | JFFS2_ERROR("Inode #%u is in strange state %d!\n", ic->ino, ic->state); | ||
| 469 | BUG(); | ||
| 470 | } | ||
| 471 | } | ||
| 472 | |||
| 473 | new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic); | ||
| 474 | |||
| 475 | if (adjust_ref) { | ||
| 476 | BUG_ON(*adjust_ref != raw); | ||
| 477 | *adjust_ref = new_ref; | ||
| 478 | } | ||
| 479 | if (f) | ||
| 480 | jffs2_gc_release_inode(c, f); | ||
| 481 | |||
| 482 | if (!ref_obsolete(raw)) { | ||
| 368 | jeb->dirty_size += rawlen; | 483 | jeb->dirty_size += rawlen; |
| 369 | jeb->used_size -= rawlen; | 484 | jeb->used_size -= rawlen; |
| 370 | c->dirty_size += rawlen; | 485 | c->dirty_size += rawlen; |
| 486 | c->used_size -= rawlen; | ||
| 487 | raw->flash_offset = ref_offset(raw) | REF_OBSOLETE; | ||
| 488 | BUG_ON(raw->next_in_ino); | ||
| 371 | } | 489 | } |
| 372 | c->free_size -= rawlen; | ||
| 373 | (*raw)->flash_offset = ofs | ref_flags(*raw); | ||
| 374 | ofs += rawlen; | 490 | ofs += rawlen; |
| 375 | new_jeb->last_node = *raw; | ||
| 376 | |||
| 377 | raw = &(*raw)->next_phys; | ||
| 378 | } | 491 | } |
| 379 | 492 | ||
| 493 | kfree(buf); | ||
| 494 | |||
| 380 | /* Fix up the original jeb now it's on the bad_list */ | 495 | /* Fix up the original jeb now it's on the bad_list */ |
| 381 | *first_raw = NULL; | 496 | if (first_raw == jeb->first_node) { |
| 382 | if (first_raw == &jeb->first_node) { | ||
| 383 | jeb->last_node = NULL; | ||
| 384 | D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset)); | 497 | D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset)); |
| 385 | list_del(&jeb->list); | 498 | list_del(&jeb->list); |
| 386 | list_add(&jeb->list, &c->erase_pending_list); | 499 | list_add(&jeb->list, &c->erase_pending_list); |
| 387 | c->nr_erasing_blocks++; | 500 | c->nr_erasing_blocks++; |
| 388 | jffs2_erase_pending_trigger(c); | 501 | jffs2_erase_pending_trigger(c); |
| 389 | } | 502 | } |
| 390 | else | ||
| 391 | jeb->last_node = container_of(first_raw, struct jffs2_raw_node_ref, next_phys); | ||
| 392 | 503 | ||
| 393 | jffs2_dbg_acct_sanity_check_nolock(c, jeb); | 504 | jffs2_dbg_acct_sanity_check_nolock(c, jeb); |
| 394 | jffs2_dbg_acct_paranoia_check_nolock(c, jeb); | 505 | jffs2_dbg_acct_paranoia_check_nolock(c, jeb); |
| 395 | 506 | ||
| 396 | jffs2_dbg_acct_sanity_check_nolock(c, new_jeb); | 507 | jffs2_dbg_acct_sanity_check_nolock(c, new_jeb); |
| 397 | jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb); | 508 | jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb); |
| 398 | 509 | ||
| 399 | spin_unlock(&c->erase_completion_lock); | 510 | spin_unlock(&c->erase_completion_lock); |
| 400 | 511 | ||
| 401 | D1(printk(KERN_DEBUG "wbuf recovery completed OK\n")); | 512 | D1(printk(KERN_DEBUG "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n", c->wbuf_ofs, c->wbuf_len)); |
| 513 | |||
| 402 | } | 514 | } |
| 403 | 515 | ||
| 404 | /* Meaning of pad argument: | 516 | /* Meaning of pad argument: |
| @@ -412,6 +524,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
| 412 | 524 | ||
| 413 | static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) | 525 | static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) |
| 414 | { | 526 | { |
| 527 | struct jffs2_eraseblock *wbuf_jeb; | ||
| 415 | int ret; | 528 | int ret; |
| 416 | size_t retlen; | 529 | size_t retlen; |
| 417 | 530 | ||
| @@ -429,6 +542,10 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) | |||
| 429 | if (!c->wbuf_len) /* already checked c->wbuf above */ | 542 | if (!c->wbuf_len) /* already checked c->wbuf above */ |
| 430 | return 0; | 543 | return 0; |
| 431 | 544 | ||
| 545 | wbuf_jeb = &c->blocks[c->wbuf_ofs / c->sector_size]; | ||
| 546 | if (jffs2_prealloc_raw_node_refs(c, wbuf_jeb, c->nextblock->allocated_refs + 1)) | ||
| 547 | return -ENOMEM; | ||
| 548 | |||
| 432 | /* claim remaining space on the page | 549 | /* claim remaining space on the page |
| 433 | this happens, if we have a change to a new block, | 550 | this happens, if we have a change to a new block, |
| 434 | or if fsync forces us to flush the writebuffer. | 551 | or if fsync forces us to flush the writebuffer. |
| @@ -458,15 +575,12 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) | |||
| 458 | if (breakme++ == 20) { | 575 | if (breakme++ == 20) { |
| 459 | printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs); | 576 | printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs); |
| 460 | breakme = 0; | 577 | breakme = 0; |
| 461 | c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, | 578 | c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, |
| 462 | &retlen, brokenbuf, NULL, c->oobinfo); | 579 | brokenbuf); |
| 463 | ret = -EIO; | 580 | ret = -EIO; |
| 464 | } else | 581 | } else |
| 465 | #endif | 582 | #endif |
| 466 | 583 | ||
| 467 | if (jffs2_cleanmarker_oob(c)) | ||
| 468 | ret = c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf, NULL, c->oobinfo); | ||
| 469 | else | ||
| 470 | ret = c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf); | 584 | ret = c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf); |
| 471 | 585 | ||
| 472 | if (ret || retlen != c->wbuf_pagesize) { | 586 | if (ret || retlen != c->wbuf_pagesize) { |
| @@ -483,32 +597,34 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) | |||
| 483 | return ret; | 597 | return ret; |
| 484 | } | 598 | } |
| 485 | 599 | ||
| 486 | spin_lock(&c->erase_completion_lock); | ||
| 487 | |||
| 488 | /* Adjust free size of the block if we padded. */ | 600 | /* Adjust free size of the block if we padded. */ |
| 489 | if (pad) { | 601 | if (pad) { |
| 490 | struct jffs2_eraseblock *jeb; | 602 | uint32_t waste = c->wbuf_pagesize - c->wbuf_len; |
| 491 | |||
| 492 | jeb = &c->blocks[c->wbuf_ofs / c->sector_size]; | ||
| 493 | 603 | ||
| 494 | D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n", | 604 | D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n", |
| 495 | (jeb==c->nextblock)?"next":"", jeb->offset)); | 605 | (wbuf_jeb==c->nextblock)?"next":"", wbuf_jeb->offset)); |
| 496 | 606 | ||
| 497 | /* wbuf_pagesize - wbuf_len is the amount of space that's to be | 607 | /* wbuf_pagesize - wbuf_len is the amount of space that's to be |
| 498 | padded. If there is less free space in the block than that, | 608 | padded. If there is less free space in the block than that, |
| 499 | something screwed up */ | 609 | something screwed up */ |
| 500 | if (jeb->free_size < (c->wbuf_pagesize - c->wbuf_len)) { | 610 | if (wbuf_jeb->free_size < waste) { |
| 501 | printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n", | 611 | printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n", |
| 502 | c->wbuf_ofs, c->wbuf_len, c->wbuf_pagesize-c->wbuf_len); | 612 | c->wbuf_ofs, c->wbuf_len, waste); |
| 503 | printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n", | 613 | printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n", |
| 504 | jeb->offset, jeb->free_size); | 614 | wbuf_jeb->offset, wbuf_jeb->free_size); |
| 505 | BUG(); | 615 | BUG(); |
| 506 | } | 616 | } |
| 507 | jeb->free_size -= (c->wbuf_pagesize - c->wbuf_len); | 617 | |
| 508 | c->free_size -= (c->wbuf_pagesize - c->wbuf_len); | 618 | spin_lock(&c->erase_completion_lock); |
| 509 | jeb->wasted_size += (c->wbuf_pagesize - c->wbuf_len); | 619 | |
| 510 | c->wasted_size += (c->wbuf_pagesize - c->wbuf_len); | 620 | jffs2_link_node_ref(c, wbuf_jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL); |
| 511 | } | 621 | /* FIXME: that made it count as dirty. Convert to wasted */ |
| 622 | wbuf_jeb->dirty_size -= waste; | ||
| 623 | c->dirty_size -= waste; | ||
| 624 | wbuf_jeb->wasted_size += waste; | ||
| 625 | c->wasted_size += waste; | ||
| 626 | } else | ||
| 627 | spin_lock(&c->erase_completion_lock); | ||
| 512 | 628 | ||
| 513 | /* Stick any now-obsoleted blocks on the erase_pending_list */ | 629 | /* Stick any now-obsoleted blocks on the erase_pending_list */ |
| 514 | jffs2_refile_wbuf_blocks(c); | 630 | jffs2_refile_wbuf_blocks(c); |
| @@ -603,20 +719,30 @@ int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c) | |||
| 603 | 719 | ||
| 604 | return ret; | 720 | return ret; |
| 605 | } | 721 | } |
| 606 | int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsigned long count, loff_t to, size_t *retlen, uint32_t ino) | 722 | |
| 723 | static size_t jffs2_fill_wbuf(struct jffs2_sb_info *c, const uint8_t *buf, | ||
| 724 | size_t len) | ||
| 607 | { | 725 | { |
| 608 | struct kvec outvecs[3]; | 726 | if (len && !c->wbuf_len && (len >= c->wbuf_pagesize)) |
| 609 | uint32_t totlen = 0; | 727 | return 0; |
| 610 | uint32_t split_ofs = 0; | 728 | |
| 611 | uint32_t old_totlen; | 729 | if (len > (c->wbuf_pagesize - c->wbuf_len)) |
| 612 | int ret, splitvec = -1; | 730 | len = c->wbuf_pagesize - c->wbuf_len; |
| 613 | int invec, outvec; | 731 | memcpy(c->wbuf + c->wbuf_len, buf, len); |
| 614 | size_t wbuf_retlen; | 732 | c->wbuf_len += (uint32_t) len; |
| 615 | unsigned char *wbuf_ptr; | 733 | return len; |
| 616 | size_t donelen = 0; | 734 | } |
| 735 | |||
| 736 | int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, | ||
| 737 | unsigned long count, loff_t to, size_t *retlen, | ||
| 738 | uint32_t ino) | ||
| 739 | { | ||
| 740 | struct jffs2_eraseblock *jeb; | ||
| 741 | size_t wbuf_retlen, donelen = 0; | ||
| 617 | uint32_t outvec_to = to; | 742 | uint32_t outvec_to = to; |
| 743 | int ret, invec; | ||
| 618 | 744 | ||
| 619 | /* If not NAND flash, don't bother */ | 745 | /* If not writebuffered flash, don't bother */ |
| 620 | if (!jffs2_is_writebuffered(c)) | 746 | if (!jffs2_is_writebuffered(c)) |
| 621 | return jffs2_flash_direct_writev(c, invecs, count, to, retlen); | 747 | return jffs2_flash_direct_writev(c, invecs, count, to, retlen); |
| 622 | 748 | ||
| @@ -629,34 +755,22 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig | |||
| 629 | memset(c->wbuf,0xff,c->wbuf_pagesize); | 755 | memset(c->wbuf,0xff,c->wbuf_pagesize); |
| 630 | } | 756 | } |
| 631 | 757 | ||
| 632 | /* Fixup the wbuf if we are moving to a new eraseblock. The checks below | 758 | /* |
| 633 | fail for ECC'd NOR because cleanmarker == 16, so a block starts at | 759 | * Sanity checks on target address. It's permitted to write |
| 634 | xxx0010. */ | 760 | * at PAD(c->wbuf_len+c->wbuf_ofs), and it's permitted to |
| 635 | if (jffs2_nor_ecc(c)) { | 761 | * write at the beginning of a new erase block. Anything else, |
| 636 | if (((c->wbuf_ofs % c->sector_size) == 0) && !c->wbuf_len) { | 762 | * and you die. New block starts at xxx000c (0-b = block |
| 637 | c->wbuf_ofs = PAGE_DIV(to); | 763 | * header) |
| 638 | c->wbuf_len = PAGE_MOD(to); | 764 | */ |
| 639 | memset(c->wbuf,0xff,c->wbuf_pagesize); | ||
| 640 | } | ||
| 641 | } | ||
| 642 | |||
| 643 | /* Sanity checks on target address. | ||
| 644 | It's permitted to write at PAD(c->wbuf_len+c->wbuf_ofs), | ||
| 645 | and it's permitted to write at the beginning of a new | ||
| 646 | erase block. Anything else, and you die. | ||
| 647 | New block starts at xxx000c (0-b = block header) | ||
| 648 | */ | ||
| 649 | if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) { | 765 | if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) { |
| 650 | /* It's a write to a new block */ | 766 | /* It's a write to a new block */ |
| 651 | if (c->wbuf_len) { | 767 | if (c->wbuf_len) { |
| 652 | D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx causes flush of wbuf at 0x%08x\n", (unsigned long)to, c->wbuf_ofs)); | 768 | D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx " |
| 769 | "causes flush of wbuf at 0x%08x\n", | ||
| 770 | (unsigned long)to, c->wbuf_ofs)); | ||
| 653 | ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT); | 771 | ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT); |
| 654 | if (ret) { | 772 | if (ret) |
| 655 | /* the underlying layer has to check wbuf_len to do the cleanup */ | 773 | goto outerr; |
| 656 | D1(printk(KERN_WARNING "jffs2_flush_wbuf() called from jffs2_flash_writev() failed %d\n", ret)); | ||
| 657 | *retlen = 0; | ||
| 658 | goto exit; | ||
| 659 | } | ||
| 660 | } | 774 | } |
| 661 | /* set pointer to new block */ | 775 | /* set pointer to new block */ |
| 662 | c->wbuf_ofs = PAGE_DIV(to); | 776 | c->wbuf_ofs = PAGE_DIV(to); |
| @@ -665,165 +779,70 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig | |||
| 665 | 779 | ||
| 666 | if (to != PAD(c->wbuf_ofs + c->wbuf_len)) { | 780 | if (to != PAD(c->wbuf_ofs + c->wbuf_len)) { |
| 667 | /* We're not writing immediately after the writebuffer. Bad. */ | 781 | /* We're not writing immediately after the writebuffer. Bad. */ |
| 668 | printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write to %08lx\n", (unsigned long)to); | 782 | printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write " |
| 783 | "to %08lx\n", (unsigned long)to); | ||
| 669 | if (c->wbuf_len) | 784 | if (c->wbuf_len) |
| 670 | printk(KERN_CRIT "wbuf was previously %08x-%08x\n", | 785 | printk(KERN_CRIT "wbuf was previously %08x-%08x\n", |
| 671 | c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len); | 786 | c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len); |
| 672 | BUG(); | 787 | BUG(); |
| 673 | } | 788 | } |
| 674 | 789 | ||
| 675 | /* Note outvecs[3] above. We know count is never greater than 2 */ | 790 | /* adjust alignment offset */ |
| 676 | if (count > 2) { | 791 | if (c->wbuf_len != PAGE_MOD(to)) { |
| 677 | printk(KERN_CRIT "jffs2_flash_writev(): count is %ld\n", count); | 792 | c->wbuf_len = PAGE_MOD(to); |
| 678 | BUG(); | 793 | /* take care of alignment to next page */ |
| 679 | } | 794 | if (!c->wbuf_len) { |
| 680 | 795 | c->wbuf_len = c->wbuf_pagesize; | |
| 681 | invec = 0; | 796 | ret = __jffs2_flush_wbuf(c, NOPAD); |
| 682 | outvec = 0; | 797 | if (ret) |
| 683 | 798 | goto outerr; | |
| 684 | /* Fill writebuffer first, if already in use */ | ||
| 685 | if (c->wbuf_len) { | ||
| 686 | uint32_t invec_ofs = 0; | ||
| 687 | |||
| 688 | /* adjust alignment offset */ | ||
| 689 | if (c->wbuf_len != PAGE_MOD(to)) { | ||
| 690 | c->wbuf_len = PAGE_MOD(to); | ||
| 691 | /* take care of alignment to next page */ | ||
| 692 | if (!c->wbuf_len) | ||
| 693 | c->wbuf_len = c->wbuf_pagesize; | ||
| 694 | } | ||
| 695 | |||
| 696 | while(c->wbuf_len < c->wbuf_pagesize) { | ||
| 697 | uint32_t thislen; | ||
| 698 | |||
| 699 | if (invec == count) | ||
| 700 | goto alldone; | ||
| 701 | |||
| 702 | thislen = c->wbuf_pagesize - c->wbuf_len; | ||
| 703 | |||
| 704 | if (thislen >= invecs[invec].iov_len) | ||
| 705 | thislen = invecs[invec].iov_len; | ||
| 706 | |||
| 707 | invec_ofs = thislen; | ||
| 708 | |||
| 709 | memcpy(c->wbuf + c->wbuf_len, invecs[invec].iov_base, thislen); | ||
| 710 | c->wbuf_len += thislen; | ||
| 711 | donelen += thislen; | ||
| 712 | /* Get next invec, if actual did not fill the buffer */ | ||
| 713 | if (c->wbuf_len < c->wbuf_pagesize) | ||
| 714 | invec++; | ||
| 715 | } | ||
| 716 | |||
| 717 | /* write buffer is full, flush buffer */ | ||
| 718 | ret = __jffs2_flush_wbuf(c, NOPAD); | ||
| 719 | if (ret) { | ||
| 720 | /* the underlying layer has to check wbuf_len to do the cleanup */ | ||
| 721 | D1(printk(KERN_WARNING "jffs2_flush_wbuf() called from jffs2_flash_writev() failed %d\n", ret)); | ||
| 722 | /* Retlen zero to make sure our caller doesn't mark the space dirty. | ||
| 723 | We've already done everything that's necessary */ | ||
| 724 | *retlen = 0; | ||
| 725 | goto exit; | ||
| 726 | } | ||
| 727 | outvec_to += donelen; | ||
| 728 | c->wbuf_ofs = outvec_to; | ||
| 729 | |||
| 730 | /* All invecs done ? */ | ||
| 731 | if (invec == count) | ||
| 732 | goto alldone; | ||
| 733 | |||
| 734 | /* Set up the first outvec, containing the remainder of the | ||
| 735 | invec we partially used */ | ||
| 736 | if (invecs[invec].iov_len > invec_ofs) { | ||
| 737 | outvecs[0].iov_base = invecs[invec].iov_base+invec_ofs; | ||
| 738 | totlen = outvecs[0].iov_len = invecs[invec].iov_len-invec_ofs; | ||
| 739 | if (totlen > c->wbuf_pagesize) { | ||
| 740 | splitvec = outvec; | ||
| 741 | split_ofs = outvecs[0].iov_len - PAGE_MOD(totlen); | ||
| 742 | } | ||
| 743 | outvec++; | ||
| 744 | } | ||
| 745 | invec++; | ||
| 746 | } | ||
| 747 | |||
| 748 | /* OK, now we've flushed the wbuf and the start of the bits | ||
| 749 | we have been asked to write, now to write the rest.... */ | ||
| 750 | |||
| 751 | /* totlen holds the amount of data still to be written */ | ||
| 752 | old_totlen = totlen; | ||
| 753 | for ( ; invec < count; invec++,outvec++ ) { | ||
| 754 | outvecs[outvec].iov_base = invecs[invec].iov_base; | ||
| 755 | totlen += outvecs[outvec].iov_len = invecs[invec].iov_len; | ||
| 756 | if (PAGE_DIV(totlen) != PAGE_DIV(old_totlen)) { | ||
| 757 | splitvec = outvec; | ||
| 758 | split_ofs = outvecs[outvec].iov_len - PAGE_MOD(totlen); | ||
| 759 | old_totlen = totlen; | ||
| 760 | } | 799 | } |
| 761 | } | 800 | } |
| 762 | 801 | ||
| 763 | /* Now the outvecs array holds all the remaining data to write */ | 802 | for (invec = 0; invec < count; invec++) { |
| 764 | /* Up to splitvec,split_ofs is to be written immediately. The rest | 803 | int vlen = invecs[invec].iov_len; |
| 765 | goes into the (now-empty) wbuf */ | 804 | uint8_t *v = invecs[invec].iov_base; |
| 766 | |||
| 767 | if (splitvec != -1) { | ||
| 768 | uint32_t remainder; | ||
| 769 | |||
| 770 | remainder = outvecs[splitvec].iov_len - split_ofs; | ||
| 771 | outvecs[splitvec].iov_len = split_ofs; | ||
| 772 | |||
| 773 | /* We did cross a page boundary, so we write some now */ | ||
| 774 | if (jffs2_cleanmarker_oob(c)) | ||
| 775 | ret = c->mtd->writev_ecc(c->mtd, outvecs, splitvec+1, outvec_to, &wbuf_retlen, NULL, c->oobinfo); | ||
| 776 | else | ||
| 777 | ret = jffs2_flash_direct_writev(c, outvecs, splitvec+1, outvec_to, &wbuf_retlen); | ||
| 778 | |||
| 779 | if (ret < 0 || wbuf_retlen != PAGE_DIV(totlen)) { | ||
| 780 | /* At this point we have no problem, | ||
| 781 | c->wbuf is empty. However refile nextblock to avoid | ||
| 782 | writing again to same address. | ||
| 783 | */ | ||
| 784 | struct jffs2_eraseblock *jeb; | ||
| 785 | 805 | ||
| 786 | spin_lock(&c->erase_completion_lock); | 806 | wbuf_retlen = jffs2_fill_wbuf(c, v, vlen); |
| 787 | 807 | ||
| 788 | jeb = &c->blocks[outvec_to / c->sector_size]; | 808 | if (c->wbuf_len == c->wbuf_pagesize) { |
| 789 | jffs2_block_refile(c, jeb, REFILE_ANYWAY); | 809 | ret = __jffs2_flush_wbuf(c, NOPAD); |
| 790 | 810 | if (ret) | |
| 791 | *retlen = 0; | 811 | goto outerr; |
| 792 | spin_unlock(&c->erase_completion_lock); | ||
| 793 | goto exit; | ||
| 794 | } | 812 | } |
| 795 | 813 | vlen -= wbuf_retlen; | |
| 814 | outvec_to += wbuf_retlen; | ||
| 796 | donelen += wbuf_retlen; | 815 | donelen += wbuf_retlen; |
| 797 | c->wbuf_ofs = PAGE_DIV(outvec_to) + PAGE_DIV(totlen); | 816 | v += wbuf_retlen; |
| 798 | 817 | ||
| 799 | if (remainder) { | 818 | if (vlen >= c->wbuf_pagesize) { |
| 800 | outvecs[splitvec].iov_base += split_ofs; | 819 | ret = c->mtd->write(c->mtd, outvec_to, PAGE_DIV(vlen), |
| 801 | outvecs[splitvec].iov_len = remainder; | 820 | &wbuf_retlen, v); |
| 802 | } else { | 821 | if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen)) |
| 803 | splitvec++; | 822 | goto outfile; |
| 823 | |||
| 824 | vlen -= wbuf_retlen; | ||
| 825 | outvec_to += wbuf_retlen; | ||
| 826 | c->wbuf_ofs = outvec_to; | ||
| 827 | donelen += wbuf_retlen; | ||
| 828 | v += wbuf_retlen; | ||
| 804 | } | 829 | } |
| 805 | 830 | ||
| 806 | } else { | 831 | wbuf_retlen = jffs2_fill_wbuf(c, v, vlen); |
| 807 | splitvec = 0; | 832 | if (c->wbuf_len == c->wbuf_pagesize) { |
| 808 | } | 833 | ret = __jffs2_flush_wbuf(c, NOPAD); |
| 809 | 834 | if (ret) | |
| 810 | /* Now splitvec points to the start of the bits we have to copy | 835 | goto outerr; |
| 811 | into the wbuf */ | 836 | } |
| 812 | wbuf_ptr = c->wbuf; | ||
| 813 | 837 | ||
| 814 | for ( ; splitvec < outvec; splitvec++) { | 838 | outvec_to += wbuf_retlen; |
| 815 | /* Don't copy the wbuf into itself */ | 839 | donelen += wbuf_retlen; |
| 816 | if (outvecs[splitvec].iov_base == c->wbuf) | ||
| 817 | continue; | ||
| 818 | memcpy(wbuf_ptr, outvecs[splitvec].iov_base, outvecs[splitvec].iov_len); | ||
| 819 | wbuf_ptr += outvecs[splitvec].iov_len; | ||
| 820 | donelen += outvecs[splitvec].iov_len; | ||
| 821 | } | 840 | } |
| 822 | c->wbuf_len = wbuf_ptr - c->wbuf; | ||
| 823 | 841 | ||
| 824 | /* If there's a remainder in the wbuf and it's a non-GC write, | 842 | /* |
| 825 | remember that the wbuf affects this ino */ | 843 | * If there's a remainder in the wbuf and it's a non-GC write, |
| 826 | alldone: | 844 | * remember that the wbuf affects this ino |
| 845 | */ | ||
| 827 | *retlen = donelen; | 846 | *retlen = donelen; |
| 828 | 847 | ||
| 829 | if (jffs2_sum_active()) { | 848 | if (jffs2_sum_active()) { |
| @@ -836,8 +855,24 @@ alldone: | |||
| 836 | jffs2_wbuf_dirties_inode(c, ino); | 855 | jffs2_wbuf_dirties_inode(c, ino); |
| 837 | 856 | ||
| 838 | ret = 0; | 857 | ret = 0; |
| 858 | up_write(&c->wbuf_sem); | ||
| 859 | return ret; | ||
| 839 | 860 | ||
| 840 | exit: | 861 | outfile: |
| 862 | /* | ||
| 863 | * At this point we have no problem, c->wbuf is empty. However | ||
| 864 | * refile nextblock to avoid writing again to same address. | ||
| 865 | */ | ||
| 866 | |||
| 867 | spin_lock(&c->erase_completion_lock); | ||
| 868 | |||
| 869 | jeb = &c->blocks[outvec_to / c->sector_size]; | ||
| 870 | jffs2_block_refile(c, jeb, REFILE_ANYWAY); | ||
| 871 | |||
| 872 | spin_unlock(&c->erase_completion_lock); | ||
| 873 | |||
| 874 | outerr: | ||
| 875 | *retlen = 0; | ||
| 841 | up_write(&c->wbuf_sem); | 876 | up_write(&c->wbuf_sem); |
| 842 | return ret; | 877 | return ret; |
| 843 | } | 878 | } |
| @@ -846,7 +881,8 @@ exit: | |||
| 846 | * This is the entry for flash write. | 881 | * This is the entry for flash write. |
| 847 | * Check, if we work on NAND FLASH, if so build an kvec and write it via vritev | 882 | * Check, if we work on NAND FLASH, if so build an kvec and write it via vritev |
| 848 | */ | 883 | */ |
| 849 | int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, const u_char *buf) | 884 | int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, |
| 885 | size_t *retlen, const u_char *buf) | ||
| 850 | { | 886 | { |
| 851 | struct kvec vecs[1]; | 887 | struct kvec vecs[1]; |
| 852 | 888 | ||
| @@ -871,25 +907,23 @@ int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *re | |||
| 871 | 907 | ||
| 872 | /* Read flash */ | 908 | /* Read flash */ |
| 873 | down_read(&c->wbuf_sem); | 909 | down_read(&c->wbuf_sem); |
| 874 | if (jffs2_cleanmarker_oob(c)) | 910 | ret = c->mtd->read(c->mtd, ofs, len, retlen, buf); |
| 875 | ret = c->mtd->read_ecc(c->mtd, ofs, len, retlen, buf, NULL, c->oobinfo); | 911 | |
| 876 | else | 912 | if ( (ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len) ) { |
| 877 | ret = c->mtd->read(c->mtd, ofs, len, retlen, buf); | 913 | if (ret == -EBADMSG) |
| 878 | 914 | printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx)" | |
| 879 | if ( (ret == -EBADMSG) && (*retlen == len) ) { | 915 | " returned ECC error\n", len, ofs); |
| 880 | printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n", | ||
| 881 | len, ofs); | ||
| 882 | /* | 916 | /* |
| 883 | * We have the raw data without ECC correction in the buffer, maybe | 917 | * We have the raw data without ECC correction in the buffer, |
| 884 | * we are lucky and all data or parts are correct. We check the node. | 918 | * maybe we are lucky and all data or parts are correct. We |
| 885 | * If data are corrupted node check will sort it out. | 919 | * check the node. If data are corrupted node check will sort |
| 886 | * We keep this block, it will fail on write or erase and the we | 920 | * it out. We keep this block, it will fail on write or erase |
| 887 | * mark it bad. Or should we do that now? But we should give him a chance. | 921 | * and the we mark it bad. Or should we do that now? But we |
| 888 | * Maybe we had a system crash or power loss before the ecc write or | 922 | * should give him a chance. Maybe we had a system crash or |
| 889 | * a erase was completed. | 923 | * power loss before the ecc write or a erase was completed. |
| 890 | * So we return success. :) | 924 | * So we return success. :) |
| 891 | */ | 925 | */ |
| 892 | ret = 0; | 926 | ret = 0; |
| 893 | } | 927 | } |
| 894 | 928 | ||
| 895 | /* if no writebuffer available or write buffer empty, return */ | 929 | /* if no writebuffer available or write buffer empty, return */ |
| @@ -911,7 +945,7 @@ int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *re | |||
| 911 | orbf = (c->wbuf_ofs - ofs); /* offset in read buffer */ | 945 | orbf = (c->wbuf_ofs - ofs); /* offset in read buffer */ |
| 912 | if (orbf > len) /* is write beyond write buffer ? */ | 946 | if (orbf > len) /* is write beyond write buffer ? */ |
| 913 | goto exit; | 947 | goto exit; |
| 914 | lwbf = len - orbf; /* number of bytes to copy */ | 948 | lwbf = len - orbf; /* number of bytes to copy */ |
| 915 | if (lwbf > c->wbuf_len) | 949 | if (lwbf > c->wbuf_len) |
| 916 | lwbf = c->wbuf_len; | 950 | lwbf = c->wbuf_len; |
| 917 | } | 951 | } |
| @@ -923,158 +957,159 @@ exit: | |||
| 923 | return ret; | 957 | return ret; |
| 924 | } | 958 | } |
| 925 | 959 | ||
| 960 | #define NR_OOB_SCAN_PAGES 4 | ||
| 961 | |||
| 926 | /* | 962 | /* |
| 927 | * Check, if the out of band area is empty | 963 | * Check, if the out of band area is empty |
| 928 | */ | 964 | */ |
| 929 | int jffs2_check_oob_empty( struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int mode) | 965 | int jffs2_check_oob_empty(struct jffs2_sb_info *c, |
| 966 | struct jffs2_eraseblock *jeb, int mode) | ||
| 930 | { | 967 | { |
| 931 | unsigned char *buf; | 968 | int i, page, ret; |
| 932 | int ret = 0; | 969 | int oobsize = c->mtd->oobsize; |
| 933 | int i,len,page; | 970 | struct mtd_oob_ops ops; |
| 934 | size_t retlen; | 971 | |
| 935 | int oob_size; | 972 | ops.len = NR_OOB_SCAN_PAGES * oobsize; |
| 936 | 973 | ops.ooblen = oobsize; | |
| 937 | /* allocate a buffer for all oob data in this sector */ | 974 | ops.oobbuf = c->oobbuf; |
| 938 | oob_size = c->mtd->oobsize; | 975 | ops.ooboffs = 0; |
| 939 | len = 4 * oob_size; | 976 | ops.datbuf = NULL; |
| 940 | buf = kmalloc(len, GFP_KERNEL); | 977 | ops.mode = MTD_OOB_PLACE; |
| 941 | if (!buf) { | 978 | |
| 942 | printk(KERN_NOTICE "jffs2_check_oob_empty(): allocation of temporary data buffer for oob check failed\n"); | 979 | ret = c->mtd->read_oob(c->mtd, jeb->offset, &ops); |
| 943 | return -ENOMEM; | ||
| 944 | } | ||
| 945 | /* | ||
| 946 | * if mode = 0, we scan for a total empty oob area, else we have | ||
| 947 | * to take care of the cleanmarker in the first page of the block | ||
| 948 | */ | ||
| 949 | ret = jffs2_flash_read_oob(c, jeb->offset, len , &retlen, buf); | ||
| 950 | if (ret) { | 980 | if (ret) { |
| 951 | D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB failed %d for block at %08x\n", ret, jeb->offset)); | 981 | D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB " |
| 952 | goto out; | 982 | "failed %d for block at %08x\n", ret, jeb->offset)); |
| 983 | return ret; | ||
| 953 | } | 984 | } |
| 954 | 985 | ||
| 955 | if (retlen < len) { | 986 | if (ops.retlen < ops.len) { |
| 956 | D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB return short read " | 987 | D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB " |
| 957 | "(%zd bytes not %d) for block at %08x\n", retlen, len, jeb->offset)); | 988 | "returned short read (%zd bytes not %d) for block " |
| 958 | ret = -EIO; | 989 | "at %08x\n", ops.retlen, ops.len, jeb->offset)); |
| 959 | goto out; | 990 | return -EIO; |
| 960 | } | 991 | } |
| 961 | 992 | ||
| 962 | /* Special check for first page */ | 993 | /* Special check for first page */ |
| 963 | for(i = 0; i < oob_size ; i++) { | 994 | for(i = 0; i < oobsize ; i++) { |
| 964 | /* Yeah, we know about the cleanmarker. */ | 995 | /* Yeah, we know about the cleanmarker. */ |
| 965 | if (mode && i >= c->fsdata_pos && | 996 | if (mode && i >= c->fsdata_pos && |
| 966 | i < c->fsdata_pos + c->fsdata_len) | 997 | i < c->fsdata_pos + c->fsdata_len) |
| 967 | continue; | 998 | continue; |
| 968 | 999 | ||
| 969 | if (buf[i] != 0xFF) { | 1000 | if (ops.oobbuf[i] != 0xFF) { |
| 970 | D2(printk(KERN_DEBUG "Found %02x at %x in OOB for %08x\n", | 1001 | D2(printk(KERN_DEBUG "Found %02x at %x in OOB for " |
| 971 | buf[i], i, jeb->offset)); | 1002 | "%08x\n", ops.oobbuf[i], i, jeb->offset)); |
| 972 | ret = 1; | 1003 | return 1; |
| 973 | goto out; | ||
| 974 | } | 1004 | } |
| 975 | } | 1005 | } |
| 976 | 1006 | ||
| 977 | /* we know, we are aligned :) */ | 1007 | /* we know, we are aligned :) */ |
| 978 | for (page = oob_size; page < len; page += sizeof(long)) { | 1008 | for (page = oobsize; page < ops.len; page += sizeof(long)) { |
| 979 | unsigned long dat = *(unsigned long *)(&buf[page]); | 1009 | long dat = *(long *)(&ops.oobbuf[page]); |
| 980 | if(dat != -1) { | 1010 | if(dat != -1) |
| 981 | ret = 1; | 1011 | return 1; |
| 982 | goto out; | ||
| 983 | } | ||
| 984 | } | 1012 | } |
| 985 | 1013 | return 0; | |
| 986 | out: | ||
| 987 | kfree(buf); | ||
| 988 | |||
| 989 | return ret; | ||
| 990 | } | 1014 | } |
| 991 | 1015 | ||
| 992 | /* | 1016 | /* |
| 993 | * Scan for a valid cleanmarker and for bad blocks | 1017 | * Scan for a valid cleanmarker and for bad blocks |
| 994 | * For virtual blocks (concatenated physical blocks) check the cleanmarker | 1018 | */ |
| 995 | * only in the first page of the first physical block, but scan for bad blocks in all | 1019 | int jffs2_check_nand_cleanmarker (struct jffs2_sb_info *c, |
| 996 | * physical blocks | 1020 | struct jffs2_eraseblock *jeb) |
| 997 | */ | ||
| 998 | int jffs2_check_nand_cleanmarker (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) | ||
| 999 | { | 1021 | { |
| 1000 | struct jffs2_unknown_node n; | 1022 | struct jffs2_unknown_node n; |
| 1001 | unsigned char buf[2 * NAND_MAX_OOBSIZE]; | 1023 | struct mtd_oob_ops ops; |
| 1002 | unsigned char *p; | 1024 | int oobsize = c->mtd->oobsize; |
| 1003 | int ret, i, cnt, retval = 0; | 1025 | unsigned char *p,*b; |
| 1004 | size_t retlen, offset; | 1026 | int i, ret; |
| 1005 | int oob_size; | 1027 | size_t offset = jeb->offset; |
| 1006 | 1028 | ||
| 1007 | offset = jeb->offset; | 1029 | /* Check first if the block is bad. */ |
| 1008 | oob_size = c->mtd->oobsize; | 1030 | if (c->mtd->block_isbad(c->mtd, offset)) { |
| 1009 | 1031 | D1 (printk(KERN_WARNING "jffs2_check_nand_cleanmarker()" | |
| 1010 | /* Loop through the physical blocks */ | 1032 | ": Bad block at %08x\n", jeb->offset)); |
| 1011 | for (cnt = 0; cnt < (c->sector_size / c->mtd->erasesize); cnt++) { | 1033 | return 2; |
| 1012 | /* Check first if the block is bad. */ | 1034 | } |
| 1013 | if (c->mtd->block_isbad (c->mtd, offset)) { | ||
| 1014 | D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Bad block at %08x\n", jeb->offset)); | ||
| 1015 | return 2; | ||
| 1016 | } | ||
| 1017 | /* | ||
| 1018 | * We read oob data from page 0 and 1 of the block. | ||
| 1019 | * page 0 contains cleanmarker and badblock info | ||
| 1020 | * page 1 contains failure count of this block | ||
| 1021 | */ | ||
| 1022 | ret = c->mtd->read_oob (c->mtd, offset, oob_size << 1, &retlen, buf); | ||
| 1023 | 1035 | ||
| 1024 | if (ret) { | 1036 | ops.len = oobsize; |
| 1025 | D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Read OOB failed %d for block at %08x\n", ret, jeb->offset)); | 1037 | ops.ooblen = oobsize; |
| 1026 | return ret; | 1038 | ops.oobbuf = c->oobbuf; |
| 1027 | } | 1039 | ops.ooboffs = 0; |
| 1028 | if (retlen < (oob_size << 1)) { | 1040 | ops.datbuf = NULL; |
| 1029 | D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Read OOB return short read (%zd bytes not %d) for block at %08x\n", retlen, oob_size << 1, jeb->offset)); | 1041 | ops.mode = MTD_OOB_PLACE; |
| 1030 | return -EIO; | ||
| 1031 | } | ||
| 1032 | 1042 | ||
| 1033 | /* Check cleanmarker only on the first physical block */ | 1043 | ret = c->mtd->read_oob(c->mtd, offset, &ops); |
| 1034 | if (!cnt) { | 1044 | if (ret) { |
| 1035 | n.magic = cpu_to_je16 (JFFS2_MAGIC_BITMASK); | 1045 | D1 (printk(KERN_WARNING "jffs2_check_nand_cleanmarker(): " |
| 1036 | n.nodetype = cpu_to_je16 (JFFS2_NODETYPE_CLEANMARKER); | 1046 | "Read OOB failed %d for block at %08x\n", |
| 1037 | n.totlen = cpu_to_je32 (8); | 1047 | ret, jeb->offset)); |
| 1038 | p = (unsigned char *) &n; | 1048 | return ret; |
| 1049 | } | ||
| 1039 | 1050 | ||
| 1040 | for (i = 0; i < c->fsdata_len; i++) { | 1051 | if (ops.retlen < ops.len) { |
| 1041 | if (buf[c->fsdata_pos + i] != p[i]) { | 1052 | D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): " |
| 1042 | retval = 1; | 1053 | "Read OOB return short read (%zd bytes not %d) " |
| 1043 | } | 1054 | "for block at %08x\n", ops.retlen, ops.len, |
| 1044 | } | 1055 | jeb->offset)); |
| 1045 | D1(if (retval == 1) { | 1056 | return -EIO; |
| 1046 | printk(KERN_WARNING "jffs2_check_nand_cleanmarker(): Cleanmarker node not detected in block at %08x\n", jeb->offset); | ||
| 1047 | printk(KERN_WARNING "OOB at %08x was ", offset); | ||
| 1048 | for (i=0; i < oob_size; i++) { | ||
| 1049 | printk("%02x ", buf[i]); | ||
| 1050 | } | ||
| 1051 | printk("\n"); | ||
| 1052 | }) | ||
| 1053 | } | ||
| 1054 | offset += c->mtd->erasesize; | ||
| 1055 | } | 1057 | } |
| 1056 | return retval; | 1058 | |
| 1059 | n.magic = cpu_to_je16 (JFFS2_MAGIC_BITMASK); | ||
| 1060 | n.nodetype = cpu_to_je16 (JFFS2_NODETYPE_CLEANMARKER); | ||
| 1061 | n.totlen = cpu_to_je32 (8); | ||
| 1062 | p = (unsigned char *) &n; | ||
| 1063 | b = c->oobbuf + c->fsdata_pos; | ||
| 1064 | |||
| 1065 | for (i = c->fsdata_len; i; i--) { | ||
| 1066 | if (*b++ != *p++) | ||
| 1067 | ret = 1; | ||
| 1068 | } | ||
| 1069 | |||
| 1070 | D1(if (ret == 1) { | ||
| 1071 | printk(KERN_WARNING "jffs2_check_nand_cleanmarker(): " | ||
| 1072 | "Cleanmarker node not detected in block at %08x\n", | ||
| 1073 | offset); | ||
| 1074 | printk(KERN_WARNING "OOB at %08zx was ", offset); | ||
| 1075 | for (i=0; i < oobsize; i++) | ||
| 1076 | printk("%02x ", c->oobbuf[i]); | ||
| 1077 | printk("\n"); | ||
| 1078 | }); | ||
| 1079 | return ret; | ||
| 1057 | } | 1080 | } |
| 1058 | 1081 | ||
| 1059 | int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) | 1082 | int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, |
| 1083 | struct jffs2_eraseblock *jeb) | ||
| 1060 | { | 1084 | { |
| 1061 | struct jffs2_unknown_node n; | 1085 | struct jffs2_unknown_node n; |
| 1062 | int ret; | 1086 | int ret; |
| 1063 | size_t retlen; | 1087 | struct mtd_oob_ops ops; |
| 1064 | 1088 | ||
| 1065 | n.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); | 1089 | n.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); |
| 1066 | n.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER); | 1090 | n.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER); |
| 1067 | n.totlen = cpu_to_je32(8); | 1091 | n.totlen = cpu_to_je32(8); |
| 1068 | 1092 | ||
| 1069 | ret = jffs2_flash_write_oob(c, jeb->offset + c->fsdata_pos, c->fsdata_len, &retlen, (unsigned char *)&n); | 1093 | ops.len = c->fsdata_len; |
| 1094 | ops.ooblen = c->fsdata_len;; | ||
| 1095 | ops.oobbuf = (uint8_t *)&n; | ||
| 1096 | ops.ooboffs = c->fsdata_pos; | ||
| 1097 | ops.datbuf = NULL; | ||
| 1098 | ops.mode = MTD_OOB_PLACE; | ||
| 1099 | |||
| 1100 | ret = c->mtd->write_oob(c->mtd, jeb->offset, &ops); | ||
| 1070 | 1101 | ||
| 1071 | if (ret) { | 1102 | if (ret) { |
| 1072 | D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Write failed for block at %08x: error %d\n", jeb->offset, ret)); | 1103 | D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): " |
| 1104 | "Write failed for block at %08x: error %d\n", | ||
| 1105 | jeb->offset, ret)); | ||
| 1073 | return ret; | 1106 | return ret; |
| 1074 | } | 1107 | } |
| 1075 | if (retlen != c->fsdata_len) { | 1108 | if (ops.retlen != ops.len) { |
| 1076 | D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Short write for block at %08x: %zd not %d\n", jeb->offset, retlen, c->fsdata_len)); | 1109 | D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): " |
| 1077 | return ret; | 1110 | "Short write for block at %08x: %zd not %d\n", |
| 1111 | jeb->offset, ops.retlen, ops.len)); | ||
| 1112 | return -EIO; | ||
| 1078 | } | 1113 | } |
| 1079 | return 0; | 1114 | return 0; |
| 1080 | } | 1115 | } |
| @@ -1108,18 +1143,9 @@ int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock * | |||
| 1108 | return 1; | 1143 | return 1; |
| 1109 | } | 1144 | } |
| 1110 | 1145 | ||
| 1111 | #define NAND_JFFS2_OOB16_FSDALEN 8 | ||
| 1112 | |||
| 1113 | static struct nand_oobinfo jffs2_oobinfo_docecc = { | ||
| 1114 | .useecc = MTD_NANDECC_PLACE, | ||
| 1115 | .eccbytes = 6, | ||
| 1116 | .eccpos = {0,1,2,3,4,5} | ||
| 1117 | }; | ||
| 1118 | |||
| 1119 | |||
| 1120 | static int jffs2_nand_set_oobinfo(struct jffs2_sb_info *c) | 1146 | static int jffs2_nand_set_oobinfo(struct jffs2_sb_info *c) |
| 1121 | { | 1147 | { |
| 1122 | struct nand_oobinfo *oinfo = &c->mtd->oobinfo; | 1148 | struct nand_ecclayout *oinfo = c->mtd->ecclayout; |
| 1123 | 1149 | ||
| 1124 | /* Do this only, if we have an oob buffer */ | 1150 | /* Do this only, if we have an oob buffer */ |
| 1125 | if (!c->mtd->oobsize) | 1151 | if (!c->mtd->oobsize) |
| @@ -1129,33 +1155,23 @@ static int jffs2_nand_set_oobinfo(struct jffs2_sb_info *c) | |||
| 1129 | c->cleanmarker_size = 0; | 1155 | c->cleanmarker_size = 0; |
| 1130 | 1156 | ||
| 1131 | /* Should we use autoplacement ? */ | 1157 | /* Should we use autoplacement ? */ |
| 1132 | if (oinfo && oinfo->useecc == MTD_NANDECC_AUTOPLACE) { | 1158 | if (!oinfo) { |
| 1133 | D1(printk(KERN_DEBUG "JFFS2 using autoplace on NAND\n")); | 1159 | D1(printk(KERN_DEBUG "JFFS2 on NAND. No autoplacment info found\n")); |
| 1134 | /* Get the position of the free bytes */ | 1160 | return -EINVAL; |
| 1135 | if (!oinfo->oobfree[0][1]) { | 1161 | } |
| 1136 | printk (KERN_WARNING "jffs2_nand_set_oobinfo(): Eeep. Autoplacement selected and no empty space in oob\n"); | ||
| 1137 | return -ENOSPC; | ||
| 1138 | } | ||
| 1139 | c->fsdata_pos = oinfo->oobfree[0][0]; | ||
| 1140 | c->fsdata_len = oinfo->oobfree[0][1]; | ||
| 1141 | if (c->fsdata_len > 8) | ||
| 1142 | c->fsdata_len = 8; | ||
| 1143 | } else { | ||
| 1144 | /* This is just a legacy fallback and should go away soon */ | ||
| 1145 | switch(c->mtd->ecctype) { | ||
| 1146 | case MTD_ECC_RS_DiskOnChip: | ||
| 1147 | printk(KERN_WARNING "JFFS2 using DiskOnChip hardware ECC without autoplacement. Fix it!\n"); | ||
| 1148 | c->oobinfo = &jffs2_oobinfo_docecc; | ||
| 1149 | c->fsdata_pos = 6; | ||
| 1150 | c->fsdata_len = NAND_JFFS2_OOB16_FSDALEN; | ||
| 1151 | c->badblock_pos = 15; | ||
| 1152 | break; | ||
| 1153 | 1162 | ||
| 1154 | default: | 1163 | D1(printk(KERN_DEBUG "JFFS2 using autoplace on NAND\n")); |
| 1155 | D1(printk(KERN_DEBUG "JFFS2 on NAND. No autoplacment info found\n")); | 1164 | /* Get the position of the free bytes */ |
| 1156 | return -EINVAL; | 1165 | if (!oinfo->oobfree[0].length) { |
| 1157 | } | 1166 | printk (KERN_WARNING "jffs2_nand_set_oobinfo(): Eeep." |
| 1167 | " Autoplacement selected and no empty space in oob\n"); | ||
| 1168 | return -ENOSPC; | ||
| 1158 | } | 1169 | } |
| 1170 | c->fsdata_pos = oinfo->oobfree[0].offset; | ||
| 1171 | c->fsdata_len = oinfo->oobfree[0].length; | ||
| 1172 | if (c->fsdata_len > 8) | ||
| 1173 | c->fsdata_len = 8; | ||
| 1174 | |||
| 1159 | return 0; | 1175 | return 0; |
| 1160 | } | 1176 | } |
| 1161 | 1177 | ||
| @@ -1165,13 +1181,17 @@ int jffs2_nand_flash_setup(struct jffs2_sb_info *c) | |||
| 1165 | 1181 | ||
| 1166 | /* Initialise write buffer */ | 1182 | /* Initialise write buffer */ |
| 1167 | init_rwsem(&c->wbuf_sem); | 1183 | init_rwsem(&c->wbuf_sem); |
| 1168 | c->wbuf_pagesize = c->mtd->oobblock; | 1184 | c->wbuf_pagesize = c->mtd->writesize; |
| 1169 | c->wbuf_ofs = 0xFFFFFFFF; | 1185 | c->wbuf_ofs = 0xFFFFFFFF; |
| 1170 | 1186 | ||
| 1171 | c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); | 1187 | c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); |
| 1172 | if (!c->wbuf) | 1188 | if (!c->wbuf) |
| 1173 | return -ENOMEM; | 1189 | return -ENOMEM; |
| 1174 | 1190 | ||
| 1191 | c->oobbuf = kmalloc(NR_OOB_SCAN_PAGES * c->mtd->oobsize, GFP_KERNEL); | ||
| 1192 | if (!c->oobbuf) | ||
| 1193 | return -ENOMEM; | ||
| 1194 | |||
| 1175 | res = jffs2_nand_set_oobinfo(c); | 1195 | res = jffs2_nand_set_oobinfo(c); |
| 1176 | 1196 | ||
| 1177 | #ifdef BREAKME | 1197 | #ifdef BREAKME |
| @@ -1189,6 +1209,7 @@ int jffs2_nand_flash_setup(struct jffs2_sb_info *c) | |||
| 1189 | void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c) | 1209 | void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c) |
| 1190 | { | 1210 | { |
| 1191 | kfree(c->wbuf); | 1211 | kfree(c->wbuf); |
| 1212 | kfree(c->oobbuf); | ||
| 1192 | } | 1213 | } |
| 1193 | 1214 | ||
| 1194 | int jffs2_dataflash_setup(struct jffs2_sb_info *c) { | 1215 | int jffs2_dataflash_setup(struct jffs2_sb_info *c) { |
| @@ -1236,33 +1257,14 @@ void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) { | |||
| 1236 | kfree(c->wbuf); | 1257 | kfree(c->wbuf); |
| 1237 | } | 1258 | } |
| 1238 | 1259 | ||
| 1239 | int jffs2_nor_ecc_flash_setup(struct jffs2_sb_info *c) { | ||
| 1240 | /* Cleanmarker is actually larger on the flashes */ | ||
| 1241 | c->cleanmarker_size = 16; | ||
| 1242 | |||
| 1243 | /* Initialize write buffer */ | ||
| 1244 | init_rwsem(&c->wbuf_sem); | ||
| 1245 | c->wbuf_pagesize = c->mtd->eccsize; | ||
| 1246 | c->wbuf_ofs = 0xFFFFFFFF; | ||
| 1247 | |||
| 1248 | c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); | ||
| 1249 | if (!c->wbuf) | ||
| 1250 | return -ENOMEM; | ||
| 1251 | |||
| 1252 | return 0; | ||
| 1253 | } | ||
| 1254 | |||
| 1255 | void jffs2_nor_ecc_flash_cleanup(struct jffs2_sb_info *c) { | ||
| 1256 | kfree(c->wbuf); | ||
| 1257 | } | ||
| 1258 | |||
| 1259 | int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) { | 1260 | int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) { |
| 1260 | /* Cleanmarker currently occupies a whole programming region */ | 1261 | /* Cleanmarker currently occupies whole programming regions, |
| 1261 | c->cleanmarker_size = MTD_PROGREGION_SIZE(c->mtd); | 1262 | * either one or 2 for 8Byte STMicro flashes. */ |
| 1263 | c->cleanmarker_size = max(16u, c->mtd->writesize); | ||
| 1262 | 1264 | ||
| 1263 | /* Initialize write buffer */ | 1265 | /* Initialize write buffer */ |
| 1264 | init_rwsem(&c->wbuf_sem); | 1266 | init_rwsem(&c->wbuf_sem); |
| 1265 | c->wbuf_pagesize = MTD_PROGREGION_SIZE(c->mtd); | 1267 | c->wbuf_pagesize = c->mtd->writesize; |
| 1266 | c->wbuf_ofs = 0xFFFFFFFF; | 1268 | c->wbuf_ofs = 0xFFFFFFFF; |
| 1267 | 1269 | ||
| 1268 | c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); | 1270 | c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); |
diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c index 1342f0158e9b..67176792e138 100644 --- a/fs/jffs2/write.c +++ b/fs/jffs2/write.c | |||
| @@ -37,7 +37,6 @@ int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint | |||
| 37 | f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache; | 37 | f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache; |
| 38 | f->inocache->state = INO_STATE_PRESENT; | 38 | f->inocache->state = INO_STATE_PRESENT; |
| 39 | 39 | ||
| 40 | |||
| 41 | jffs2_add_ino_cache(c, f->inocache); | 40 | jffs2_add_ino_cache(c, f->inocache); |
| 42 | D1(printk(KERN_DEBUG "jffs2_do_new_inode(): Assigned ino# %d\n", f->inocache->ino)); | 41 | D1(printk(KERN_DEBUG "jffs2_do_new_inode(): Assigned ino# %d\n", f->inocache->ino)); |
| 43 | ri->ino = cpu_to_je32(f->inocache->ino); | 42 | ri->ino = cpu_to_je32(f->inocache->ino); |
| @@ -57,12 +56,14 @@ int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint | |||
| 57 | /* jffs2_write_dnode - given a raw_inode, allocate a full_dnode for it, | 56 | /* jffs2_write_dnode - given a raw_inode, allocate a full_dnode for it, |
| 58 | write it to the flash, link it into the existing inode/fragment list */ | 57 | write it to the flash, link it into the existing inode/fragment list */ |
| 59 | 58 | ||
| 60 | struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const unsigned char *data, uint32_t datalen, uint32_t flash_ofs, int alloc_mode) | 59 | struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, |
| 60 | struct jffs2_raw_inode *ri, const unsigned char *data, | ||
| 61 | uint32_t datalen, int alloc_mode) | ||
| 61 | 62 | ||
| 62 | { | 63 | { |
| 63 | struct jffs2_raw_node_ref *raw; | ||
| 64 | struct jffs2_full_dnode *fn; | 64 | struct jffs2_full_dnode *fn; |
| 65 | size_t retlen; | 65 | size_t retlen; |
| 66 | uint32_t flash_ofs; | ||
| 66 | struct kvec vecs[2]; | 67 | struct kvec vecs[2]; |
| 67 | int ret; | 68 | int ret; |
| 68 | int retried = 0; | 69 | int retried = 0; |
| @@ -78,34 +79,21 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
| 78 | vecs[1].iov_base = (unsigned char *)data; | 79 | vecs[1].iov_base = (unsigned char *)data; |
| 79 | vecs[1].iov_len = datalen; | 80 | vecs[1].iov_len = datalen; |
| 80 | 81 | ||
| 81 | jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len); | ||
| 82 | |||
| 83 | if (je32_to_cpu(ri->totlen) != sizeof(*ri) + datalen) { | 82 | if (je32_to_cpu(ri->totlen) != sizeof(*ri) + datalen) { |
| 84 | printk(KERN_WARNING "jffs2_write_dnode: ri->totlen (0x%08x) != sizeof(*ri) (0x%08zx) + datalen (0x%08x)\n", je32_to_cpu(ri->totlen), sizeof(*ri), datalen); | 83 | printk(KERN_WARNING "jffs2_write_dnode: ri->totlen (0x%08x) != sizeof(*ri) (0x%08zx) + datalen (0x%08x)\n", je32_to_cpu(ri->totlen), sizeof(*ri), datalen); |
| 85 | } | 84 | } |
| 86 | raw = jffs2_alloc_raw_node_ref(); | ||
| 87 | if (!raw) | ||
| 88 | return ERR_PTR(-ENOMEM); | ||
| 89 | 85 | ||
| 90 | fn = jffs2_alloc_full_dnode(); | 86 | fn = jffs2_alloc_full_dnode(); |
| 91 | if (!fn) { | 87 | if (!fn) |
| 92 | jffs2_free_raw_node_ref(raw); | ||
| 93 | return ERR_PTR(-ENOMEM); | 88 | return ERR_PTR(-ENOMEM); |
| 94 | } | ||
| 95 | |||
| 96 | fn->ofs = je32_to_cpu(ri->offset); | ||
| 97 | fn->size = je32_to_cpu(ri->dsize); | ||
| 98 | fn->frags = 0; | ||
| 99 | 89 | ||
| 100 | /* check number of valid vecs */ | 90 | /* check number of valid vecs */ |
| 101 | if (!datalen || !data) | 91 | if (!datalen || !data) |
| 102 | cnt = 1; | 92 | cnt = 1; |
| 103 | retry: | 93 | retry: |
| 104 | fn->raw = raw; | 94 | flash_ofs = write_ofs(c); |
| 105 | 95 | ||
| 106 | raw->flash_offset = flash_ofs; | 96 | jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len); |
| 107 | raw->__totlen = PAD(sizeof(*ri)+datalen); | ||
| 108 | raw->next_phys = NULL; | ||
| 109 | 97 | ||
| 110 | if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(ri->version) < f->highest_version)) { | 98 | if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(ri->version) < f->highest_version)) { |
| 111 | BUG_ON(!retried); | 99 | BUG_ON(!retried); |
| @@ -125,22 +113,16 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
| 125 | 113 | ||
| 126 | /* Mark the space as dirtied */ | 114 | /* Mark the space as dirtied */ |
| 127 | if (retlen) { | 115 | if (retlen) { |
| 128 | /* Doesn't belong to any inode */ | ||
| 129 | raw->next_in_ino = NULL; | ||
| 130 | |||
| 131 | /* Don't change raw->size to match retlen. We may have | 116 | /* Don't change raw->size to match retlen. We may have |
| 132 | written the node header already, and only the data will | 117 | written the node header already, and only the data will |
| 133 | seem corrupted, in which case the scan would skip over | 118 | seem corrupted, in which case the scan would skip over |
| 134 | any node we write before the original intended end of | 119 | any node we write before the original intended end of |
| 135 | this node */ | 120 | this node */ |
| 136 | raw->flash_offset |= REF_OBSOLETE; | 121 | jffs2_add_physical_node_ref(c, flash_ofs | REF_OBSOLETE, PAD(sizeof(*ri)+datalen), NULL); |
| 137 | jffs2_add_physical_node_ref(c, raw); | ||
| 138 | jffs2_mark_node_obsolete(c, raw); | ||
| 139 | } else { | 122 | } else { |
| 140 | printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", raw->flash_offset); | 123 | printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", flash_ofs); |
| 141 | jffs2_free_raw_node_ref(raw); | ||
| 142 | } | 124 | } |
| 143 | if (!retried && alloc_mode != ALLOC_NORETRY && (raw = jffs2_alloc_raw_node_ref())) { | 125 | if (!retried && alloc_mode != ALLOC_NORETRY) { |
| 144 | /* Try to reallocate space and retry */ | 126 | /* Try to reallocate space and retry */ |
| 145 | uint32_t dummy; | 127 | uint32_t dummy; |
| 146 | struct jffs2_eraseblock *jeb = &c->blocks[flash_ofs / c->sector_size]; | 128 | struct jffs2_eraseblock *jeb = &c->blocks[flash_ofs / c->sector_size]; |
| @@ -153,19 +135,20 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
| 153 | jffs2_dbg_acct_paranoia_check(c, jeb); | 135 | jffs2_dbg_acct_paranoia_check(c, jeb); |
| 154 | 136 | ||
| 155 | if (alloc_mode == ALLOC_GC) { | 137 | if (alloc_mode == ALLOC_GC) { |
| 156 | ret = jffs2_reserve_space_gc(c, sizeof(*ri) + datalen, &flash_ofs, | 138 | ret = jffs2_reserve_space_gc(c, sizeof(*ri) + datalen, &dummy, |
| 157 | &dummy, JFFS2_SUMMARY_INODE_SIZE); | 139 | JFFS2_SUMMARY_INODE_SIZE); |
| 158 | } else { | 140 | } else { |
| 159 | /* Locking pain */ | 141 | /* Locking pain */ |
| 160 | up(&f->sem); | 142 | up(&f->sem); |
| 161 | jffs2_complete_reservation(c); | 143 | jffs2_complete_reservation(c); |
| 162 | 144 | ||
| 163 | ret = jffs2_reserve_space(c, sizeof(*ri) + datalen, &flash_ofs, | 145 | ret = jffs2_reserve_space(c, sizeof(*ri) + datalen, &dummy, |
| 164 | &dummy, alloc_mode, JFFS2_SUMMARY_INODE_SIZE); | 146 | alloc_mode, JFFS2_SUMMARY_INODE_SIZE); |
| 165 | down(&f->sem); | 147 | down(&f->sem); |
| 166 | } | 148 | } |
| 167 | 149 | ||
| 168 | if (!ret) { | 150 | if (!ret) { |
| 151 | flash_ofs = write_ofs(c); | ||
| 169 | D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs)); | 152 | D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs)); |
| 170 | 153 | ||
| 171 | jffs2_dbg_acct_sanity_check(c,jeb); | 154 | jffs2_dbg_acct_sanity_check(c,jeb); |
| @@ -174,7 +157,6 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
| 174 | goto retry; | 157 | goto retry; |
| 175 | } | 158 | } |
| 176 | D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret)); | 159 | D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret)); |
| 177 | jffs2_free_raw_node_ref(raw); | ||
| 178 | } | 160 | } |
| 179 | /* Release the full_dnode which is now useless, and return */ | 161 | /* Release the full_dnode which is now useless, and return */ |
| 180 | jffs2_free_full_dnode(fn); | 162 | jffs2_free_full_dnode(fn); |
| @@ -188,20 +170,17 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
| 188 | if ((je32_to_cpu(ri->dsize) >= PAGE_CACHE_SIZE) || | 170 | if ((je32_to_cpu(ri->dsize) >= PAGE_CACHE_SIZE) || |
| 189 | ( ((je32_to_cpu(ri->offset)&(PAGE_CACHE_SIZE-1))==0) && | 171 | ( ((je32_to_cpu(ri->offset)&(PAGE_CACHE_SIZE-1))==0) && |
| 190 | (je32_to_cpu(ri->dsize)+je32_to_cpu(ri->offset) == je32_to_cpu(ri->isize)))) { | 172 | (je32_to_cpu(ri->dsize)+je32_to_cpu(ri->offset) == je32_to_cpu(ri->isize)))) { |
| 191 | raw->flash_offset |= REF_PRISTINE; | 173 | flash_ofs |= REF_PRISTINE; |
| 192 | } else { | 174 | } else { |
| 193 | raw->flash_offset |= REF_NORMAL; | 175 | flash_ofs |= REF_NORMAL; |
| 194 | } | 176 | } |
| 195 | jffs2_add_physical_node_ref(c, raw); | 177 | fn->raw = jffs2_add_physical_node_ref(c, flash_ofs, PAD(sizeof(*ri)+datalen), f->inocache); |
| 196 | 178 | fn->ofs = je32_to_cpu(ri->offset); | |
| 197 | /* Link into per-inode list */ | 179 | fn->size = je32_to_cpu(ri->dsize); |
| 198 | spin_lock(&c->erase_completion_lock); | 180 | fn->frags = 0; |
| 199 | raw->next_in_ino = f->inocache->nodes; | ||
| 200 | f->inocache->nodes = raw; | ||
| 201 | spin_unlock(&c->erase_completion_lock); | ||
| 202 | 181 | ||
| 203 | D1(printk(KERN_DEBUG "jffs2_write_dnode wrote node at 0x%08x(%d) with dsize 0x%x, csize 0x%x, node_crc 0x%08x, data_crc 0x%08x, totlen 0x%08x\n", | 182 | D1(printk(KERN_DEBUG "jffs2_write_dnode wrote node at 0x%08x(%d) with dsize 0x%x, csize 0x%x, node_crc 0x%08x, data_crc 0x%08x, totlen 0x%08x\n", |
| 204 | flash_ofs, ref_flags(raw), je32_to_cpu(ri->dsize), | 183 | flash_ofs & ~3, flash_ofs & 3, je32_to_cpu(ri->dsize), |
| 205 | je32_to_cpu(ri->csize), je32_to_cpu(ri->node_crc), | 184 | je32_to_cpu(ri->csize), je32_to_cpu(ri->node_crc), |
| 206 | je32_to_cpu(ri->data_crc), je32_to_cpu(ri->totlen))); | 185 | je32_to_cpu(ri->data_crc), je32_to_cpu(ri->totlen))); |
| 207 | 186 | ||
| @@ -212,12 +191,14 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
| 212 | return fn; | 191 | return fn; |
| 213 | } | 192 | } |
| 214 | 193 | ||
| 215 | struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_dirent *rd, const unsigned char *name, uint32_t namelen, uint32_t flash_ofs, int alloc_mode) | 194 | struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f, |
| 195 | struct jffs2_raw_dirent *rd, const unsigned char *name, | ||
| 196 | uint32_t namelen, int alloc_mode) | ||
| 216 | { | 197 | { |
| 217 | struct jffs2_raw_node_ref *raw; | ||
| 218 | struct jffs2_full_dirent *fd; | 198 | struct jffs2_full_dirent *fd; |
| 219 | size_t retlen; | 199 | size_t retlen; |
| 220 | struct kvec vecs[2]; | 200 | struct kvec vecs[2]; |
| 201 | uint32_t flash_ofs; | ||
| 221 | int retried = 0; | 202 | int retried = 0; |
| 222 | int ret; | 203 | int ret; |
| 223 | 204 | ||
| @@ -228,26 +209,16 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff | |||
| 228 | D1(if(je32_to_cpu(rd->hdr_crc) != crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)) { | 209 | D1(if(je32_to_cpu(rd->hdr_crc) != crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)) { |
| 229 | printk(KERN_CRIT "Eep. CRC not correct in jffs2_write_dirent()\n"); | 210 | printk(KERN_CRIT "Eep. CRC not correct in jffs2_write_dirent()\n"); |
| 230 | BUG(); | 211 | BUG(); |
| 231 | } | 212 | }); |
| 232 | ); | ||
| 233 | 213 | ||
| 234 | vecs[0].iov_base = rd; | 214 | vecs[0].iov_base = rd; |
| 235 | vecs[0].iov_len = sizeof(*rd); | 215 | vecs[0].iov_len = sizeof(*rd); |
| 236 | vecs[1].iov_base = (unsigned char *)name; | 216 | vecs[1].iov_base = (unsigned char *)name; |
| 237 | vecs[1].iov_len = namelen; | 217 | vecs[1].iov_len = namelen; |
| 238 | 218 | ||
| 239 | jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len); | ||
| 240 | |||
| 241 | raw = jffs2_alloc_raw_node_ref(); | ||
| 242 | |||
| 243 | if (!raw) | ||
| 244 | return ERR_PTR(-ENOMEM); | ||
| 245 | |||
| 246 | fd = jffs2_alloc_full_dirent(namelen+1); | 219 | fd = jffs2_alloc_full_dirent(namelen+1); |
| 247 | if (!fd) { | 220 | if (!fd) |
| 248 | jffs2_free_raw_node_ref(raw); | ||
| 249 | return ERR_PTR(-ENOMEM); | 221 | return ERR_PTR(-ENOMEM); |
| 250 | } | ||
| 251 | 222 | ||
| 252 | fd->version = je32_to_cpu(rd->version); | 223 | fd->version = je32_to_cpu(rd->version); |
| 253 | fd->ino = je32_to_cpu(rd->ino); | 224 | fd->ino = je32_to_cpu(rd->ino); |
| @@ -257,11 +228,9 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff | |||
| 257 | fd->name[namelen]=0; | 228 | fd->name[namelen]=0; |
| 258 | 229 | ||
| 259 | retry: | 230 | retry: |
| 260 | fd->raw = raw; | 231 | flash_ofs = write_ofs(c); |
| 261 | 232 | ||
| 262 | raw->flash_offset = flash_ofs; | 233 | jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len); |
| 263 | raw->__totlen = PAD(sizeof(*rd)+namelen); | ||
| 264 | raw->next_phys = NULL; | ||
| 265 | 234 | ||
| 266 | if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(rd->version) < f->highest_version)) { | 235 | if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(rd->version) < f->highest_version)) { |
| 267 | BUG_ON(!retried); | 236 | BUG_ON(!retried); |
| @@ -280,15 +249,11 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff | |||
| 280 | sizeof(*rd)+namelen, flash_ofs, ret, retlen); | 249 | sizeof(*rd)+namelen, flash_ofs, ret, retlen); |
| 281 | /* Mark the space as dirtied */ | 250 | /* Mark the space as dirtied */ |
| 282 | if (retlen) { | 251 | if (retlen) { |
| 283 | raw->next_in_ino = NULL; | 252 | jffs2_add_physical_node_ref(c, flash_ofs | REF_OBSOLETE, PAD(sizeof(*rd)+namelen), NULL); |
| 284 | raw->flash_offset |= REF_OBSOLETE; | ||
| 285 | jffs2_add_physical_node_ref(c, raw); | ||
| 286 | jffs2_mark_node_obsolete(c, raw); | ||
| 287 | } else { | 253 | } else { |
| 288 | printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", raw->flash_offset); | 254 | printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", flash_ofs); |
| 289 | jffs2_free_raw_node_ref(raw); | ||
| 290 | } | 255 | } |
| 291 | if (!retried && (raw = jffs2_alloc_raw_node_ref())) { | 256 | if (!retried) { |
| 292 | /* Try to reallocate space and retry */ | 257 | /* Try to reallocate space and retry */ |
| 293 | uint32_t dummy; | 258 | uint32_t dummy; |
| 294 | struct jffs2_eraseblock *jeb = &c->blocks[flash_ofs / c->sector_size]; | 259 | struct jffs2_eraseblock *jeb = &c->blocks[flash_ofs / c->sector_size]; |
| @@ -301,39 +266,33 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff | |||
| 301 | jffs2_dbg_acct_paranoia_check(c, jeb); | 266 | jffs2_dbg_acct_paranoia_check(c, jeb); |
| 302 | 267 | ||
| 303 | if (alloc_mode == ALLOC_GC) { | 268 | if (alloc_mode == ALLOC_GC) { |
| 304 | ret = jffs2_reserve_space_gc(c, sizeof(*rd) + namelen, &flash_ofs, | 269 | ret = jffs2_reserve_space_gc(c, sizeof(*rd) + namelen, &dummy, |
| 305 | &dummy, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); | 270 | JFFS2_SUMMARY_DIRENT_SIZE(namelen)); |
| 306 | } else { | 271 | } else { |
| 307 | /* Locking pain */ | 272 | /* Locking pain */ |
| 308 | up(&f->sem); | 273 | up(&f->sem); |
| 309 | jffs2_complete_reservation(c); | 274 | jffs2_complete_reservation(c); |
| 310 | 275 | ||
| 311 | ret = jffs2_reserve_space(c, sizeof(*rd) + namelen, &flash_ofs, | 276 | ret = jffs2_reserve_space(c, sizeof(*rd) + namelen, &dummy, |
| 312 | &dummy, alloc_mode, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); | 277 | alloc_mode, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); |
| 313 | down(&f->sem); | 278 | down(&f->sem); |
| 314 | } | 279 | } |
| 315 | 280 | ||
| 316 | if (!ret) { | 281 | if (!ret) { |
| 282 | flash_ofs = write_ofs(c); | ||
| 317 | D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs)); | 283 | D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs)); |
| 318 | jffs2_dbg_acct_sanity_check(c,jeb); | 284 | jffs2_dbg_acct_sanity_check(c,jeb); |
| 319 | jffs2_dbg_acct_paranoia_check(c, jeb); | 285 | jffs2_dbg_acct_paranoia_check(c, jeb); |
| 320 | goto retry; | 286 | goto retry; |
| 321 | } | 287 | } |
| 322 | D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret)); | 288 | D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret)); |
| 323 | jffs2_free_raw_node_ref(raw); | ||
| 324 | } | 289 | } |
| 325 | /* Release the full_dnode which is now useless, and return */ | 290 | /* Release the full_dnode which is now useless, and return */ |
| 326 | jffs2_free_full_dirent(fd); | 291 | jffs2_free_full_dirent(fd); |
| 327 | return ERR_PTR(ret?ret:-EIO); | 292 | return ERR_PTR(ret?ret:-EIO); |
| 328 | } | 293 | } |
| 329 | /* Mark the space used */ | 294 | /* Mark the space used */ |
| 330 | raw->flash_offset |= REF_PRISTINE; | 295 | fd->raw = jffs2_add_physical_node_ref(c, flash_ofs | REF_PRISTINE, PAD(sizeof(*rd)+namelen), f->inocache); |
| 331 | jffs2_add_physical_node_ref(c, raw); | ||
| 332 | |||
| 333 | spin_lock(&c->erase_completion_lock); | ||
| 334 | raw->next_in_ino = f->inocache->nodes; | ||
| 335 | f->inocache->nodes = raw; | ||
| 336 | spin_unlock(&c->erase_completion_lock); | ||
| 337 | 296 | ||
| 338 | if (retried) { | 297 | if (retried) { |
| 339 | jffs2_dbg_acct_sanity_check(c,NULL); | 298 | jffs2_dbg_acct_sanity_check(c,NULL); |
| @@ -359,14 +318,14 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
| 359 | struct jffs2_full_dnode *fn; | 318 | struct jffs2_full_dnode *fn; |
| 360 | unsigned char *comprbuf = NULL; | 319 | unsigned char *comprbuf = NULL; |
| 361 | uint16_t comprtype = JFFS2_COMPR_NONE; | 320 | uint16_t comprtype = JFFS2_COMPR_NONE; |
| 362 | uint32_t phys_ofs, alloclen; | 321 | uint32_t alloclen; |
| 363 | uint32_t datalen, cdatalen; | 322 | uint32_t datalen, cdatalen; |
| 364 | int retried = 0; | 323 | int retried = 0; |
| 365 | 324 | ||
| 366 | retry: | 325 | retry: |
| 367 | D2(printk(KERN_DEBUG "jffs2_commit_write() loop: 0x%x to write to 0x%x\n", writelen, offset)); | 326 | D2(printk(KERN_DEBUG "jffs2_commit_write() loop: 0x%x to write to 0x%x\n", writelen, offset)); |
| 368 | 327 | ||
| 369 | ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN, &phys_ofs, | 328 | ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN, |
| 370 | &alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); | 329 | &alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); |
| 371 | if (ret) { | 330 | if (ret) { |
| 372 | D1(printk(KERN_DEBUG "jffs2_reserve_space returned %d\n", ret)); | 331 | D1(printk(KERN_DEBUG "jffs2_reserve_space returned %d\n", ret)); |
| @@ -394,7 +353,7 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
| 394 | ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); | 353 | ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); |
| 395 | ri->data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen)); | 354 | ri->data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen)); |
| 396 | 355 | ||
| 397 | fn = jffs2_write_dnode(c, f, ri, comprbuf, cdatalen, phys_ofs, ALLOC_NORETRY); | 356 | fn = jffs2_write_dnode(c, f, ri, comprbuf, cdatalen, ALLOC_NORETRY); |
| 398 | 357 | ||
| 399 | jffs2_free_comprbuf(comprbuf, buf); | 358 | jffs2_free_comprbuf(comprbuf, buf); |
| 400 | 359 | ||
| @@ -448,13 +407,13 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str | |||
| 448 | struct jffs2_raw_dirent *rd; | 407 | struct jffs2_raw_dirent *rd; |
| 449 | struct jffs2_full_dnode *fn; | 408 | struct jffs2_full_dnode *fn; |
| 450 | struct jffs2_full_dirent *fd; | 409 | struct jffs2_full_dirent *fd; |
| 451 | uint32_t alloclen, phys_ofs; | 410 | uint32_t alloclen; |
| 452 | int ret; | 411 | int ret; |
| 453 | 412 | ||
| 454 | /* Try to reserve enough space for both node and dirent. | 413 | /* Try to reserve enough space for both node and dirent. |
| 455 | * Just the node will do for now, though | 414 | * Just the node will do for now, though |
| 456 | */ | 415 | */ |
| 457 | ret = jffs2_reserve_space(c, sizeof(*ri), &phys_ofs, &alloclen, ALLOC_NORMAL, | 416 | ret = jffs2_reserve_space(c, sizeof(*ri), &alloclen, ALLOC_NORMAL, |
| 458 | JFFS2_SUMMARY_INODE_SIZE); | 417 | JFFS2_SUMMARY_INODE_SIZE); |
| 459 | D1(printk(KERN_DEBUG "jffs2_do_create(): reserved 0x%x bytes\n", alloclen)); | 418 | D1(printk(KERN_DEBUG "jffs2_do_create(): reserved 0x%x bytes\n", alloclen)); |
| 460 | if (ret) { | 419 | if (ret) { |
| @@ -465,7 +424,7 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str | |||
| 465 | ri->data_crc = cpu_to_je32(0); | 424 | ri->data_crc = cpu_to_je32(0); |
| 466 | ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); | 425 | ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); |
| 467 | 426 | ||
| 468 | fn = jffs2_write_dnode(c, f, ri, NULL, 0, phys_ofs, ALLOC_NORMAL); | 427 | fn = jffs2_write_dnode(c, f, ri, NULL, 0, ALLOC_NORMAL); |
| 469 | 428 | ||
| 470 | D1(printk(KERN_DEBUG "jffs2_do_create created file with mode 0x%x\n", | 429 | D1(printk(KERN_DEBUG "jffs2_do_create created file with mode 0x%x\n", |
| 471 | jemode_to_cpu(ri->mode))); | 430 | jemode_to_cpu(ri->mode))); |
| @@ -484,7 +443,7 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str | |||
| 484 | 443 | ||
| 485 | up(&f->sem); | 444 | up(&f->sem); |
| 486 | jffs2_complete_reservation(c); | 445 | jffs2_complete_reservation(c); |
| 487 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, | 446 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen, |
| 488 | ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); | 447 | ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); |
| 489 | 448 | ||
| 490 | if (ret) { | 449 | if (ret) { |
| @@ -516,7 +475,7 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str | |||
| 516 | rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); | 475 | rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); |
| 517 | rd->name_crc = cpu_to_je32(crc32(0, name, namelen)); | 476 | rd->name_crc = cpu_to_je32(crc32(0, name, namelen)); |
| 518 | 477 | ||
| 519 | fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_NORMAL); | 478 | fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, ALLOC_NORMAL); |
| 520 | 479 | ||
| 521 | jffs2_free_raw_dirent(rd); | 480 | jffs2_free_raw_dirent(rd); |
| 522 | 481 | ||
| @@ -545,7 +504,7 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, | |||
| 545 | { | 504 | { |
| 546 | struct jffs2_raw_dirent *rd; | 505 | struct jffs2_raw_dirent *rd; |
| 547 | struct jffs2_full_dirent *fd; | 506 | struct jffs2_full_dirent *fd; |
| 548 | uint32_t alloclen, phys_ofs; | 507 | uint32_t alloclen; |
| 549 | int ret; | 508 | int ret; |
| 550 | 509 | ||
| 551 | if (1 /* alternative branch needs testing */ || | 510 | if (1 /* alternative branch needs testing */ || |
| @@ -556,7 +515,7 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, | |||
| 556 | if (!rd) | 515 | if (!rd) |
| 557 | return -ENOMEM; | 516 | return -ENOMEM; |
| 558 | 517 | ||
| 559 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, | 518 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen, |
| 560 | ALLOC_DELETION, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); | 519 | ALLOC_DELETION, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); |
| 561 | if (ret) { | 520 | if (ret) { |
| 562 | jffs2_free_raw_dirent(rd); | 521 | jffs2_free_raw_dirent(rd); |
| @@ -580,7 +539,7 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, | |||
| 580 | rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); | 539 | rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); |
| 581 | rd->name_crc = cpu_to_je32(crc32(0, name, namelen)); | 540 | rd->name_crc = cpu_to_je32(crc32(0, name, namelen)); |
| 582 | 541 | ||
| 583 | fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_DELETION); | 542 | fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, ALLOC_DELETION); |
| 584 | 543 | ||
| 585 | jffs2_free_raw_dirent(rd); | 544 | jffs2_free_raw_dirent(rd); |
| 586 | 545 | ||
| @@ -659,14 +618,14 @@ int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint | |||
| 659 | { | 618 | { |
| 660 | struct jffs2_raw_dirent *rd; | 619 | struct jffs2_raw_dirent *rd; |
| 661 | struct jffs2_full_dirent *fd; | 620 | struct jffs2_full_dirent *fd; |
| 662 | uint32_t alloclen, phys_ofs; | 621 | uint32_t alloclen; |
| 663 | int ret; | 622 | int ret; |
| 664 | 623 | ||
| 665 | rd = jffs2_alloc_raw_dirent(); | 624 | rd = jffs2_alloc_raw_dirent(); |
| 666 | if (!rd) | 625 | if (!rd) |
| 667 | return -ENOMEM; | 626 | return -ENOMEM; |
| 668 | 627 | ||
| 669 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, | 628 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen, |
| 670 | ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); | 629 | ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); |
| 671 | if (ret) { | 630 | if (ret) { |
| 672 | jffs2_free_raw_dirent(rd); | 631 | jffs2_free_raw_dirent(rd); |
| @@ -692,7 +651,7 @@ int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint | |||
| 692 | rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); | 651 | rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); |
| 693 | rd->name_crc = cpu_to_je32(crc32(0, name, namelen)); | 652 | rd->name_crc = cpu_to_je32(crc32(0, name, namelen)); |
| 694 | 653 | ||
| 695 | fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_NORMAL); | 654 | fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, ALLOC_NORMAL); |
| 696 | 655 | ||
| 697 | jffs2_free_raw_dirent(rd); | 656 | jffs2_free_raw_dirent(rd); |
| 698 | 657 | ||
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c new file mode 100644 index 000000000000..2d82e250be34 --- /dev/null +++ b/fs/jffs2/xattr.c | |||
| @@ -0,0 +1,1238 @@ | |||
| 1 | /* | ||
| 2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
| 3 | * | ||
| 4 | * Copyright (C) 2006 NEC Corporation | ||
| 5 | * | ||
| 6 | * Created by KaiGai Kohei <kaigai@ak.jp.nec.com> | ||
| 7 | * | ||
| 8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
| 9 | * | ||
| 10 | */ | ||
| 11 | #include <linux/kernel.h> | ||
| 12 | #include <linux/slab.h> | ||
| 13 | #include <linux/fs.h> | ||
| 14 | #include <linux/time.h> | ||
| 15 | #include <linux/pagemap.h> | ||
| 16 | #include <linux/highmem.h> | ||
| 17 | #include <linux/crc32.h> | ||
| 18 | #include <linux/jffs2.h> | ||
| 19 | #include <linux/xattr.h> | ||
| 20 | #include <linux/mtd/mtd.h> | ||
| 21 | #include "nodelist.h" | ||
| 22 | /* -------- xdatum related functions ---------------- | ||
| 23 | * xattr_datum_hashkey(xprefix, xname, xvalue, xsize) | ||
| 24 | * is used to calcurate xdatum hashkey. The reminder of hashkey into XATTRINDEX_HASHSIZE is | ||
| 25 | * the index of the xattr name/value pair cache (c->xattrindex). | ||
| 26 | * unload_xattr_datum(c, xd) | ||
| 27 | * is used to release xattr name/value pair and detach from c->xattrindex. | ||
| 28 | * reclaim_xattr_datum(c) | ||
| 29 | * is used to reclaim xattr name/value pairs on the xattr name/value pair cache when | ||
| 30 | * memory usage by cache is over c->xdatum_mem_threshold. Currentry, this threshold | ||
| 31 | * is hard coded as 32KiB. | ||
| 32 | * delete_xattr_datum_node(c, xd) | ||
| 33 | * is used to delete a jffs2 node is dominated by xdatum. When EBS(Erase Block Summary) is | ||
| 34 | * enabled, it overwrites the obsolete node by myself. | ||
| 35 | * delete_xattr_datum(c, xd) | ||
| 36 | * is used to delete jffs2_xattr_datum object. It must be called with 0-value of reference | ||
| 37 | * counter. (It means how many jffs2_xattr_ref object refers this xdatum.) | ||
| 38 | * do_verify_xattr_datum(c, xd) | ||
| 39 | * is used to load the xdatum informations without name/value pair from the medium. | ||
| 40 | * It's necessary once, because those informations are not collected during mounting | ||
| 41 | * process when EBS is enabled. | ||
| 42 | * 0 will be returned, if success. An negative return value means recoverable error, and | ||
| 43 | * positive return value means unrecoverable error. Thus, caller must remove this xdatum | ||
| 44 | * and xref when it returned positive value. | ||
| 45 | * do_load_xattr_datum(c, xd) | ||
| 46 | * is used to load name/value pair from the medium. | ||
| 47 | * The meanings of return value is same as do_verify_xattr_datum(). | ||
| 48 | * load_xattr_datum(c, xd) | ||
| 49 | * is used to be as a wrapper of do_verify_xattr_datum() and do_load_xattr_datum(). | ||
| 50 | * If xd need to call do_verify_xattr_datum() at first, it's called before calling | ||
| 51 | * do_load_xattr_datum(). The meanings of return value is same as do_verify_xattr_datum(). | ||
| 52 | * save_xattr_datum(c, xd) | ||
| 53 | * is used to write xdatum to medium. xd->version will be incremented. | ||
| 54 | * create_xattr_datum(c, xprefix, xname, xvalue, xsize) | ||
| 55 | * is used to create new xdatum and write to medium. | ||
| 56 | * -------------------------------------------------- */ | ||
| 57 | |||
| 58 | static uint32_t xattr_datum_hashkey(int xprefix, const char *xname, const char *xvalue, int xsize) | ||
| 59 | { | ||
| 60 | int name_len = strlen(xname); | ||
| 61 | |||
| 62 | return crc32(xprefix, xname, name_len) ^ crc32(xprefix, xvalue, xsize); | ||
| 63 | } | ||
| 64 | |||
| 65 | static void unload_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) | ||
| 66 | { | ||
| 67 | /* must be called under down_write(xattr_sem) */ | ||
| 68 | D1(dbg_xattr("%s: xid=%u, version=%u\n", __FUNCTION__, xd->xid, xd->version)); | ||
| 69 | if (xd->xname) { | ||
| 70 | c->xdatum_mem_usage -= (xd->name_len + 1 + xd->value_len); | ||
| 71 | kfree(xd->xname); | ||
| 72 | } | ||
| 73 | |||
| 74 | list_del_init(&xd->xindex); | ||
| 75 | xd->hashkey = 0; | ||
| 76 | xd->xname = NULL; | ||
| 77 | xd->xvalue = NULL; | ||
| 78 | } | ||
| 79 | |||
static void reclaim_xattr_datum(struct jffs2_sb_info *c)
{
	/* Trim the in-core xdatum cache down to ~80% of its current usage
	 * once it has grown past c->xdatum_mem_threshold.
	 * must be called under down_write(xattr_sem) */
	struct jffs2_xattr_datum *xd, *_xd;
	uint32_t target, before;
	/* round-robin cursor over the hash buckets; persists across calls so
	 * successive reclaims spread evictions over the whole index */
	static int index = 0;
	int count;

	if (c->xdatum_mem_threshold > c->xdatum_mem_usage)
		return;

	before = c->xdatum_mem_usage;
	target = c->xdatum_mem_usage * 4 / 5; /* 20% reduction */
	for (count = 0; count < XATTRINDEX_HASHSIZE; count++) {
		list_for_each_entry_safe(xd, _xd, &c->xattrindex[index], xindex) {
			if (xd->flags & JFFS2_XFLAGS_HOT) {
				/* second chance: a recently-used entry survives
				 * one pass and merely loses its HOT mark */
				xd->flags &= ~JFFS2_XFLAGS_HOT;
			} else if (!(xd->flags & JFFS2_XFLAGS_BIND)) {
				/* BIND entries are pinned by a caller; anything
				 * else drops its in-core name/value copy */
				unload_xattr_datum(c, xd);
			}
			if (c->xdatum_mem_usage <= target)
				goto out;
		}
		index = (index+1) % XATTRINDEX_HASHSIZE;
	}
 out:
	JFFS2_NOTICE("xdatum_mem_usage from %u byte to %u byte (%u byte reclaimed)\n",
		     before, c->xdatum_mem_usage, before - c->xdatum_mem_usage);
}
| 109 | |||
| 110 | static void delete_xattr_datum_node(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) | ||
| 111 | { | ||
| 112 | /* must be called under down_write(xattr_sem) */ | ||
| 113 | struct jffs2_raw_xattr rx; | ||
| 114 | size_t length; | ||
| 115 | int rc; | ||
| 116 | |||
| 117 | if (!xd->node) { | ||
| 118 | JFFS2_WARNING("xdatum (xid=%u) is removed twice.\n", xd->xid); | ||
| 119 | return; | ||
| 120 | } | ||
| 121 | if (jffs2_sum_active()) { | ||
| 122 | memset(&rx, 0xff, sizeof(struct jffs2_raw_xattr)); | ||
| 123 | rc = jffs2_flash_read(c, ref_offset(xd->node), | ||
| 124 | sizeof(struct jffs2_unknown_node), | ||
| 125 | &length, (char *)&rx); | ||
| 126 | if (rc || length != sizeof(struct jffs2_unknown_node)) { | ||
| 127 | JFFS2_ERROR("jffs2_flash_read()=%d, req=%zu, read=%zu at %#08x\n", | ||
| 128 | rc, sizeof(struct jffs2_unknown_node), | ||
| 129 | length, ref_offset(xd->node)); | ||
| 130 | } | ||
| 131 | rc = jffs2_flash_write(c, ref_offset(xd->node), sizeof(rx), | ||
| 132 | &length, (char *)&rx); | ||
| 133 | if (rc || length != sizeof(struct jffs2_raw_xattr)) { | ||
| 134 | JFFS2_ERROR("jffs2_flash_write()=%d, req=%zu, wrote=%zu ar %#08x\n", | ||
| 135 | rc, sizeof(rx), length, ref_offset(xd->node)); | ||
| 136 | } | ||
| 137 | } | ||
| 138 | spin_lock(&c->erase_completion_lock); | ||
| 139 | xd->node->next_in_ino = NULL; | ||
| 140 | spin_unlock(&c->erase_completion_lock); | ||
| 141 | jffs2_mark_node_obsolete(c, xd->node); | ||
| 142 | xd->node = NULL; | ||
| 143 | } | ||
| 144 | |||
| 145 | static void delete_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) | ||
| 146 | { | ||
| 147 | /* must be called under down_write(xattr_sem) */ | ||
| 148 | BUG_ON(xd->refcnt); | ||
| 149 | |||
| 150 | unload_xattr_datum(c, xd); | ||
| 151 | if (xd->node) { | ||
| 152 | delete_xattr_datum_node(c, xd); | ||
| 153 | xd->node = NULL; | ||
| 154 | } | ||
| 155 | jffs2_free_xattr_datum(xd); | ||
| 156 | } | ||
| 157 | |||
| 158 | static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) | ||
| 159 | { | ||
| 160 | /* must be called under down_write(xattr_sem) */ | ||
| 161 | struct jffs2_eraseblock *jeb; | ||
| 162 | struct jffs2_raw_xattr rx; | ||
| 163 | size_t readlen; | ||
| 164 | uint32_t crc, totlen; | ||
| 165 | int rc; | ||
| 166 | |||
| 167 | BUG_ON(!xd->node); | ||
| 168 | BUG_ON(ref_flags(xd->node) != REF_UNCHECKED); | ||
| 169 | |||
| 170 | rc = jffs2_flash_read(c, ref_offset(xd->node), sizeof(rx), &readlen, (char *)&rx); | ||
| 171 | if (rc || readlen != sizeof(rx)) { | ||
| 172 | JFFS2_WARNING("jffs2_flash_read()=%d, req=%zu, read=%zu at %#08x\n", | ||
| 173 | rc, sizeof(rx), readlen, ref_offset(xd->node)); | ||
| 174 | return rc ? rc : -EIO; | ||
| 175 | } | ||
| 176 | crc = crc32(0, &rx, sizeof(rx) - 4); | ||
| 177 | if (crc != je32_to_cpu(rx.node_crc)) { | ||
| 178 | if (je32_to_cpu(rx.node_crc) != 0xffffffff) | ||
| 179 | JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n", | ||
| 180 | ref_offset(xd->node), je32_to_cpu(rx.hdr_crc), crc); | ||
| 181 | return EIO; | ||
| 182 | } | ||
| 183 | totlen = PAD(sizeof(rx) + rx.name_len + 1 + je16_to_cpu(rx.value_len)); | ||
| 184 | if (je16_to_cpu(rx.magic) != JFFS2_MAGIC_BITMASK | ||
| 185 | || je16_to_cpu(rx.nodetype) != JFFS2_NODETYPE_XATTR | ||
| 186 | || je32_to_cpu(rx.totlen) != totlen | ||
| 187 | || je32_to_cpu(rx.xid) != xd->xid | ||
| 188 | || je32_to_cpu(rx.version) != xd->version) { | ||
| 189 | JFFS2_ERROR("inconsistent xdatum at %#08x, magic=%#04x/%#04x, " | ||
| 190 | "nodetype=%#04x/%#04x, totlen=%u/%u, xid=%u/%u, version=%u/%u\n", | ||
| 191 | ref_offset(xd->node), je16_to_cpu(rx.magic), JFFS2_MAGIC_BITMASK, | ||
| 192 | je16_to_cpu(rx.nodetype), JFFS2_NODETYPE_XATTR, | ||
| 193 | je32_to_cpu(rx.totlen), totlen, | ||
| 194 | je32_to_cpu(rx.xid), xd->xid, | ||
| 195 | je32_to_cpu(rx.version), xd->version); | ||
| 196 | return EIO; | ||
| 197 | } | ||
| 198 | xd->xprefix = rx.xprefix; | ||
| 199 | xd->name_len = rx.name_len; | ||
| 200 | xd->value_len = je16_to_cpu(rx.value_len); | ||
| 201 | xd->data_crc = je32_to_cpu(rx.data_crc); | ||
| 202 | |||
| 203 | /* This JFFS2_NODETYPE_XATTR node is checked */ | ||
| 204 | jeb = &c->blocks[ref_offset(xd->node) / c->sector_size]; | ||
| 205 | totlen = PAD(je32_to_cpu(rx.totlen)); | ||
| 206 | |||
| 207 | spin_lock(&c->erase_completion_lock); | ||
| 208 | c->unchecked_size -= totlen; c->used_size += totlen; | ||
| 209 | jeb->unchecked_size -= totlen; jeb->used_size += totlen; | ||
| 210 | xd->node->flash_offset = ref_offset(xd->node) | REF_PRISTINE; | ||
| 211 | spin_unlock(&c->erase_completion_lock); | ||
| 212 | |||
| 213 | /* unchecked xdatum is chained with c->xattr_unchecked */ | ||
| 214 | list_del_init(&xd->xindex); | ||
| 215 | |||
| 216 | dbg_xattr("success on verfying xdatum (xid=%u, version=%u)\n", | ||
| 217 | xd->xid, xd->version); | ||
| 218 | |||
| 219 | return 0; | ||
| 220 | } | ||
| 221 | |||
| 222 | static int do_load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) | ||
| 223 | { | ||
| 224 | /* must be called under down_write(xattr_sem) */ | ||
| 225 | char *data; | ||
| 226 | size_t readlen; | ||
| 227 | uint32_t crc, length; | ||
| 228 | int i, ret, retry = 0; | ||
| 229 | |||
| 230 | BUG_ON(!xd->node); | ||
| 231 | BUG_ON(ref_flags(xd->node) != REF_PRISTINE); | ||
| 232 | BUG_ON(!list_empty(&xd->xindex)); | ||
| 233 | retry: | ||
| 234 | length = xd->name_len + 1 + xd->value_len; | ||
| 235 | data = kmalloc(length, GFP_KERNEL); | ||
| 236 | if (!data) | ||
| 237 | return -ENOMEM; | ||
| 238 | |||
| 239 | ret = jffs2_flash_read(c, ref_offset(xd->node)+sizeof(struct jffs2_raw_xattr), | ||
| 240 | length, &readlen, data); | ||
| 241 | |||
| 242 | if (ret || length!=readlen) { | ||
| 243 | JFFS2_WARNING("jffs2_flash_read() returned %d, request=%d, readlen=%zu, at %#08x\n", | ||
| 244 | ret, length, readlen, ref_offset(xd->node)); | ||
| 245 | kfree(data); | ||
| 246 | return ret ? ret : -EIO; | ||
| 247 | } | ||
| 248 | |||
| 249 | data[xd->name_len] = '\0'; | ||
| 250 | crc = crc32(0, data, length); | ||
| 251 | if (crc != xd->data_crc) { | ||
| 252 | JFFS2_WARNING("node CRC failed (JFFS2_NODETYPE_XREF)" | ||
| 253 | " at %#08x, read: 0x%08x calculated: 0x%08x\n", | ||
| 254 | ref_offset(xd->node), xd->data_crc, crc); | ||
| 255 | kfree(data); | ||
| 256 | return EIO; | ||
| 257 | } | ||
| 258 | |||
| 259 | xd->flags |= JFFS2_XFLAGS_HOT; | ||
| 260 | xd->xname = data; | ||
| 261 | xd->xvalue = data + xd->name_len+1; | ||
| 262 | |||
| 263 | c->xdatum_mem_usage += length; | ||
| 264 | |||
| 265 | xd->hashkey = xattr_datum_hashkey(xd->xprefix, xd->xname, xd->xvalue, xd->value_len); | ||
| 266 | i = xd->hashkey % XATTRINDEX_HASHSIZE; | ||
| 267 | list_add(&xd->xindex, &c->xattrindex[i]); | ||
| 268 | if (!retry) { | ||
| 269 | retry = 1; | ||
| 270 | reclaim_xattr_datum(c); | ||
| 271 | if (!xd->xname) | ||
| 272 | goto retry; | ||
| 273 | } | ||
| 274 | |||
| 275 | dbg_xattr("success on loading xdatum (xid=%u, xprefix=%u, xname='%s')\n", | ||
| 276 | xd->xid, xd->xprefix, xd->xname); | ||
| 277 | |||
| 278 | return 0; | ||
| 279 | } | ||
| 280 | |||
| 281 | static int load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) | ||
| 282 | { | ||
| 283 | /* must be called under down_write(xattr_sem); | ||
| 284 | * rc < 0 : recoverable error, try again | ||
| 285 | * rc = 0 : success | ||
| 286 | * rc > 0 : Unrecoverable error, this node should be deleted. | ||
| 287 | */ | ||
| 288 | int rc = 0; | ||
| 289 | BUG_ON(xd->xname); | ||
| 290 | if (!xd->node) | ||
| 291 | return EIO; | ||
| 292 | if (unlikely(ref_flags(xd->node) != REF_PRISTINE)) { | ||
| 293 | rc = do_verify_xattr_datum(c, xd); | ||
| 294 | if (rc > 0) { | ||
| 295 | list_del_init(&xd->xindex); | ||
| 296 | delete_xattr_datum_node(c, xd); | ||
| 297 | } | ||
| 298 | } | ||
| 299 | if (!rc) | ||
| 300 | rc = do_load_xattr_datum(c, xd); | ||
| 301 | return rc; | ||
| 302 | } | ||
| 303 | |||
static int save_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
{
	/* Write @xd to the medium as one raw xattr node (header followed by
	 * the "name\0value" payload), bumping xd->version, then switch
	 * xd->node over to the new raw node ref and obsolete the old one.
	 * must be called under down_write(xattr_sem)
	 * Returns 0 on success or a negative errno. */
	struct jffs2_raw_node_ref *raw;
	struct jffs2_raw_xattr rx;
	struct kvec vecs[2];
	size_t length;
	int rc, totlen;
	uint32_t phys_ofs = write_ofs(c);

	BUG_ON(!xd->xname);

	/* vecs[0]: padded node header, vecs[1]: name/value payload */
	vecs[0].iov_base = &rx;
	vecs[0].iov_len = PAD(sizeof(rx));
	vecs[1].iov_base = xd->xname;
	vecs[1].iov_len = xd->name_len + 1 + xd->value_len;
	totlen = vecs[0].iov_len + vecs[1].iov_len;

	/* Setup raw-xattr */
	rx.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	rx.nodetype = cpu_to_je16(JFFS2_NODETYPE_XATTR);
	rx.totlen = cpu_to_je32(PAD(totlen));
	/* hdr_crc covers only the common node header */
	rx.hdr_crc = cpu_to_je32(crc32(0, &rx, sizeof(struct jffs2_unknown_node) - 4));

	rx.xid = cpu_to_je32(xd->xid);
	rx.version = cpu_to_je32(++xd->version);
	rx.xprefix = xd->xprefix;
	rx.name_len = xd->name_len;
	rx.value_len = cpu_to_je16(xd->value_len);
	rx.data_crc = cpu_to_je32(crc32(0, vecs[1].iov_base, vecs[1].iov_len));
	/* node_crc covers the whole header minus the CRC field itself */
	rx.node_crc = cpu_to_je32(crc32(0, &rx, sizeof(struct jffs2_raw_xattr) - 4));

	rc = jffs2_flash_writev(c, vecs, 2, phys_ofs, &length, 0);
	if (rc || totlen != length) {
		JFFS2_WARNING("jffs2_flash_writev()=%d, req=%u, wrote=%zu, at %#08x\n",
			      rc, totlen, length, phys_ofs);
		rc = rc ? rc : -EIO;
		/* a partial write still consumed space: account it as an
		 * obsolete node so GC can reclaim it */
		if (length)
			jffs2_add_physical_node_ref(c, phys_ofs | REF_OBSOLETE, PAD(totlen), NULL);

		return rc;
	}

	/* success */
	raw = jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, PAD(totlen), NULL);
	/* FIXME */ raw->next_in_ino = (void *)xd;

	/* obsolete the previous on-flash node, if any, and switch over */
	if (xd->node)
		delete_xattr_datum_node(c, xd);
	xd->node = raw;

	dbg_xattr("success on saving xdatum (xid=%u, version=%u, xprefix=%u, xname='%s')\n",
		  xd->xid, xd->version, xd->xprefix, xd->xname);

	return 0;
}
| 360 | |||
static struct jffs2_xattr_datum *create_xattr_datum(struct jffs2_sb_info *c,
						    int xprefix, const char *xname,
						    const char *xvalue, int xsize)
{
	/* Return an xdatum for (xprefix, xname, xvalue): reuse an existing
	 * identical one (bumping its refcount), or allocate a new one, write
	 * it to the medium and index it.
	 * must be called under down_write(xattr_sem)
	 * Returns ERR_PTR(-ENOMEM) or ERR_PTR(write error) on failure. */
	struct jffs2_xattr_datum *xd;
	uint32_t hashkey, name_len;
	char *data;
	int i, rc;

	/* Search xattr_datum has same xname/xvalue by index */
	hashkey = xattr_datum_hashkey(xprefix, xname, xvalue, xsize);
	i = hashkey % XATTRINDEX_HASHSIZE;
	list_for_each_entry(xd, &c->xattrindex[i], xindex) {
		if (xd->hashkey==hashkey
		    && xd->xprefix==xprefix
		    && xd->value_len==xsize
		    && !strcmp(xd->xname, xname)
		    && !memcmp(xd->xvalue, xvalue, xsize)) {
			xd->refcnt++;
			return xd;
		}
	}

	/* Not found, Create NEW XATTR-Cache */
	name_len = strlen(xname);

	xd = jffs2_alloc_xattr_datum();
	if (!xd)
		return ERR_PTR(-ENOMEM);

	/* name and value live in one buffer laid out as "name\0value" */
	data = kmalloc(name_len + 1 + xsize, GFP_KERNEL);
	if (!data) {
		jffs2_free_xattr_datum(xd);
		return ERR_PTR(-ENOMEM);
	}
	strcpy(data, xname);
	memcpy(data + name_len + 1, xvalue, xsize);

	xd->refcnt = 1;
	xd->xid = ++c->highest_xid;
	xd->flags |= JFFS2_XFLAGS_HOT;
	xd->xprefix = xprefix;

	xd->hashkey = hashkey;
	xd->xname = data;
	xd->xvalue = data + name_len + 1;
	xd->name_len = name_len;
	xd->value_len = xsize;
	xd->data_crc = crc32(0, data, xd->name_len + 1 + xd->value_len);

	rc = save_xattr_datum(c, xd);
	if (rc) {
		kfree(xd->xname);
		jffs2_free_xattr_datum(xd);
		return ERR_PTR(rc);
	}

	/* Insert Hash Index */
	i = hashkey % XATTRINDEX_HASHSIZE;
	list_add(&xd->xindex, &c->xattrindex[i]);

	/* account the new in-core copy and trim the cache if over threshold */
	c->xdatum_mem_usage += (xd->name_len + 1 + xd->value_len);
	reclaim_xattr_datum(c);

	return xd;
}
| 428 | |||
/* -------- xref related functions ------------------
 * verify_xattr_ref(c, ref)
 *   is used to load xref information from the medium. Because summary data does
 *   not contain xid/ino, it's necessary to verify once during the mounting process.
 * delete_xattr_ref_node(c, ref)
 *   is used to delete the jffs2 node that is dominated by an xref. When EBS is
 *   enabled, it overwrites the obsolete node by itself.
 * delete_xattr_ref(c, ref)
 *   is used to delete a jffs2_xattr_ref object. If the reference counter of the
 *   xdatum referred to by this xref becomes 0, delete_xattr_datum() is called later.
 * save_xattr_ref(c, ref)
 *   is used to write an xref to the medium.
 * create_xattr_ref(c, ic, xd)
 *   is used to create a new xref and write it to the medium.
 * jffs2_xattr_delete_inode(c, ic)
 *   is called to remove the xrefs of an obsolete inode when the inode is unlinked.
 * jffs2_xattr_free_inode(c, ic)
 *   is called to release xattr related objects when unmounting.
 * check_xattr_ref_inode(c, ic)
 *   is used to confirm the inode does not have duplicate xattr name/value pairs.
 * -------------------------------------------------- */
static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
{
	/* Read back and validate a REF_UNCHECKED xref node. On success, fill
	 * in ref->ino / ref->xid (summary data lacks them), move the node's
	 * space from "unchecked" to "used" accounting and mark it PRISTINE.
	 * Returns 0 on success, a negative errno on I/O error, and positive
	 * EIO when the node is invalid and should be deleted. */
	struct jffs2_eraseblock *jeb;
	struct jffs2_raw_xref rr;
	size_t readlen;
	uint32_t crc, totlen;
	int rc;

	BUG_ON(ref_flags(ref->node) != REF_UNCHECKED);

	rc = jffs2_flash_read(c, ref_offset(ref->node), sizeof(rr), &readlen, (char *)&rr);
	if (rc || sizeof(rr) != readlen) {
		JFFS2_WARNING("jffs2_flash_read()=%d, req=%zu, read=%zu, at %#08x\n",
			      rc, sizeof(rr), readlen, ref_offset(ref->node));
		return rc ? rc : -EIO;
	}
	/* obsolete node */
	crc = crc32(0, &rr, sizeof(rr) - 4);
	if (crc != je32_to_cpu(rr.node_crc)) {
		/* an all-0xff CRC marks an obsoleted node: stay quiet */
		if (je32_to_cpu(rr.node_crc) != 0xffffffff)
			JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
				    ref_offset(ref->node), je32_to_cpu(rr.node_crc), crc);
		return EIO;
	}
	if (je16_to_cpu(rr.magic) != JFFS2_MAGIC_BITMASK
	    || je16_to_cpu(rr.nodetype) != JFFS2_NODETYPE_XREF
	    || je32_to_cpu(rr.totlen) != PAD(sizeof(rr))) {
		JFFS2_ERROR("inconsistent xref at %#08x, magic=%#04x/%#04x, "
			    "nodetype=%#04x/%#04x, totlen=%u/%zu\n",
			    ref_offset(ref->node), je16_to_cpu(rr.magic), JFFS2_MAGIC_BITMASK,
			    je16_to_cpu(rr.nodetype), JFFS2_NODETYPE_XREF,
			    je32_to_cpu(rr.totlen), PAD(sizeof(rr)));
		return EIO;
	}
	ref->ino = je32_to_cpu(rr.ino);
	ref->xid = je32_to_cpu(rr.xid);

	/* fixup superblock/eraseblock info */
	jeb = &c->blocks[ref_offset(ref->node) / c->sector_size];
	totlen = PAD(sizeof(rr));

	spin_lock(&c->erase_completion_lock);
	c->unchecked_size -= totlen; c->used_size += totlen;
	jeb->unchecked_size -= totlen; jeb->used_size += totlen;
	ref->node->flash_offset = ref_offset(ref->node) | REF_PRISTINE;
	spin_unlock(&c->erase_completion_lock);

	dbg_xattr("success on verifying xref (ino=%u, xid=%u) at %#08x\n",
		  ref->ino, ref->xid, ref_offset(ref->node));
	return 0;
}
| 501 | |||
static void delete_xattr_ref_node(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
{
	/* Obsolete the on-flash node backing @ref and clear ref->node.
	 * NOTE(review): unlike delete_xattr_datum_node() there is no NULL
	 * guard here; callers appear responsible for ref->node being valid
	 * — confirm against call sites. */
	struct jffs2_raw_xref rr;
	size_t length;
	int rc;

	if (jffs2_sum_active()) {
		/* With erase block summary active, keep the node header but
		 * rewrite the rest of the node as 0xff on the medium. */
		memset(&rr, 0xff, sizeof(rr));
		rc = jffs2_flash_read(c, ref_offset(ref->node),
				      sizeof(struct jffs2_unknown_node),
				      &length, (char *)&rr);
		if (rc || length != sizeof(struct jffs2_unknown_node)) {
			JFFS2_ERROR("jffs2_flash_read()=%d, req=%zu, read=%zu at %#08x\n",
				    rc, sizeof(struct jffs2_unknown_node),
				    length, ref_offset(ref->node));
		}
		rc = jffs2_flash_write(c, ref_offset(ref->node), sizeof(rr),
				       &length, (char *)&rr);
		if (rc || length != sizeof(struct jffs2_raw_xref)) {
			JFFS2_ERROR("jffs2_flash_write()=%d, req=%zu, wrote=%zu at %#08x\n",
				    rc, sizeof(rr), length, ref_offset(ref->node));
		}
	}
	spin_lock(&c->erase_completion_lock);
	ref->node->next_in_ino = NULL;
	spin_unlock(&c->erase_completion_lock);
	jffs2_mark_node_obsolete(c, ref->node);
	ref->node = NULL;
}
| 531 | |||
| 532 | static void delete_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref) | ||
| 533 | { | ||
| 534 | /* must be called under down_write(xattr_sem) */ | ||
| 535 | struct jffs2_xattr_datum *xd; | ||
| 536 | |||
| 537 | BUG_ON(!ref->node); | ||
| 538 | delete_xattr_ref_node(c, ref); | ||
| 539 | |||
| 540 | xd = ref->xd; | ||
| 541 | xd->refcnt--; | ||
| 542 | if (!xd->refcnt) | ||
| 543 | delete_xattr_datum(c, xd); | ||
| 544 | jffs2_free_xattr_ref(ref); | ||
| 545 | } | ||
| 546 | |||
static int save_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
{
	/* Write @ref to the medium as a raw xref node binding (ino, xid),
	 * then switch ref->node over to the new raw node ref, obsoleting the
	 * previous one.
	 * must be called under down_write(xattr_sem)
	 * Returns 0 on success or a negative errno. */
	struct jffs2_raw_node_ref *raw;
	struct jffs2_raw_xref rr;
	size_t length;
	uint32_t phys_ofs = write_ofs(c);
	int ret;

	rr.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	rr.nodetype = cpu_to_je16(JFFS2_NODETYPE_XREF);
	rr.totlen = cpu_to_je32(PAD(sizeof(rr)));
	/* hdr_crc covers only the common node header */
	rr.hdr_crc = cpu_to_je32(crc32(0, &rr, sizeof(struct jffs2_unknown_node) - 4));

	rr.ino = cpu_to_je32(ref->ic->ino);
	rr.xid = cpu_to_je32(ref->xd->xid);
	rr.node_crc = cpu_to_je32(crc32(0, &rr, sizeof(rr) - 4));

	ret = jffs2_flash_write(c, phys_ofs, sizeof(rr), &length, (char *)&rr);
	if (ret || sizeof(rr) != length) {
		JFFS2_WARNING("jffs2_flash_write() returned %d, request=%zu, retlen=%zu, at %#08x\n",
			      ret, sizeof(rr), length, phys_ofs);
		ret = ret ? ret : -EIO;
		/* a partial write still consumed space: account it as an
		 * obsolete node so GC can reclaim it */
		if (length)
			jffs2_add_physical_node_ref(c, phys_ofs | REF_OBSOLETE, PAD(sizeof(rr)), NULL);

		return ret;
	}

	raw = jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, PAD(sizeof(rr)), NULL);
	/* FIXME */ raw->next_in_ino = (void *)ref;
	if (ref->node)
		delete_xattr_ref_node(c, ref);
	ref->node = raw;

	dbg_xattr("success on saving xref (ino=%u, xid=%u)\n", ref->ic->ino, ref->xd->xid);

	return 0;
}
| 586 | |||
| 587 | static struct jffs2_xattr_ref *create_xattr_ref(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, | ||
| 588 | struct jffs2_xattr_datum *xd) | ||
| 589 | { | ||
| 590 | /* must be called under down_write(xattr_sem) */ | ||
| 591 | struct jffs2_xattr_ref *ref; | ||
| 592 | int ret; | ||
| 593 | |||
| 594 | ref = jffs2_alloc_xattr_ref(); | ||
| 595 | if (!ref) | ||
| 596 | return ERR_PTR(-ENOMEM); | ||
| 597 | ref->ic = ic; | ||
| 598 | ref->xd = xd; | ||
| 599 | |||
| 600 | ret = save_xattr_ref(c, ref); | ||
| 601 | if (ret) { | ||
| 602 | jffs2_free_xattr_ref(ref); | ||
| 603 | return ERR_PTR(ret); | ||
| 604 | } | ||
| 605 | |||
| 606 | /* Chain to inode */ | ||
| 607 | ref->next = ic->xref; | ||
| 608 | ic->xref = ref; | ||
| 609 | |||
| 610 | return ref; /* success */ | ||
| 611 | } | ||
| 612 | |||
| 613 | void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) | ||
| 614 | { | ||
| 615 | /* It's called from jffs2_clear_inode() on inode removing. | ||
| 616 | When an inode with XATTR is removed, those XATTRs must be removed. */ | ||
| 617 | struct jffs2_xattr_ref *ref, *_ref; | ||
| 618 | |||
| 619 | if (!ic || ic->nlink > 0) | ||
| 620 | return; | ||
| 621 | |||
| 622 | down_write(&c->xattr_sem); | ||
| 623 | for (ref = ic->xref; ref; ref = _ref) { | ||
| 624 | _ref = ref->next; | ||
| 625 | delete_xattr_ref(c, ref); | ||
| 626 | } | ||
| 627 | ic->xref = NULL; | ||
| 628 | up_write(&c->xattr_sem); | ||
| 629 | } | ||
| 630 | |||
| 631 | void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) | ||
| 632 | { | ||
| 633 | /* It's called from jffs2_free_ino_caches() until unmounting FS. */ | ||
| 634 | struct jffs2_xattr_datum *xd; | ||
| 635 | struct jffs2_xattr_ref *ref, *_ref; | ||
| 636 | |||
| 637 | down_write(&c->xattr_sem); | ||
| 638 | for (ref = ic->xref; ref; ref = _ref) { | ||
| 639 | _ref = ref->next; | ||
| 640 | xd = ref->xd; | ||
| 641 | xd->refcnt--; | ||
| 642 | if (!xd->refcnt) { | ||
| 643 | unload_xattr_datum(c, xd); | ||
| 644 | jffs2_free_xattr_datum(xd); | ||
| 645 | } | ||
| 646 | jffs2_free_xattr_ref(ref); | ||
| 647 | } | ||
| 648 | ic->xref = NULL; | ||
| 649 | up_write(&c->xattr_sem); | ||
| 650 | } | ||
| 651 | |||
static int check_xattr_ref_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
{
	/* success of check_xattr_ref_inode() means that inode (ic) does not have
	 * duplicate name/value pairs. If a duplicate name/value pair would be found,
	 * one will be removed. Runs at most once per inode (guarded by
	 * INO_FLAGS_XATTR_CHECKED).
	 */
	struct jffs2_xattr_ref *ref, *cmp, **pref;
	int rc = 0;

	if (likely(ic->flags & INO_FLAGS_XATTR_CHECKED))
		return 0;
	down_write(&c->xattr_sem);
 retry:
	rc = 0;
	/* pref always points at the link that leads to the node being
	 * examined, so a node can be unlinked in place */
	for (ref=ic->xref, pref=&ic->xref; ref; pref=&ref->next, ref=ref->next) {
		if (!ref->xd->xname) {
			rc = load_xattr_datum(c, ref->xd);
			if (unlikely(rc > 0)) {
				/* unrecoverable xdatum: unlink and drop this
				 * xref, then rescan from the start */
				*pref = ref->next;
				delete_xattr_ref(c, ref);
				goto retry;
			} else if (unlikely(rc < 0))
				goto out;
		}
		for (cmp=ref->next, pref=&ref->next; cmp; pref=&cmp->next, cmp=cmp->next) {
			if (!cmp->xd->xname) {
				/* BIND pins ref->xd so the reclaim triggered
				 * by this load cannot unload it */
				ref->xd->flags |= JFFS2_XFLAGS_BIND;
				rc = load_xattr_datum(c, cmp->xd);
				ref->xd->flags &= ~JFFS2_XFLAGS_BIND;
				if (unlikely(rc > 0)) {
					*pref = cmp->next;
					delete_xattr_ref(c, cmp);
					goto retry;
				} else if (unlikely(rc < 0))
					goto out;
			}
			/* same prefix and name: remove the duplicate xref */
			if (ref->xd->xprefix == cmp->xd->xprefix
			    && !strcmp(ref->xd->xname, cmp->xd->xname)) {
				*pref = cmp->next;
				delete_xattr_ref(c, cmp);
				goto retry;
			}
		}
	}
	ic->flags |= INO_FLAGS_XATTR_CHECKED;
 out:
	up_write(&c->xattr_sem);

	return rc;
}
| 702 | |||
| 703 | /* -------- xattr subsystem functions --------------- | ||
| 704 | * jffs2_init_xattr_subsystem(c) | ||
| 705 | * is used to initialize semaphore and list_head, and some variables. | ||
| 706 | * jffs2_find_xattr_datum(c, xid) | ||
| 707 | * is used to lookup xdatum while scanning process. | ||
| 708 | * jffs2_clear_xattr_subsystem(c) | ||
| 709 | * is used to release any xattr related objects. | ||
| 710 | * jffs2_build_xattr_subsystem(c) | ||
| 711 | * is used to associate xdatum and xref while super block building process. | ||
| 712 | * jffs2_setup_xattr_datum(c, xid, version) | ||
| 713 | * is used to insert xdatum while scanning process. | ||
| 714 | * -------------------------------------------------- */ | ||
| 715 | void jffs2_init_xattr_subsystem(struct jffs2_sb_info *c) | ||
| 716 | { | ||
| 717 | int i; | ||
| 718 | |||
| 719 | for (i=0; i < XATTRINDEX_HASHSIZE; i++) | ||
| 720 | INIT_LIST_HEAD(&c->xattrindex[i]); | ||
| 721 | INIT_LIST_HEAD(&c->xattr_unchecked); | ||
| 722 | c->xref_temp = NULL; | ||
| 723 | |||
| 724 | init_rwsem(&c->xattr_sem); | ||
| 725 | c->xdatum_mem_usage = 0; | ||
| 726 | c->xdatum_mem_threshold = 32 * 1024; /* Default 32KB */ | ||
| 727 | } | ||
| 728 | |||
| 729 | static struct jffs2_xattr_datum *jffs2_find_xattr_datum(struct jffs2_sb_info *c, uint32_t xid) | ||
| 730 | { | ||
| 731 | struct jffs2_xattr_datum *xd; | ||
| 732 | int i = xid % XATTRINDEX_HASHSIZE; | ||
| 733 | |||
| 734 | /* It's only used in scanning/building process. */ | ||
| 735 | BUG_ON(!(c->flags & (JFFS2_SB_FLAG_SCANNING|JFFS2_SB_FLAG_BUILDING))); | ||
| 736 | |||
| 737 | list_for_each_entry(xd, &c->xattrindex[i], xindex) { | ||
| 738 | if (xd->xid==xid) | ||
| 739 | return xd; | ||
| 740 | } | ||
| 741 | return NULL; | ||
| 742 | } | ||
| 743 | |||
| 744 | void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c) | ||
| 745 | { | ||
| 746 | struct jffs2_xattr_datum *xd, *_xd; | ||
| 747 | struct jffs2_xattr_ref *ref, *_ref; | ||
| 748 | int i; | ||
| 749 | |||
| 750 | for (ref=c->xref_temp; ref; ref = _ref) { | ||
| 751 | _ref = ref->next; | ||
| 752 | jffs2_free_xattr_ref(ref); | ||
| 753 | } | ||
| 754 | c->xref_temp = NULL; | ||
| 755 | |||
| 756 | for (i=0; i < XATTRINDEX_HASHSIZE; i++) { | ||
| 757 | list_for_each_entry_safe(xd, _xd, &c->xattrindex[i], xindex) { | ||
| 758 | list_del(&xd->xindex); | ||
| 759 | if (xd->xname) | ||
| 760 | kfree(xd->xname); | ||
| 761 | jffs2_free_xattr_datum(xd); | ||
| 762 | } | ||
| 763 | } | ||
| 764 | } | ||
| 765 | |||
void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
{
	/* Associate the xrefs and xdatums collected during scanning.
	 * Phase 1 resolves each temporary xref to its xdatum and inode cache
	 * (deleting orphans); Phase 2 drops unreferenced xdatums and queues
	 * unchecked ones on c->xattr_unchecked. */
	struct jffs2_xattr_ref *ref, *_ref;
	struct jffs2_xattr_datum *xd, *_xd;
	struct jffs2_inode_cache *ic;
	int i, xdatum_count =0, xdatum_unchecked_count = 0, xref_count = 0;

	BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));

	/* Phase.1 */
	for (ref=c->xref_temp; ref; ref=_ref) {
		_ref = ref->next;
		/* checking REF_UNCHECKED nodes */
		if (ref_flags(ref->node) != REF_PRISTINE) {
			if (verify_xattr_ref(c, ref)) {
				delete_xattr_ref_node(c, ref);
				jffs2_free_xattr_ref(ref);
				continue;
			}
		}
		/* At this point, ref->xid and ref->ino contain XID and inode number.
		   ref->xd and ref->ic are not valid yet. */
		xd = jffs2_find_xattr_datum(c, ref->xid);
		ic = jffs2_get_ino_cache(c, ref->ino);
		if (!xd || !ic) {
			/* orphan xref: its xdatum or inode no longer exists */
			if (ref_flags(ref->node) != REF_UNCHECKED)
				JFFS2_WARNING("xref(ino=%u, xid=%u) is orphan. \n",
					      ref->ino, ref->xid);
			delete_xattr_ref_node(c, ref);
			jffs2_free_xattr_ref(ref);
			continue;
		}
		ref->xd = xd;
		ref->ic = ic;
		xd->refcnt++;
		/* push onto the inode's xref list */
		ref->next = ic->xref;
		ic->xref = ref;
		xref_count++;
	}
	c->xref_temp = NULL;
	/* After this, ref->xid/ino are NEVER used. */

	/* Phase.2 */
	for (i=0; i < XATTRINDEX_HASHSIZE; i++) {
		list_for_each_entry_safe(xd, _xd, &c->xattrindex[i], xindex) {
			list_del_init(&xd->xindex);
			if (!xd->refcnt) {
				/* no xref points here: delete the xdatum */
				if (ref_flags(xd->node) != REF_UNCHECKED)
					JFFS2_WARNING("orphan xdatum(xid=%u, version=%u) at %#08x\n",
						      xd->xid, xd->version, ref_offset(xd->node));
				delete_xattr_datum(c, xd);
				continue;
			}
			if (ref_flags(xd->node) != REF_PRISTINE) {
				/* verification is deferred; queue for later */
				dbg_xattr("unchecked xdatum(xid=%u) at %#08x\n",
					  xd->xid, ref_offset(xd->node));
				list_add(&xd->xindex, &c->xattr_unchecked);
				xdatum_unchecked_count++;
			}
			xdatum_count++;
		}
	}
	/* build complete */
	JFFS2_NOTICE("complete building xattr subsystem, %u of xdatum (%u unchecked) and "
		     "%u of xref found.\n", xdatum_count, xdatum_unchecked_count, xref_count);
}
| 832 | |||
struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c,
						  uint32_t xid, uint32_t version)
{
	/* Insert an xdatum discovered during scanning. If an xdatum with the
	 * same xid already exists, only the newer version survives:
	 * ERR_PTR(-EEXIST) is returned when the incoming node is the older
	 * one; otherwise the existing entry (and its on-flash node) is
	 * deleted and replaced. */
	struct jffs2_xattr_datum *xd, *_xd;

	_xd = jffs2_find_xattr_datum(c, xid);
	if (_xd) {
		dbg_xattr("duplicate xdatum (xid=%u, version=%u/%u) at %#08x\n",
			  xid, version, _xd->version, ref_offset(_xd->node));
		if (version < _xd->version)
			return ERR_PTR(-EEXIST);
	}
	xd = jffs2_alloc_xattr_datum();
	if (!xd)
		return ERR_PTR(-ENOMEM);
	xd->xid = xid;
	xd->version = version;
	/* keep track of the highest xid so new xdatums get fresh ids */
	if (xd->xid > c->highest_xid)
		c->highest_xid = xd->xid;
	list_add_tail(&xd->xindex, &c->xattrindex[xid % XATTRINDEX_HASHSIZE]);

	if (_xd) {
		/* evict the superseded (older) duplicate */
		list_del_init(&_xd->xindex);
		delete_xattr_datum_node(c, _xd);
		jffs2_free_xattr_datum(_xd);
	}
	return xd;
}
| 861 | |||
/* -------- xattr handler functions -----------------
 * xprefix_to_handler(xprefix)
 *   is used to translate an xprefix into its xattr_handler.
 * jffs2_listxattr(dentry, buffer, size)
 *   is an implementation of the listxattr handler on jffs2.
 * do_jffs2_getxattr(inode, xprefix, xname, buffer, size)
 *   is an implementation of the getxattr handler on jffs2.
 * do_jffs2_setxattr(inode, xprefix, xname, buffer, size, flags)
 *   is an implementation of the setxattr handler on jffs2.
 * -------------------------------------------------- */
/* NULL-terminated table of the xattr handlers jffs2 provides; security and
 * ACL entries are compiled in per CONFIG_JFFS2_FS_SECURITY /
 * CONFIG_JFFS2_FS_POSIX_ACL. */
struct xattr_handler *jffs2_xattr_handlers[] = {
	&jffs2_user_xattr_handler,
#ifdef CONFIG_JFFS2_FS_SECURITY
	&jffs2_security_xattr_handler,
#endif
#ifdef CONFIG_JFFS2_FS_POSIX_ACL
	&jffs2_acl_access_xattr_handler,
	&jffs2_acl_default_xattr_handler,
#endif
	&jffs2_trusted_xattr_handler,
	NULL
};
| 884 | |||
| 885 | static struct xattr_handler *xprefix_to_handler(int xprefix) { | ||
| 886 | struct xattr_handler *ret; | ||
| 887 | |||
| 888 | switch (xprefix) { | ||
| 889 | case JFFS2_XPREFIX_USER: | ||
| 890 | ret = &jffs2_user_xattr_handler; | ||
| 891 | break; | ||
| 892 | #ifdef CONFIG_JFFS2_FS_SECURITY | ||
| 893 | case JFFS2_XPREFIX_SECURITY: | ||
| 894 | ret = &jffs2_security_xattr_handler; | ||
| 895 | break; | ||
| 896 | #endif | ||
| 897 | #ifdef CONFIG_JFFS2_FS_POSIX_ACL | ||
| 898 | case JFFS2_XPREFIX_ACL_ACCESS: | ||
| 899 | ret = &jffs2_acl_access_xattr_handler; | ||
| 900 | break; | ||
| 901 | case JFFS2_XPREFIX_ACL_DEFAULT: | ||
| 902 | ret = &jffs2_acl_default_xattr_handler; | ||
| 903 | break; | ||
| 904 | #endif | ||
| 905 | case JFFS2_XPREFIX_TRUSTED: | ||
| 906 | ret = &jffs2_trusted_xattr_handler; | ||
| 907 | break; | ||
| 908 | default: | ||
| 909 | ret = NULL; | ||
| 910 | break; | ||
| 911 | } | ||
| 912 | return ret; | ||
| 913 | } | ||
| 914 | |||
| 915 | ssize_t jffs2_listxattr(struct dentry *dentry, char *buffer, size_t size) | ||
| 916 | { | ||
| 917 | struct inode *inode = dentry->d_inode; | ||
| 918 | struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); | ||
| 919 | struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); | ||
| 920 | struct jffs2_inode_cache *ic = f->inocache; | ||
| 921 | struct jffs2_xattr_ref *ref, **pref; | ||
| 922 | struct jffs2_xattr_datum *xd; | ||
| 923 | struct xattr_handler *xhandle; | ||
| 924 | ssize_t len, rc; | ||
| 925 | int retry = 0; | ||
| 926 | |||
| 927 | rc = check_xattr_ref_inode(c, ic); | ||
| 928 | if (unlikely(rc)) | ||
| 929 | return rc; | ||
| 930 | |||
| 931 | down_read(&c->xattr_sem); | ||
| 932 | retry: | ||
| 933 | len = 0; | ||
| 934 | for (ref=ic->xref, pref=&ic->xref; ref; pref=&ref->next, ref=ref->next) { | ||
| 935 | BUG_ON(ref->ic != ic); | ||
| 936 | xd = ref->xd; | ||
| 937 | if (!xd->xname) { | ||
| 938 | /* xdatum is unchached */ | ||
| 939 | if (!retry) { | ||
| 940 | retry = 1; | ||
| 941 | up_read(&c->xattr_sem); | ||
| 942 | down_write(&c->xattr_sem); | ||
| 943 | goto retry; | ||
| 944 | } else { | ||
| 945 | rc = load_xattr_datum(c, xd); | ||
| 946 | if (unlikely(rc > 0)) { | ||
| 947 | *pref = ref->next; | ||
| 948 | delete_xattr_ref(c, ref); | ||
| 949 | goto retry; | ||
| 950 | } else if (unlikely(rc < 0)) | ||
| 951 | goto out; | ||
| 952 | } | ||
| 953 | } | ||
| 954 | xhandle = xprefix_to_handler(xd->xprefix); | ||
| 955 | if (!xhandle) | ||
| 956 | continue; | ||
| 957 | if (buffer) { | ||
| 958 | rc = xhandle->list(inode, buffer+len, size-len, xd->xname, xd->name_len); | ||
| 959 | } else { | ||
| 960 | rc = xhandle->list(inode, NULL, 0, xd->xname, xd->name_len); | ||
| 961 | } | ||
| 962 | if (rc < 0) | ||
| 963 | goto out; | ||
| 964 | len += rc; | ||
| 965 | } | ||
| 966 | rc = len; | ||
| 967 | out: | ||
| 968 | if (!retry) { | ||
| 969 | up_read(&c->xattr_sem); | ||
| 970 | } else { | ||
| 971 | up_write(&c->xattr_sem); | ||
| 972 | } | ||
| 973 | return rc; | ||
| 974 | } | ||
| 975 | |||
/*
 * do_jffs2_getxattr(inode, xprefix, xname, buffer, size)
 *   Look up the xattr <xprefix, xname> on @inode and copy its value into
 *   @buffer.  Returns the value length on success, -ERANGE when @buffer is
 *   too small, -ENODATA when the attribute does not exist, or another
 *   negative errno.  With @buffer NULL only the length is returned.
 */
int do_jffs2_getxattr(struct inode *inode, int xprefix, const char *xname,
		      char *buffer, size_t size)
{
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_inode_cache *ic = f->inocache;
	struct jffs2_xattr_datum *xd;
	struct jffs2_xattr_ref *ref, **pref;
	int rc, retry = 0;

	rc = check_xattr_ref_inode(c, ic);
	if (unlikely(rc))
		return rc;

	down_read(&c->xattr_sem);
 retry:
	for (ref=ic->xref, pref=&ic->xref; ref; pref=&ref->next, ref=ref->next) {
		BUG_ON(ref->ic!=ic);

		xd = ref->xd;
		if (xd->xprefix != xprefix)
			continue;
		if (!xd->xname) {
			/* xdatum is uncached; loading it may modify the ref
			 * list, which needs the write lock — upgrade once
			 * and restart the walk */
			if (!retry) {
				retry = 1;
				up_read(&c->xattr_sem);
				down_write(&c->xattr_sem);
				goto retry;
			} else {
				rc = load_xattr_datum(c, xd);
				if (unlikely(rc > 0)) {
					/* xdatum is dead: unlink this ref and rescan */
					*pref = ref->next;
					delete_xattr_ref(c, ref);
					goto retry;
				} else if (unlikely(rc < 0)) {
					goto out;
				}
			}
		}
		if (!strcmp(xname, xd->xname)) {
			rc = xd->value_len;
			if (buffer) {
				if (size < rc) {
					rc = -ERANGE;
				} else {
					memcpy(buffer, xd->xvalue, rc);
				}
			}
			goto out;
		}
	}
	rc = -ENODATA;
 out:
	/* retry != 0 means the lock was upgraded above */
	if (!retry) {
		up_read(&c->xattr_sem);
	} else {
		up_write(&c->xattr_sem);
	}
	return rc;
}
| 1037 | |||
/*
 * do_jffs2_setxattr(inode, xprefix, xname, buffer, size, flags)
 *   Create, replace, or (when @buffer is NULL) remove the xattr
 *   <xprefix, xname> on @inode, honouring XATTR_CREATE/XATTR_REPLACE.
 *   A new xdatum node and then a new xref node are written to flash in two
 *   separately reserved steps; the old xref, if any, is deleted only after
 *   the new one has been created successfully.
 */
int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
		      const char *buffer, size_t size, int flags)
{
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_inode_cache *ic = f->inocache;
	struct jffs2_xattr_datum *xd;
	struct jffs2_xattr_ref *ref, *newref, **pref;
	uint32_t length, request;
	int rc;

	rc = check_xattr_ref_inode(c, ic);
	if (unlikely(rc))
		return rc;

	/* Reserve flash space for the xdatum node up front. */
	request = PAD(sizeof(struct jffs2_raw_xattr) + strlen(xname) + 1 + size);
	rc = jffs2_reserve_space(c, request, &length,
				 ALLOC_NORMAL, JFFS2_SUMMARY_XATTR_SIZE);
	if (rc) {
		JFFS2_WARNING("jffs2_reserve_space()=%d, request=%u\n", rc, request);
		return rc;
	}

	/* Find existing xattr */
	down_write(&c->xattr_sem);
 retry:
	for (ref=ic->xref, pref=&ic->xref; ref; pref=&ref->next, ref=ref->next) {
		xd = ref->xd;
		if (xd->xprefix != xprefix)
			continue;
		if (!xd->xname) {
			/* uncached xdatum: load its name (may kill a dead ref) */
			rc = load_xattr_datum(c, xd);
			if (unlikely(rc > 0)) {
				*pref = ref->next;
				delete_xattr_ref(c, ref);
				goto retry;
			} else if (unlikely(rc < 0))
				goto out;
		}
		if (!strcmp(xd->xname, xname)) {
			if (flags & XATTR_CREATE) {
				rc = -EEXIST;
				goto out;
			}
			if (!buffer) {
				/* NULL value means removal of the attribute */
				*pref = ref->next;
				delete_xattr_ref(c, ref);
				rc = 0;
				goto out;
			}
			goto found;
		}
	}
	/* not found */
	if (flags & XATTR_REPLACE) {
		rc = -ENODATA;
		goto out;
	}
	if (!buffer) {
		rc = -EINVAL;
		goto out;
	}
 found:
	xd = create_xattr_datum(c, xprefix, xname, buffer, size);
	if (IS_ERR(xd)) {
		rc = PTR_ERR(xd);
		goto out;
	}
	up_write(&c->xattr_sem);
	jffs2_complete_reservation(c);

	/* create xattr_ref */
	request = PAD(sizeof(struct jffs2_raw_xref));
	rc = jffs2_reserve_space(c, request, &length,
				 ALLOC_NORMAL, JFFS2_SUMMARY_XREF_SIZE);
	if (rc) {
		JFFS2_WARNING("jffs2_reserve_space()=%d, request=%u\n", rc, request);
		/* drop the reference taken by create_xattr_datum() */
		down_write(&c->xattr_sem);
		xd->refcnt--;
		if (!xd->refcnt)
			delete_xattr_datum(c, xd);
		up_write(&c->xattr_sem);
		return rc;
	}
	down_write(&c->xattr_sem);
	/* unlink the old ref before creating the new one; restored on failure */
	if (ref)
		*pref = ref->next;
	newref = create_xattr_ref(c, ic, xd);
	if (IS_ERR(newref)) {
		/* re-link the old ref so the attribute is not lost */
		if (ref) {
			ref->next = ic->xref;
			ic->xref = ref;
		}
		rc = PTR_ERR(newref);
		xd->refcnt--;
		if (!xd->refcnt)
			delete_xattr_datum(c, xd);
	} else if (ref) {
		/* new ref committed: retire the old one */
		delete_xattr_ref(c, ref);
	}
 out:
	up_write(&c->xattr_sem);
	jffs2_complete_reservation(c);
	return rc;
}
| 1143 | |||
| 1144 | /* -------- garbage collector functions ------------- | ||
| 1145 | * jffs2_garbage_collect_xattr_datum(c, xd) | ||
| 1146 | * is used to move xdatum into new node. | ||
| 1147 | * jffs2_garbage_collect_xattr_ref(c, ref) | ||
| 1148 | * is used to move xref into new node. | ||
| 1149 | * jffs2_verify_xattr(c) | ||
| 1150 | * is used to call do_verify_xattr_datum() before garbage collecting. | ||
| 1151 | * -------------------------------------------------- */ | ||
| 1152 | int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) | ||
| 1153 | { | ||
| 1154 | uint32_t totlen, length, old_ofs; | ||
| 1155 | int rc = -EINVAL; | ||
| 1156 | |||
| 1157 | down_write(&c->xattr_sem); | ||
| 1158 | BUG_ON(!xd->node); | ||
| 1159 | |||
| 1160 | old_ofs = ref_offset(xd->node); | ||
| 1161 | totlen = ref_totlen(c, c->gcblock, xd->node); | ||
| 1162 | if (totlen < sizeof(struct jffs2_raw_xattr)) | ||
| 1163 | goto out; | ||
| 1164 | |||
| 1165 | if (!xd->xname) { | ||
| 1166 | rc = load_xattr_datum(c, xd); | ||
| 1167 | if (unlikely(rc > 0)) { | ||
| 1168 | delete_xattr_datum_node(c, xd); | ||
| 1169 | rc = 0; | ||
| 1170 | goto out; | ||
| 1171 | } else if (unlikely(rc < 0)) | ||
| 1172 | goto out; | ||
| 1173 | } | ||
| 1174 | rc = jffs2_reserve_space_gc(c, totlen, &length, JFFS2_SUMMARY_XATTR_SIZE); | ||
| 1175 | if (rc || length < totlen) { | ||
| 1176 | JFFS2_WARNING("jffs2_reserve_space()=%d, request=%u\n", rc, totlen); | ||
| 1177 | rc = rc ? rc : -EBADFD; | ||
| 1178 | goto out; | ||
| 1179 | } | ||
| 1180 | rc = save_xattr_datum(c, xd); | ||
| 1181 | if (!rc) | ||
| 1182 | dbg_xattr("xdatum (xid=%u, version=%u) GC'ed from %#08x to %08x\n", | ||
| 1183 | xd->xid, xd->version, old_ofs, ref_offset(xd->node)); | ||
| 1184 | out: | ||
| 1185 | up_write(&c->xattr_sem); | ||
| 1186 | return rc; | ||
| 1187 | } | ||
| 1188 | |||
| 1189 | |||
/*
 * jffs2_garbage_collect_xattr_ref(c, ref)
 *   Move a live xref node out of the eraseblock being garbage-collected by
 *   writing a fresh copy elsewhere.  Returns 0 on success or a negative
 *   errno.
 */
int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
{
	uint32_t totlen, length, old_ofs;
	int rc = -EINVAL;

	down_write(&c->xattr_sem);
	BUG_ON(!ref->node);

	old_ofs = ref_offset(ref->node);
	totlen = ref_totlen(c, c->gcblock, ref->node);
	/* a valid xref node has a fixed size; anything else is not GC'able */
	if (totlen != sizeof(struct jffs2_raw_xref))
		goto out;

	rc = jffs2_reserve_space_gc(c, totlen, &length, JFFS2_SUMMARY_XREF_SIZE);
	if (rc || length < totlen) {
		JFFS2_WARNING("%s: jffs2_reserve_space() = %d, request = %u\n",
			      __FUNCTION__, rc, totlen);
		rc = rc ? rc : -EBADFD;
		goto out;
	}
	rc = save_xattr_ref(c, ref);
	if (!rc)
		dbg_xattr("xref (ino=%u, xid=%u) GC'ed from %#08x to %08x\n",
			  ref->ic->ino, ref->xd->xid, old_ofs, ref_offset(ref->node));
 out:
	up_write(&c->xattr_sem);
	return rc;
}
| 1218 | |||
| 1219 | int jffs2_verify_xattr(struct jffs2_sb_info *c) | ||
| 1220 | { | ||
| 1221 | struct jffs2_xattr_datum *xd, *_xd; | ||
| 1222 | int rc; | ||
| 1223 | |||
| 1224 | down_write(&c->xattr_sem); | ||
| 1225 | list_for_each_entry_safe(xd, _xd, &c->xattr_unchecked, xindex) { | ||
| 1226 | rc = do_verify_xattr_datum(c, xd); | ||
| 1227 | if (rc == 0) { | ||
| 1228 | list_del_init(&xd->xindex); | ||
| 1229 | break; | ||
| 1230 | } else if (rc > 0) { | ||
| 1231 | list_del_init(&xd->xindex); | ||
| 1232 | delete_xattr_datum_node(c, xd); | ||
| 1233 | } | ||
| 1234 | } | ||
| 1235 | up_write(&c->xattr_sem); | ||
| 1236 | |||
| 1237 | return list_empty(&c->xattr_unchecked) ? 1 : 0; | ||
| 1238 | } | ||
diff --git a/fs/jffs2/xattr.h b/fs/jffs2/xattr.h new file mode 100644 index 000000000000..2c199856c582 --- /dev/null +++ b/fs/jffs2/xattr.h | |||
| @@ -0,0 +1,116 @@ | |||
| 1 | /* | ||
| 2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
| 3 | * | ||
| 4 | * Copyright (C) 2006 NEC Corporation | ||
| 5 | * | ||
| 6 | * Created by KaiGai Kohei <kaigai@ak.jp.nec.com> | ||
| 7 | * | ||
| 8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
| 9 | * | ||
| 10 | */ | ||
| 11 | #ifndef _JFFS2_FS_XATTR_H_ | ||
| 12 | #define _JFFS2_FS_XATTR_H_ | ||
| 13 | |||
| 14 | #include <linux/xattr.h> | ||
| 15 | #include <linux/list.h> | ||
| 16 | |||
| 17 | #define JFFS2_XFLAGS_HOT (0x01) /* This datum is HOT */ | ||
| 18 | #define JFFS2_XFLAGS_BIND (0x02) /* This datum is not reclaimed */ | ||
| 19 | |||
/* In-core representation of one on-flash xdatum node: an extended-attribute
 * name/value pair, shared by every inode that references it via an xref. */
struct jffs2_xattr_datum
{
	void *always_null;	/* NOTE(review): kept NULL — presumably aligns this
				 * struct's layout with other node-owner structs;
				 * confirm before relying on it */
	struct jffs2_raw_node_ref *node;	/* on-flash node backing this xdatum */
	uint8_t class;
	uint8_t flags;		/* JFFS2_XFLAGS_* */
	uint16_t xprefix;		/* see JFFS2_XATTR_PREFIX_* */

	struct list_head xindex;  /* chained from c->xattrindex[n] */
	uint32_t refcnt;	  /* # of xattr_ref refers this */
	uint32_t xid;
	uint32_t version;

	uint32_t data_crc;
	uint32_t hashkey;
	char *xname;		/* XATTR name without prefix (NULL until loaded from flash) */
	uint32_t name_len;	/* length of xname */
	char *xvalue;		/* XATTR value */
	uint32_t value_len;	/* length of xvalue */
};
| 40 | |||
struct jffs2_inode_cache;
/* One inode -> xdatum link, itself backed by an on-flash xref node. */
struct jffs2_xattr_ref
{
	void *always_null;	/* NOTE(review): kept NULL — mirrors jffs2_xattr_datum;
				 * confirm before relying on it */
	struct jffs2_raw_node_ref *node;	/* on-flash node backing this xref */
	uint8_t class;
	uint8_t flags;		/* Currently unused */
	u16 unused;

	union {
		struct jffs2_inode_cache *ic;	/* reference to jffs2_inode_cache */
		uint32_t ino;		/* only used in scanning/building */
	};
	union {
		struct jffs2_xattr_datum *xd;	/* reference to jffs2_xattr_datum */
		uint32_t xid;		/* only used in scanning/building */
	};
	struct jffs2_xattr_ref *next;	/* chained from ic->xref_list */
};
| 60 | |||
| 61 | #ifdef CONFIG_JFFS2_FS_XATTR | ||
| 62 | |||
| 63 | extern void jffs2_init_xattr_subsystem(struct jffs2_sb_info *c); | ||
| 64 | extern void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c); | ||
| 65 | extern void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c); | ||
| 66 | |||
| 67 | extern struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c, | ||
| 68 | uint32_t xid, uint32_t version); | ||
| 69 | |||
| 70 | extern void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic); | ||
| 71 | extern void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic); | ||
| 72 | |||
| 73 | extern int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd); | ||
| 74 | extern int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref); | ||
| 75 | extern int jffs2_verify_xattr(struct jffs2_sb_info *c); | ||
| 76 | |||
| 77 | extern int do_jffs2_getxattr(struct inode *inode, int xprefix, const char *xname, | ||
| 78 | char *buffer, size_t size); | ||
| 79 | extern int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname, | ||
| 80 | const char *buffer, size_t size, int flags); | ||
| 81 | |||
| 82 | extern struct xattr_handler *jffs2_xattr_handlers[]; | ||
| 83 | extern struct xattr_handler jffs2_user_xattr_handler; | ||
| 84 | extern struct xattr_handler jffs2_trusted_xattr_handler; | ||
| 85 | |||
| 86 | extern ssize_t jffs2_listxattr(struct dentry *, char *, size_t); | ||
| 87 | #define jffs2_getxattr generic_getxattr | ||
| 88 | #define jffs2_setxattr generic_setxattr | ||
| 89 | #define jffs2_removexattr generic_removexattr | ||
| 90 | |||
| 91 | #else | ||
| 92 | |||
| 93 | #define jffs2_init_xattr_subsystem(c) | ||
| 94 | #define jffs2_build_xattr_subsystem(c) | ||
| 95 | #define jffs2_clear_xattr_subsystem(c) | ||
| 96 | |||
| 97 | #define jffs2_xattr_delete_inode(c, ic) | ||
| 98 | #define jffs2_xattr_free_inode(c, ic) | ||
| 99 | #define jffs2_verify_xattr(c) (1) | ||
| 100 | |||
| 101 | #define jffs2_xattr_handlers NULL | ||
| 102 | #define jffs2_listxattr NULL | ||
| 103 | #define jffs2_getxattr NULL | ||
| 104 | #define jffs2_setxattr NULL | ||
| 105 | #define jffs2_removexattr NULL | ||
| 106 | |||
| 107 | #endif /* CONFIG_JFFS2_FS_XATTR */ | ||
| 108 | |||
| 109 | #ifdef CONFIG_JFFS2_FS_SECURITY | ||
| 110 | extern int jffs2_init_security(struct inode *inode, struct inode *dir); | ||
| 111 | extern struct xattr_handler jffs2_security_xattr_handler; | ||
| 112 | #else | ||
| 113 | #define jffs2_init_security(inode,dir) (0) | ||
| 114 | #endif /* CONFIG_JFFS2_FS_SECURITY */ | ||
| 115 | |||
| 116 | #endif /* _JFFS2_FS_XATTR_H_ */ | ||
diff --git a/fs/jffs2/xattr_trusted.c b/fs/jffs2/xattr_trusted.c new file mode 100644 index 000000000000..ed046e19dbfa --- /dev/null +++ b/fs/jffs2/xattr_trusted.c | |||
| @@ -0,0 +1,52 @@ | |||
| 1 | /* | ||
| 2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
| 3 | * | ||
| 4 | * Copyright (C) 2006 NEC Corporation | ||
| 5 | * | ||
| 6 | * Created by KaiGai Kohei <kaigai@ak.jp.nec.com> | ||
| 7 | * | ||
| 8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
| 9 | * | ||
| 10 | */ | ||
| 11 | #include <linux/kernel.h> | ||
| 12 | #include <linux/fs.h> | ||
| 13 | #include <linux/jffs2.h> | ||
| 14 | #include <linux/xattr.h> | ||
| 15 | #include <linux/mtd/mtd.h> | ||
| 16 | #include "nodelist.h" | ||
| 17 | |||
| 18 | static int jffs2_trusted_getxattr(struct inode *inode, const char *name, | ||
| 19 | void *buffer, size_t size) | ||
| 20 | { | ||
| 21 | if (!strcmp(name, "")) | ||
| 22 | return -EINVAL; | ||
| 23 | return do_jffs2_getxattr(inode, JFFS2_XPREFIX_TRUSTED, name, buffer, size); | ||
| 24 | } | ||
| 25 | |||
| 26 | static int jffs2_trusted_setxattr(struct inode *inode, const char *name, const void *buffer, | ||
| 27 | size_t size, int flags) | ||
| 28 | { | ||
| 29 | if (!strcmp(name, "")) | ||
| 30 | return -EINVAL; | ||
| 31 | return do_jffs2_setxattr(inode, JFFS2_XPREFIX_TRUSTED, name, buffer, size, flags); | ||
| 32 | } | ||
| 33 | |||
| 34 | static size_t jffs2_trusted_listxattr(struct inode *inode, char *list, size_t list_size, | ||
| 35 | const char *name, size_t name_len) | ||
| 36 | { | ||
| 37 | size_t retlen = XATTR_TRUSTED_PREFIX_LEN + name_len + 1; | ||
| 38 | |||
| 39 | if (list && retlen<=list_size) { | ||
| 40 | strcpy(list, XATTR_TRUSTED_PREFIX); | ||
| 41 | strcpy(list + XATTR_TRUSTED_PREFIX_LEN, name); | ||
| 42 | } | ||
| 43 | |||
| 44 | return retlen; | ||
| 45 | } | ||
| 46 | |||
/* VFS xattr handler for the "trusted." namespace on JFFS2. */
struct xattr_handler jffs2_trusted_xattr_handler = {
	.prefix = XATTR_TRUSTED_PREFIX,
	.list = jffs2_trusted_listxattr,
	.set = jffs2_trusted_setxattr,
	.get = jffs2_trusted_getxattr
};
diff --git a/fs/jffs2/xattr_user.c b/fs/jffs2/xattr_user.c new file mode 100644 index 000000000000..2f8e9aa01ea0 --- /dev/null +++ b/fs/jffs2/xattr_user.c | |||
| @@ -0,0 +1,52 @@ | |||
| 1 | /* | ||
| 2 | * JFFS2 -- Journalling Flash File System, Version 2. | ||
| 3 | * | ||
| 4 | * Copyright (C) 2006 NEC Corporation | ||
| 5 | * | ||
| 6 | * Created by KaiGai Kohei <kaigai@ak.jp.nec.com> | ||
| 7 | * | ||
| 8 | * For licensing information, see the file 'LICENCE' in this directory. | ||
| 9 | * | ||
| 10 | */ | ||
| 11 | #include <linux/kernel.h> | ||
| 12 | #include <linux/fs.h> | ||
| 13 | #include <linux/jffs2.h> | ||
| 14 | #include <linux/xattr.h> | ||
| 15 | #include <linux/mtd/mtd.h> | ||
| 16 | #include "nodelist.h" | ||
| 17 | |||
| 18 | static int jffs2_user_getxattr(struct inode *inode, const char *name, | ||
| 19 | void *buffer, size_t size) | ||
| 20 | { | ||
| 21 | if (!strcmp(name, "")) | ||
| 22 | return -EINVAL; | ||
| 23 | return do_jffs2_getxattr(inode, JFFS2_XPREFIX_USER, name, buffer, size); | ||
| 24 | } | ||
| 25 | |||
| 26 | static int jffs2_user_setxattr(struct inode *inode, const char *name, const void *buffer, | ||
| 27 | size_t size, int flags) | ||
| 28 | { | ||
| 29 | if (!strcmp(name, "")) | ||
| 30 | return -EINVAL; | ||
| 31 | return do_jffs2_setxattr(inode, JFFS2_XPREFIX_USER, name, buffer, size, flags); | ||
| 32 | } | ||
| 33 | |||
| 34 | static size_t jffs2_user_listxattr(struct inode *inode, char *list, size_t list_size, | ||
| 35 | const char *name, size_t name_len) | ||
| 36 | { | ||
| 37 | size_t retlen = XATTR_USER_PREFIX_LEN + name_len + 1; | ||
| 38 | |||
| 39 | if (list && retlen <= list_size) { | ||
| 40 | strcpy(list, XATTR_USER_PREFIX); | ||
| 41 | strcpy(list + XATTR_USER_PREFIX_LEN, name); | ||
| 42 | } | ||
| 43 | |||
| 44 | return retlen; | ||
| 45 | } | ||
| 46 | |||
/* VFS xattr handler for the "user." namespace on JFFS2. */
struct xattr_handler jffs2_user_xattr_handler = {
	.prefix = XATTR_USER_PREFIX,
	.list = jffs2_user_listxattr,
	.set = jffs2_user_setxattr,
	.get = jffs2_user_getxattr
};
diff --git a/fs/namei.c b/fs/namei.c index d6e2ee251736..184fe4acf824 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
| @@ -1127,7 +1127,7 @@ out: | |||
| 1127 | if (likely(retval == 0)) { | 1127 | if (likely(retval == 0)) { |
| 1128 | if (unlikely(current->audit_context && nd && nd->dentry && | 1128 | if (unlikely(current->audit_context && nd && nd->dentry && |
| 1129 | nd->dentry->d_inode)) | 1129 | nd->dentry->d_inode)) |
| 1130 | audit_inode(name, nd->dentry->d_inode, flags); | 1130 | audit_inode(name, nd->dentry->d_inode); |
| 1131 | } | 1131 | } |
| 1132 | out_fail: | 1132 | out_fail: |
| 1133 | return retval; | 1133 | return retval; |
| @@ -633,7 +633,7 @@ asmlinkage long sys_fchmod(unsigned int fd, mode_t mode) | |||
| 633 | dentry = file->f_dentry; | 633 | dentry = file->f_dentry; |
| 634 | inode = dentry->d_inode; | 634 | inode = dentry->d_inode; |
| 635 | 635 | ||
| 636 | audit_inode(NULL, inode, 0); | 636 | audit_inode(NULL, inode); |
| 637 | 637 | ||
| 638 | err = -EROFS; | 638 | err = -EROFS; |
| 639 | if (IS_RDONLY(inode)) | 639 | if (IS_RDONLY(inode)) |
| @@ -786,7 +786,7 @@ asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group) | |||
| 786 | if (file) { | 786 | if (file) { |
| 787 | struct dentry * dentry; | 787 | struct dentry * dentry; |
| 788 | dentry = file->f_dentry; | 788 | dentry = file->f_dentry; |
| 789 | audit_inode(NULL, dentry->d_inode, 0); | 789 | audit_inode(NULL, dentry->d_inode); |
| 790 | error = chown_common(dentry, user, group); | 790 | error = chown_common(dentry, user, group); |
| 791 | fput(file); | 791 | fput(file); |
| 792 | } | 792 | } |
diff --git a/fs/proc/base.c b/fs/proc/base.c index 6cc77dc3f3ff..6afff725a8c9 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
| @@ -1019,8 +1019,8 @@ static ssize_t proc_loginuid_write(struct file * file, const char __user * buf, | |||
| 1019 | if (current != task) | 1019 | if (current != task) |
| 1020 | return -EPERM; | 1020 | return -EPERM; |
| 1021 | 1021 | ||
| 1022 | if (count > PAGE_SIZE) | 1022 | if (count >= PAGE_SIZE) |
| 1023 | count = PAGE_SIZE; | 1023 | count = PAGE_SIZE - 1; |
| 1024 | 1024 | ||
| 1025 | if (*ppos != 0) { | 1025 | if (*ppos != 0) { |
| 1026 | /* No partial writes. */ | 1026 | /* No partial writes. */ |
| @@ -1033,6 +1033,7 @@ static ssize_t proc_loginuid_write(struct file * file, const char __user * buf, | |||
| 1033 | if (copy_from_user(page, buf, count)) | 1033 | if (copy_from_user(page, buf, count)) |
| 1034 | goto out_free_page; | 1034 | goto out_free_page; |
| 1035 | 1035 | ||
| 1036 | page[count] = '\0'; | ||
| 1036 | loginuid = simple_strtoul(page, &tmp, 10); | 1037 | loginuid = simple_strtoul(page, &tmp, 10); |
| 1037 | if (tmp == page) { | 1038 | if (tmp == page) { |
| 1038 | length = -EINVAL; | 1039 | length = -EINVAL; |
diff --git a/fs/xattr.c b/fs/xattr.c index e416190f5e9c..c32f15b5f60f 100644 --- a/fs/xattr.c +++ b/fs/xattr.c | |||
| @@ -242,7 +242,7 @@ sys_fsetxattr(int fd, char __user *name, void __user *value, | |||
| 242 | if (!f) | 242 | if (!f) |
| 243 | return error; | 243 | return error; |
| 244 | dentry = f->f_dentry; | 244 | dentry = f->f_dentry; |
| 245 | audit_inode(NULL, dentry->d_inode, 0); | 245 | audit_inode(NULL, dentry->d_inode); |
| 246 | error = setxattr(dentry, name, value, size, flags); | 246 | error = setxattr(dentry, name, value, size, flags); |
| 247 | fput(f); | 247 | fput(f); |
| 248 | return error; | 248 | return error; |
| @@ -469,7 +469,7 @@ sys_fremovexattr(int fd, char __user *name) | |||
| 469 | if (!f) | 469 | if (!f) |
| 470 | return error; | 470 | return error; |
| 471 | dentry = f->f_dentry; | 471 | dentry = f->f_dentry; |
| 472 | audit_inode(NULL, dentry->d_inode, 0); | 472 | audit_inode(NULL, dentry->d_inode); |
| 473 | error = removexattr(dentry, name); | 473 | error = removexattr(dentry, name); |
| 474 | fput(f); | 474 | fput(f); |
| 475 | return error; | 475 | return error; |
