aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/mtd/ubi
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/mtd/ubi')
-rw-r--r--drivers/mtd/ubi/build.c674
-rw-r--r--drivers/mtd/ubi/cdev.c244
-rw-r--r--drivers/mtd/ubi/debug.h21
-rw-r--r--drivers/mtd/ubi/eba.c321
-rw-r--r--drivers/mtd/ubi/gluebi.c9
-rw-r--r--drivers/mtd/ubi/io.c10
-rw-r--r--drivers/mtd/ubi/kapi.c177
-rw-r--r--drivers/mtd/ubi/misc.c2
-rw-r--r--drivers/mtd/ubi/scan.c12
-rw-r--r--drivers/mtd/ubi/ubi.h171
-rw-r--r--drivers/mtd/ubi/upd.c185
-rw-r--r--drivers/mtd/ubi/vmt.c208
-rw-r--r--drivers/mtd/ubi/vtbl.c45
-rw-r--r--drivers/mtd/ubi/wl.c338
14 files changed, 1578 insertions, 839 deletions
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 023653977a1a..6ac81e35355c 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -21,11 +21,16 @@
21 */ 21 */
22 22
23/* 23/*
24 * This file includes UBI initialization and building of UBI devices. At the 24 * This file includes UBI initialization and building of UBI devices.
25 * moment UBI devices may only be added while UBI is initialized, but dynamic 25 *
26 * device add/remove functionality is planned. Also, at the moment we only 26 * When UBI is initialized, it attaches all the MTD devices specified as the
27 * attach UBI devices by scanning, which will become a bottleneck when flashes 27 * module load parameters or the kernel boot parameters. If MTD devices were
28 * reach certain large size. Then one may improve UBI and add other methods. 28 * specified, UBI does not attach any MTD device, but it is possible to do
29 * later using the "UBI control device".
30 *
31 * At the moment we only attach UBI devices by scanning, which will become a
32 * bottleneck when flashes reach certain large size. Then one may improve UBI
33 * and add other methods, although it does not seem to be easy to do.
29 */ 34 */
30 35
31#include <linux/err.h> 36#include <linux/err.h>
@@ -33,7 +38,9 @@
33#include <linux/moduleparam.h> 38#include <linux/moduleparam.h>
34#include <linux/stringify.h> 39#include <linux/stringify.h>
35#include <linux/stat.h> 40#include <linux/stat.h>
41#include <linux/miscdevice.h>
36#include <linux/log2.h> 42#include <linux/log2.h>
43#include <linux/kthread.h>
37#include "ubi.h" 44#include "ubi.h"
38 45
39/* Maximum length of the 'mtd=' parameter */ 46/* Maximum length of the 'mtd=' parameter */
@@ -43,13 +50,11 @@
43 * struct mtd_dev_param - MTD device parameter description data structure. 50 * struct mtd_dev_param - MTD device parameter description data structure.
44 * @name: MTD device name or number string 51 * @name: MTD device name or number string
45 * @vid_hdr_offs: VID header offset 52 * @vid_hdr_offs: VID header offset
46 * @data_offs: data offset
47 */ 53 */
48struct mtd_dev_param 54struct mtd_dev_param
49{ 55{
50 char name[MTD_PARAM_LEN_MAX]; 56 char name[MTD_PARAM_LEN_MAX];
51 int vid_hdr_offs; 57 int vid_hdr_offs;
52 int data_offs;
53}; 58};
54 59
55/* Numbers of elements set in the @mtd_dev_param array */ 60/* Numbers of elements set in the @mtd_dev_param array */
@@ -58,14 +63,27 @@ static int mtd_devs = 0;
58/* MTD devices specification parameters */ 63/* MTD devices specification parameters */
59static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES]; 64static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES];
60 65
61/* Number of UBI devices in system */ 66/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
62int ubi_devices_cnt; 67struct class *ubi_class;
68
69/* Slab cache for wear-leveling entries */
70struct kmem_cache *ubi_wl_entry_slab;
71
72/* UBI control character device */
73static struct miscdevice ubi_ctrl_cdev = {
74 .minor = MISC_DYNAMIC_MINOR,
75 .name = "ubi_ctrl",
76 .fops = &ubi_ctrl_cdev_operations,
77};
63 78
64/* All UBI devices in system */ 79/* All UBI devices in system */
65struct ubi_device *ubi_devices[UBI_MAX_DEVICES]; 80static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
66 81
67/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */ 82/* Serializes UBI devices creations and removals */
68struct class *ubi_class; 83DEFINE_MUTEX(ubi_devices_mutex);
84
85/* Protects @ubi_devices and @ubi->ref_count */
86static DEFINE_SPINLOCK(ubi_devices_lock);
69 87
70/* "Show" method for files in '/<sysfs>/class/ubi/' */ 88/* "Show" method for files in '/<sysfs>/class/ubi/' */
71static ssize_t ubi_version_show(struct class *class, char *buf) 89static ssize_t ubi_version_show(struct class *class, char *buf)
@@ -101,38 +119,150 @@ static struct device_attribute dev_min_io_size =
101 __ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL); 119 __ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL);
102static struct device_attribute dev_bgt_enabled = 120static struct device_attribute dev_bgt_enabled =
103 __ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL); 121 __ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
122static struct device_attribute dev_mtd_num =
123 __ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
124
125/**
126 * ubi_get_device - get UBI device.
127 * @ubi_num: UBI device number
128 *
129 * This function returns UBI device description object for UBI device number
130 * @ubi_num, or %NULL if the device does not exist. This function increases the
131 * device reference count to prevent removal of the device. In other words, the
132 * device cannot be removed if its reference count is not zero.
133 */
134struct ubi_device *ubi_get_device(int ubi_num)
135{
136 struct ubi_device *ubi;
137
138 spin_lock(&ubi_devices_lock);
139 ubi = ubi_devices[ubi_num];
140 if (ubi) {
141 ubi_assert(ubi->ref_count >= 0);
142 ubi->ref_count += 1;
143 get_device(&ubi->dev);
144 }
145 spin_unlock(&ubi_devices_lock);
146
147 return ubi;
148}
149
150/**
151 * ubi_put_device - drop an UBI device reference.
152 * @ubi: UBI device description object
153 */
154void ubi_put_device(struct ubi_device *ubi)
155{
156 spin_lock(&ubi_devices_lock);
157 ubi->ref_count -= 1;
158 put_device(&ubi->dev);
159 spin_unlock(&ubi_devices_lock);
160}
161
162/**
163 * ubi_get_by_major - get UBI device description object by character device
164 * major number.
165 * @major: major number
166 *
167 * This function is similar to 'ubi_get_device()', but it searches the device
168 * by its major number.
169 */
170struct ubi_device *ubi_get_by_major(int major)
171{
172 int i;
173 struct ubi_device *ubi;
174
175 spin_lock(&ubi_devices_lock);
176 for (i = 0; i < UBI_MAX_DEVICES; i++) {
177 ubi = ubi_devices[i];
178 if (ubi && MAJOR(ubi->cdev.dev) == major) {
179 ubi_assert(ubi->ref_count >= 0);
180 ubi->ref_count += 1;
181 get_device(&ubi->dev);
182 spin_unlock(&ubi_devices_lock);
183 return ubi;
184 }
185 }
186 spin_unlock(&ubi_devices_lock);
187
188 return NULL;
189}
190
191/**
192 * ubi_major2num - get UBI device number by character device major number.
193 * @major: major number
194 *
195 * This function searches UBI device number object by its major number. If UBI
196 * device was not found, this function returns -ENODEV, otherwise the UBI device
197 * number is returned.
198 */
199int ubi_major2num(int major)
200{
201 int i, ubi_num = -ENODEV;
202
203 spin_lock(&ubi_devices_lock);
204 for (i = 0; i < UBI_MAX_DEVICES; i++) {
205 struct ubi_device *ubi = ubi_devices[i];
206
207 if (ubi && MAJOR(ubi->cdev.dev) == major) {
208 ubi_num = ubi->ubi_num;
209 break;
210 }
211 }
212 spin_unlock(&ubi_devices_lock);
213
214 return ubi_num;
215}
104 216
105/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */ 217/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
106static ssize_t dev_attribute_show(struct device *dev, 218static ssize_t dev_attribute_show(struct device *dev,
107 struct device_attribute *attr, char *buf) 219 struct device_attribute *attr, char *buf)
108{ 220{
109 const struct ubi_device *ubi; 221 ssize_t ret;
222 struct ubi_device *ubi;
110 223
224 /*
225 * The below code looks weird, but it actually makes sense. We get the
226 * UBI device reference from the contained 'struct ubi_device'. But it
227 * is unclear if the device was removed or not yet. Indeed, if the
228 * device was removed before we increased its reference count,
229 * 'ubi_get_device()' will return -ENODEV and we fail.
230 *
231 * Remember, 'struct ubi_device' is freed in the release function, so
232 * we still can use 'ubi->ubi_num'.
233 */
111 ubi = container_of(dev, struct ubi_device, dev); 234 ubi = container_of(dev, struct ubi_device, dev);
235 ubi = ubi_get_device(ubi->ubi_num);
236 if (!ubi)
237 return -ENODEV;
238
112 if (attr == &dev_eraseblock_size) 239 if (attr == &dev_eraseblock_size)
113 return sprintf(buf, "%d\n", ubi->leb_size); 240 ret = sprintf(buf, "%d\n", ubi->leb_size);
114 else if (attr == &dev_avail_eraseblocks) 241 else if (attr == &dev_avail_eraseblocks)
115 return sprintf(buf, "%d\n", ubi->avail_pebs); 242 ret = sprintf(buf, "%d\n", ubi->avail_pebs);
116 else if (attr == &dev_total_eraseblocks) 243 else if (attr == &dev_total_eraseblocks)
117 return sprintf(buf, "%d\n", ubi->good_peb_count); 244 ret = sprintf(buf, "%d\n", ubi->good_peb_count);
118 else if (attr == &dev_volumes_count) 245 else if (attr == &dev_volumes_count)
119 return sprintf(buf, "%d\n", ubi->vol_count); 246 ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT);
120 else if (attr == &dev_max_ec) 247 else if (attr == &dev_max_ec)
121 return sprintf(buf, "%d\n", ubi->max_ec); 248 ret = sprintf(buf, "%d\n", ubi->max_ec);
122 else if (attr == &dev_reserved_for_bad) 249 else if (attr == &dev_reserved_for_bad)
123 return sprintf(buf, "%d\n", ubi->beb_rsvd_pebs); 250 ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
124 else if (attr == &dev_bad_peb_count) 251 else if (attr == &dev_bad_peb_count)
125 return sprintf(buf, "%d\n", ubi->bad_peb_count); 252 ret = sprintf(buf, "%d\n", ubi->bad_peb_count);
126 else if (attr == &dev_max_vol_count) 253 else if (attr == &dev_max_vol_count)
127 return sprintf(buf, "%d\n", ubi->vtbl_slots); 254 ret = sprintf(buf, "%d\n", ubi->vtbl_slots);
128 else if (attr == &dev_min_io_size) 255 else if (attr == &dev_min_io_size)
129 return sprintf(buf, "%d\n", ubi->min_io_size); 256 ret = sprintf(buf, "%d\n", ubi->min_io_size);
130 else if (attr == &dev_bgt_enabled) 257 else if (attr == &dev_bgt_enabled)
131 return sprintf(buf, "%d\n", ubi->thread_enabled); 258 ret = sprintf(buf, "%d\n", ubi->thread_enabled);
259 else if (attr == &dev_mtd_num)
260 ret = sprintf(buf, "%d\n", ubi->mtd->index);
132 else 261 else
133 BUG(); 262 ret = -EINVAL;
134 263
135 return 0; 264 ubi_put_device(ubi);
265 return ret;
136} 266}
137 267
138/* Fake "release" method for UBI devices */ 268/* Fake "release" method for UBI devices */
@@ -150,68 +280,44 @@ static int ubi_sysfs_init(struct ubi_device *ubi)
150 int err; 280 int err;
151 281
152 ubi->dev.release = dev_release; 282 ubi->dev.release = dev_release;
153 ubi->dev.devt = MKDEV(ubi->major, 0); 283 ubi->dev.devt = ubi->cdev.dev;
154 ubi->dev.class = ubi_class; 284 ubi->dev.class = ubi_class;
155 sprintf(&ubi->dev.bus_id[0], UBI_NAME_STR"%d", ubi->ubi_num); 285 sprintf(&ubi->dev.bus_id[0], UBI_NAME_STR"%d", ubi->ubi_num);
156 err = device_register(&ubi->dev); 286 err = device_register(&ubi->dev);
157 if (err) 287 if (err)
158 goto out; 288 return err;
159 289
160 err = device_create_file(&ubi->dev, &dev_eraseblock_size); 290 err = device_create_file(&ubi->dev, &dev_eraseblock_size);
161 if (err) 291 if (err)
162 goto out_unregister; 292 return err;
163 err = device_create_file(&ubi->dev, &dev_avail_eraseblocks); 293 err = device_create_file(&ubi->dev, &dev_avail_eraseblocks);
164 if (err) 294 if (err)
165 goto out_eraseblock_size; 295 return err;
166 err = device_create_file(&ubi->dev, &dev_total_eraseblocks); 296 err = device_create_file(&ubi->dev, &dev_total_eraseblocks);
167 if (err) 297 if (err)
168 goto out_avail_eraseblocks; 298 return err;
169 err = device_create_file(&ubi->dev, &dev_volumes_count); 299 err = device_create_file(&ubi->dev, &dev_volumes_count);
170 if (err) 300 if (err)
171 goto out_total_eraseblocks; 301 return err;
172 err = device_create_file(&ubi->dev, &dev_max_ec); 302 err = device_create_file(&ubi->dev, &dev_max_ec);
173 if (err) 303 if (err)
174 goto out_volumes_count; 304 return err;
175 err = device_create_file(&ubi->dev, &dev_reserved_for_bad); 305 err = device_create_file(&ubi->dev, &dev_reserved_for_bad);
176 if (err) 306 if (err)
177 goto out_volumes_max_ec; 307 return err;
178 err = device_create_file(&ubi->dev, &dev_bad_peb_count); 308 err = device_create_file(&ubi->dev, &dev_bad_peb_count);
179 if (err) 309 if (err)
180 goto out_reserved_for_bad; 310 return err;
181 err = device_create_file(&ubi->dev, &dev_max_vol_count); 311 err = device_create_file(&ubi->dev, &dev_max_vol_count);
182 if (err) 312 if (err)
183 goto out_bad_peb_count; 313 return err;
184 err = device_create_file(&ubi->dev, &dev_min_io_size); 314 err = device_create_file(&ubi->dev, &dev_min_io_size);
185 if (err) 315 if (err)
186 goto out_max_vol_count; 316 return err;
187 err = device_create_file(&ubi->dev, &dev_bgt_enabled); 317 err = device_create_file(&ubi->dev, &dev_bgt_enabled);
188 if (err) 318 if (err)
189 goto out_min_io_size; 319 return err;
190 320 err = device_create_file(&ubi->dev, &dev_mtd_num);
191 return 0;
192
193out_min_io_size:
194 device_remove_file(&ubi->dev, &dev_min_io_size);
195out_max_vol_count:
196 device_remove_file(&ubi->dev, &dev_max_vol_count);
197out_bad_peb_count:
198 device_remove_file(&ubi->dev, &dev_bad_peb_count);
199out_reserved_for_bad:
200 device_remove_file(&ubi->dev, &dev_reserved_for_bad);
201out_volumes_max_ec:
202 device_remove_file(&ubi->dev, &dev_max_ec);
203out_volumes_count:
204 device_remove_file(&ubi->dev, &dev_volumes_count);
205out_total_eraseblocks:
206 device_remove_file(&ubi->dev, &dev_total_eraseblocks);
207out_avail_eraseblocks:
208 device_remove_file(&ubi->dev, &dev_avail_eraseblocks);
209out_eraseblock_size:
210 device_remove_file(&ubi->dev, &dev_eraseblock_size);
211out_unregister:
212 device_unregister(&ubi->dev);
213out:
214 ubi_err("failed to initialize sysfs for %s", ubi->ubi_name);
215 return err; 321 return err;
216} 322}
217 323
@@ -221,6 +327,7 @@ out:
221 */ 327 */
222static void ubi_sysfs_close(struct ubi_device *ubi) 328static void ubi_sysfs_close(struct ubi_device *ubi)
223{ 329{
330 device_remove_file(&ubi->dev, &dev_mtd_num);
224 device_remove_file(&ubi->dev, &dev_bgt_enabled); 331 device_remove_file(&ubi->dev, &dev_bgt_enabled);
225 device_remove_file(&ubi->dev, &dev_min_io_size); 332 device_remove_file(&ubi->dev, &dev_min_io_size);
226 device_remove_file(&ubi->dev, &dev_max_vol_count); 333 device_remove_file(&ubi->dev, &dev_max_vol_count);
@@ -244,7 +351,7 @@ static void kill_volumes(struct ubi_device *ubi)
244 351
245 for (i = 0; i < ubi->vtbl_slots; i++) 352 for (i = 0; i < ubi->vtbl_slots; i++)
246 if (ubi->volumes[i]) 353 if (ubi->volumes[i])
247 ubi_free_volume(ubi, i); 354 ubi_free_volume(ubi, ubi->volumes[i]);
248} 355}
249 356
250/** 357/**
@@ -259,9 +366,6 @@ static int uif_init(struct ubi_device *ubi)
259 int i, err; 366 int i, err;
260 dev_t dev; 367 dev_t dev;
261 368
262 mutex_init(&ubi->vtbl_mutex);
263 spin_lock_init(&ubi->volumes_lock);
264
265 sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num); 369 sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);
266 370
267 /* 371 /*
@@ -278,39 +382,40 @@ static int uif_init(struct ubi_device *ubi)
278 return err; 382 return err;
279 } 383 }
280 384
385 ubi_assert(MINOR(dev) == 0);
281 cdev_init(&ubi->cdev, &ubi_cdev_operations); 386 cdev_init(&ubi->cdev, &ubi_cdev_operations);
282 ubi->major = MAJOR(dev); 387 dbg_msg("%s major is %u", ubi->ubi_name, MAJOR(dev));
283 dbg_msg("%s major is %u", ubi->ubi_name, ubi->major);
284 ubi->cdev.owner = THIS_MODULE; 388 ubi->cdev.owner = THIS_MODULE;
285 389
286 dev = MKDEV(ubi->major, 0);
287 err = cdev_add(&ubi->cdev, dev, 1); 390 err = cdev_add(&ubi->cdev, dev, 1);
288 if (err) { 391 if (err) {
289 ubi_err("cannot add character device %s", ubi->ubi_name); 392 ubi_err("cannot add character device");
290 goto out_unreg; 393 goto out_unreg;
291 } 394 }
292 395
293 err = ubi_sysfs_init(ubi); 396 err = ubi_sysfs_init(ubi);
294 if (err) 397 if (err)
295 goto out_cdev; 398 goto out_sysfs;
296 399
297 for (i = 0; i < ubi->vtbl_slots; i++) 400 for (i = 0; i < ubi->vtbl_slots; i++)
298 if (ubi->volumes[i]) { 401 if (ubi->volumes[i]) {
299 err = ubi_add_volume(ubi, i); 402 err = ubi_add_volume(ubi, ubi->volumes[i]);
300 if (err) 403 if (err) {
404 ubi_err("cannot add volume %d", i);
301 goto out_volumes; 405 goto out_volumes;
406 }
302 } 407 }
303 408
304 return 0; 409 return 0;
305 410
306out_volumes: 411out_volumes:
307 kill_volumes(ubi); 412 kill_volumes(ubi);
413out_sysfs:
308 ubi_sysfs_close(ubi); 414 ubi_sysfs_close(ubi);
309out_cdev:
310 cdev_del(&ubi->cdev); 415 cdev_del(&ubi->cdev);
311out_unreg: 416out_unreg:
312 unregister_chrdev_region(MKDEV(ubi->major, 0), 417 unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
313 ubi->vtbl_slots + 1); 418 ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err);
314 return err; 419 return err;
315} 420}
316 421
@@ -323,7 +428,7 @@ static void uif_close(struct ubi_device *ubi)
323 kill_volumes(ubi); 428 kill_volumes(ubi);
324 ubi_sysfs_close(ubi); 429 ubi_sysfs_close(ubi);
325 cdev_del(&ubi->cdev); 430 cdev_del(&ubi->cdev);
326 unregister_chrdev_region(MKDEV(ubi->major, 0), ubi->vtbl_slots + 1); 431 unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
327} 432}
328 433
329/** 434/**
@@ -384,9 +489,9 @@ out_si:
384 * assumed: 489 * assumed:
385 * o EC header is always at offset zero - this cannot be changed; 490 * o EC header is always at offset zero - this cannot be changed;
386 * o VID header starts just after the EC header at the closest address 491 * o VID header starts just after the EC header at the closest address
387 * aligned to @io->@hdrs_min_io_size; 492 * aligned to @io->hdrs_min_io_size;
388 * o data starts just after the VID header at the closest address aligned to 493 * o data starts just after the VID header at the closest address aligned to
389 * @io->@min_io_size 494 * @io->min_io_size
390 * 495 *
391 * This function returns zero in case of success and a negative error code in 496 * This function returns zero in case of success and a negative error code in
392 * case of failure. 497 * case of failure.
@@ -407,6 +512,9 @@ static int io_init(struct ubi_device *ubi)
407 return -EINVAL; 512 return -EINVAL;
408 } 513 }
409 514
515 if (ubi->vid_hdr_offset < 0)
516 return -EINVAL;
517
410 /* 518 /*
411 * Note, in this implementation we support MTD devices with 0x7FFFFFFF 519 * Note, in this implementation we support MTD devices with 0x7FFFFFFF
412 * physical eraseblocks maximum. 520 * physical eraseblocks maximum.
@@ -424,7 +532,8 @@ static int io_init(struct ubi_device *ubi)
424 532
425 /* Make sure minimal I/O unit is power of 2 */ 533 /* Make sure minimal I/O unit is power of 2 */
426 if (!is_power_of_2(ubi->min_io_size)) { 534 if (!is_power_of_2(ubi->min_io_size)) {
427 ubi_err("bad min. I/O unit"); 535 ubi_err("min. I/O unit (%d) is not power of 2",
536 ubi->min_io_size);
428 return -EINVAL; 537 return -EINVAL;
429 } 538 }
430 539
@@ -453,10 +562,8 @@ static int io_init(struct ubi_device *ubi)
453 } 562 }
454 563
455 /* Similar for the data offset */ 564 /* Similar for the data offset */
456 if (ubi->leb_start == 0) { 565 ubi->leb_start = ubi->vid_hdr_offset + UBI_EC_HDR_SIZE;
457 ubi->leb_start = ubi->vid_hdr_offset + ubi->vid_hdr_alsize; 566 ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
458 ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
459 }
460 567
461 dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset); 568 dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset);
462 dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset); 569 dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
@@ -514,76 +621,147 @@ static int io_init(struct ubi_device *ubi)
514} 621}
515 622
516/** 623/**
517 * attach_mtd_dev - attach an MTD device. 624 * autoresize - re-size the volume which has the "auto-resize" flag set.
518 * @mtd_dev: MTD device name or number string 625 * @ubi: UBI device description object
519 * @vid_hdr_offset: VID header offset 626 * @vol_id: ID of the volume to re-size
520 * @data_offset: data offset
521 * 627 *
522 * This function attaches an MTD device to UBI. It first treats @mtd_dev as the 628 * This function re-sizes the volume marked by the @UBI_VTBL_AUTORESIZE_FLG in
523 * MTD device name, and tries to open it by this name. If it is unable to open, 629 * the volume table to the largest possible size. See comments in ubi-header.h
524 * it tries to convert @mtd_dev to an integer and open the MTD device by its 630 * for more description of the flag. Returns zero in case of success and a
525 * number. Returns zero in case of success and a negative error code in case of 631 * negative error code in case of failure.
526 * failure.
527 */ 632 */
528static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset, 633static int autoresize(struct ubi_device *ubi, int vol_id)
529 int data_offset)
530{ 634{
531 struct ubi_device *ubi; 635 struct ubi_volume_desc desc;
532 struct mtd_info *mtd; 636 struct ubi_volume *vol = ubi->volumes[vol_id];
533 int i, err; 637 int err, old_reserved_pebs = vol->reserved_pebs;
534 638
535 mtd = get_mtd_device_nm(mtd_dev); 639 /*
536 if (IS_ERR(mtd)) { 640 * Clear the auto-resize flag in the volume in-memory copy of the
537 int mtd_num; 641 * volume table, and 'ubi_resize_volume()' will propogate this change
538 char *endp; 642 * to the flash.
643 */
644 ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG;
539 645
540 if (PTR_ERR(mtd) != -ENODEV) 646 if (ubi->avail_pebs == 0) {
541 return PTR_ERR(mtd); 647 struct ubi_vtbl_record vtbl_rec;
542 648
543 /* 649 /*
544 * Probably this is not MTD device name but MTD device number - 650 * No avalilable PEBs to re-size the volume, clear the flag on
545 * check this out. 651 * flash and exit.
546 */ 652 */
547 mtd_num = simple_strtoul(mtd_dev, &endp, 0); 653 memcpy(&vtbl_rec, &ubi->vtbl[vol_id],
548 if (*endp != '\0' || mtd_dev == endp) { 654 sizeof(struct ubi_vtbl_record));
549 ubi_err("incorrect MTD device: \"%s\"", mtd_dev); 655 err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
550 return -ENODEV; 656 if (err)
551 } 657 ubi_err("cannot clean auto-resize flag for volume %d",
552 658 vol_id);
553 mtd = get_mtd_device(NULL, mtd_num); 659 } else {
554 if (IS_ERR(mtd)) 660 desc.vol = vol;
555 return PTR_ERR(mtd); 661 err = ubi_resize_volume(&desc,
662 old_reserved_pebs + ubi->avail_pebs);
663 if (err)
664 ubi_err("cannot auto-resize volume %d", vol_id);
556 } 665 }
557 666
558 /* Check if we already have the same MTD device attached */ 667 if (err)
559 for (i = 0; i < ubi_devices_cnt; i++) 668 return err;
560 if (ubi_devices[i]->mtd->index == mtd->index) { 669
561 ubi_err("mtd%d is already attached to ubi%d", 670 ubi_msg("volume %d (\"%s\") re-sized from %d to %d LEBs", vol_id,
671 vol->name, old_reserved_pebs, vol->reserved_pebs);
672 return 0;
673}
674
675/**
676 * ubi_attach_mtd_dev - attach an MTD device.
677 * @mtd_dev: MTD device description object
678 * @ubi_num: number to assign to the new UBI device
679 * @vid_hdr_offset: VID header offset
680 *
681 * This function attaches MTD device @mtd_dev to UBI and assign @ubi_num number
682 * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
683 * which case this function finds a vacant device nubert and assings it
684 * automatically. Returns the new UBI device number in case of success and a
685 * negative error code in case of failure.
686 *
687 * Note, the invocations of this function has to be serialized by the
688 * @ubi_devices_mutex.
689 */
690int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
691{
692 struct ubi_device *ubi;
693 int i, err;
694
695 /*
696 * Check if we already have the same MTD device attached.
697 *
698 * Note, this function assumes that UBI devices creations and deletions
699 * are serialized, so it does not take the &ubi_devices_lock.
700 */
701 for (i = 0; i < UBI_MAX_DEVICES; i++) {
702 ubi = ubi_devices[i];
703 if (ubi && mtd->index == ubi->mtd->index) {
704 dbg_err("mtd%d is already attached to ubi%d",
562 mtd->index, i); 705 mtd->index, i);
563 err = -EINVAL; 706 return -EEXIST;
564 goto out_mtd;
565 } 707 }
708 }
566 709
567 ubi = ubi_devices[ubi_devices_cnt] = kzalloc(sizeof(struct ubi_device), 710 /*
568 GFP_KERNEL); 711 * Make sure this MTD device is not emulated on top of an UBI volume
569 if (!ubi) { 712 * already. Well, generally this recursion works fine, but there are
570 err = -ENOMEM; 713 * different problems like the UBI module takes a reference to itself
571 goto out_mtd; 714 * by attaching (and thus, opening) the emulated MTD device. This
715 * results in inability to unload the module. And in general it makes
716 * no sense to attach emulated MTD devices, so we prohibit this.
717 */
718 if (mtd->type == MTD_UBIVOLUME) {
719 ubi_err("refuse attaching mtd%d - it is already emulated on "
720 "top of UBI", mtd->index);
721 return -EINVAL;
572 } 722 }
573 723
574 ubi->ubi_num = ubi_devices_cnt; 724 if (ubi_num == UBI_DEV_NUM_AUTO) {
575 ubi->mtd = mtd; 725 /* Search for an empty slot in the @ubi_devices array */
726 for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
727 if (!ubi_devices[ubi_num])
728 break;
729 if (ubi_num == UBI_MAX_DEVICES) {
730 dbg_err("only %d UBI devices may be created", UBI_MAX_DEVICES);
731 return -ENFILE;
732 }
733 } else {
734 if (ubi_num >= UBI_MAX_DEVICES)
735 return -EINVAL;
736
737 /* Make sure ubi_num is not busy */
738 if (ubi_devices[ubi_num]) {
739 dbg_err("ubi%d already exists", ubi_num);
740 return -EEXIST;
741 }
742 }
576 743
577 dbg_msg("attaching mtd%d to ubi%d: VID header offset %d data offset %d", 744 ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
578 ubi->mtd->index, ubi_devices_cnt, vid_hdr_offset, data_offset); 745 if (!ubi)
746 return -ENOMEM;
579 747
748 ubi->mtd = mtd;
749 ubi->ubi_num = ubi_num;
580 ubi->vid_hdr_offset = vid_hdr_offset; 750 ubi->vid_hdr_offset = vid_hdr_offset;
581 ubi->leb_start = data_offset; 751 ubi->autoresize_vol_id = -1;
752
753 mutex_init(&ubi->buf_mutex);
754 mutex_init(&ubi->ckvol_mutex);
755 mutex_init(&ubi->volumes_mutex);
756 spin_lock_init(&ubi->volumes_lock);
757
758 dbg_msg("attaching mtd%d to ubi%d: VID header offset %d",
759 mtd->index, ubi_num, vid_hdr_offset);
760
582 err = io_init(ubi); 761 err = io_init(ubi);
583 if (err) 762 if (err)
584 goto out_free; 763 goto out_free;
585 764
586 mutex_init(&ubi->buf_mutex);
587 ubi->peb_buf1 = vmalloc(ubi->peb_size); 765 ubi->peb_buf1 = vmalloc(ubi->peb_size);
588 if (!ubi->peb_buf1) 766 if (!ubi->peb_buf1)
589 goto out_free; 767 goto out_free;
@@ -605,12 +783,26 @@ static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset,
605 goto out_free; 783 goto out_free;
606 } 784 }
607 785
786 if (ubi->autoresize_vol_id != -1) {
787 err = autoresize(ubi, ubi->autoresize_vol_id);
788 if (err)
789 goto out_detach;
790 }
791
608 err = uif_init(ubi); 792 err = uif_init(ubi);
609 if (err) 793 if (err)
610 goto out_detach; 794 goto out_detach;
611 795
612 ubi_msg("attached mtd%d to ubi%d", ubi->mtd->index, ubi_devices_cnt); 796 ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
613 ubi_msg("MTD device name: \"%s\"", ubi->mtd->name); 797 if (IS_ERR(ubi->bgt_thread)) {
798 err = PTR_ERR(ubi->bgt_thread);
799 ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
800 err);
801 goto out_uif;
802 }
803
804 ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num);
805 ubi_msg("MTD device name: \"%s\"", mtd->name);
614 ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20); 806 ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20);
615 ubi_msg("physical eraseblock size: %d bytes (%d KiB)", 807 ubi_msg("physical eraseblock size: %d bytes (%d KiB)",
616 ubi->peb_size, ubi->peb_size >> 10); 808 ubi->peb_size, ubi->peb_size >> 10);
@@ -638,9 +830,11 @@ static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset,
638 wake_up_process(ubi->bgt_thread); 830 wake_up_process(ubi->bgt_thread);
639 } 831 }
640 832
641 ubi_devices_cnt += 1; 833 ubi_devices[ubi_num] = ubi;
642 return 0; 834 return ubi_num;
643 835
836out_uif:
837 uif_close(ubi);
644out_detach: 838out_detach:
645 ubi_eba_close(ubi); 839 ubi_eba_close(ubi);
646 ubi_wl_close(ubi); 840 ubi_wl_close(ubi);
@@ -652,21 +846,58 @@ out_free:
652 vfree(ubi->dbg_peb_buf); 846 vfree(ubi->dbg_peb_buf);
653#endif 847#endif
654 kfree(ubi); 848 kfree(ubi);
655out_mtd:
656 put_mtd_device(mtd);
657 ubi_devices[ubi_devices_cnt] = NULL;
658 return err; 849 return err;
659} 850}
660 851
661/** 852/**
662 * detach_mtd_dev - detach an MTD device. 853 * ubi_detach_mtd_dev - detach an MTD device.
663 * @ubi: UBI device description object 854 * @ubi_num: UBI device number to detach from
855 * @anyway: detach MTD even if device reference count is not zero
856 *
857 * This function destroys an UBI device number @ubi_num and detaches the
858 * underlying MTD device. Returns zero in case of success and %-EBUSY if the
859 * UBI device is busy and cannot be destroyed, and %-EINVAL if it does not
860 * exist.
861 *
862 * Note, the invocations of this function has to be serialized by the
863 * @ubi_devices_mutex.
664 */ 864 */
665static void detach_mtd_dev(struct ubi_device *ubi) 865int ubi_detach_mtd_dev(int ubi_num, int anyway)
666{ 866{
667 int ubi_num = ubi->ubi_num, mtd_num = ubi->mtd->index; 867 struct ubi_device *ubi;
868
869 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
870 return -EINVAL;
871
872 spin_lock(&ubi_devices_lock);
873 ubi = ubi_devices[ubi_num];
874 if (!ubi) {
875 spin_unlock(&ubi_devices_lock);
876 return -EINVAL;
877 }
878
879 if (ubi->ref_count) {
880 if (!anyway) {
881 spin_unlock(&ubi_devices_lock);
882 return -EBUSY;
883 }
884 /* This may only happen if there is a bug */
885 ubi_err("%s reference count %d, destroy anyway",
886 ubi->ubi_name, ubi->ref_count);
887 }
888 ubi_devices[ubi_num] = NULL;
889 spin_unlock(&ubi_devices_lock);
668 890
891 ubi_assert(ubi_num == ubi->ubi_num);
669 dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num); 892 dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
893
894 /*
895 * Before freeing anything, we have to stop the background thread to
896 * prevent it from doing anything on this device while we are freeing.
897 */
898 if (ubi->bgt_thread)
899 kthread_stop(ubi->bgt_thread);
900
670 uif_close(ubi); 901 uif_close(ubi);
671 ubi_eba_close(ubi); 902 ubi_eba_close(ubi);
672 ubi_wl_close(ubi); 903 ubi_wl_close(ubi);
@@ -677,11 +908,37 @@ static void detach_mtd_dev(struct ubi_device *ubi)
677#ifdef CONFIG_MTD_UBI_DEBUG 908#ifdef CONFIG_MTD_UBI_DEBUG
678 vfree(ubi->dbg_peb_buf); 909 vfree(ubi->dbg_peb_buf);
679#endif 910#endif
680 kfree(ubi_devices[ubi_num]); 911 ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
681 ubi_devices[ubi_num] = NULL; 912 kfree(ubi);
682 ubi_devices_cnt -= 1; 913 return 0;
683 ubi_assert(ubi_devices_cnt >= 0); 914}
684 ubi_msg("mtd%d is detached from ubi%d", mtd_num, ubi_num); 915
916/**
917 * find_mtd_device - open an MTD device by its name or number.
918 * @mtd_dev: name or number of the device
919 *
920 * This function tries to open and MTD device described by @mtd_dev string,
921 * which is first treated as an ASCII number, and if it is not true, it is
922 * treated as MTD device name. Returns MTD device description object in case of
923 * success and a negative error code in case of failure.
924 */
925static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
926{
927 struct mtd_info *mtd;
928 int mtd_num;
929 char *endp;
930
931 mtd_num = simple_strtoul(mtd_dev, &endp, 0);
932 if (*endp != '\0' || mtd_dev == endp) {
933 /*
934 * This does not look like an ASCII integer, probably this is
935 * MTD device name.
936 */
937 mtd = get_mtd_device_nm(mtd_dev);
938 } else
939 mtd = get_mtd_device(NULL, mtd_num);
940
941 return mtd;
685} 942}
686 943
687static int __init ubi_init(void) 944static int __init ubi_init(void)
@@ -693,47 +950,96 @@ static int __init ubi_init(void)
693 BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64); 950 BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
694 951
695 if (mtd_devs > UBI_MAX_DEVICES) { 952 if (mtd_devs > UBI_MAX_DEVICES) {
696 printk("UBI error: too many MTD devices, maximum is %d\n", 953 printk(KERN_ERR "UBI error: too many MTD devices, "
697 UBI_MAX_DEVICES); 954 "maximum is %d\n", UBI_MAX_DEVICES);
698 return -EINVAL; 955 return -EINVAL;
699 } 956 }
700 957
958 /* Create base sysfs directory and sysfs files */
701 ubi_class = class_create(THIS_MODULE, UBI_NAME_STR); 959 ubi_class = class_create(THIS_MODULE, UBI_NAME_STR);
702 if (IS_ERR(ubi_class)) 960 if (IS_ERR(ubi_class)) {
703 return PTR_ERR(ubi_class); 961 err = PTR_ERR(ubi_class);
962 printk(KERN_ERR "UBI error: cannot create UBI class\n");
963 goto out;
964 }
704 965
705 err = class_create_file(ubi_class, &ubi_version); 966 err = class_create_file(ubi_class, &ubi_version);
706 if (err) 967 if (err) {
968 printk(KERN_ERR "UBI error: cannot create sysfs file\n");
707 goto out_class; 969 goto out_class;
970 }
971
972 err = misc_register(&ubi_ctrl_cdev);
973 if (err) {
974 printk(KERN_ERR "UBI error: cannot register device\n");
975 goto out_version;
976 }
977
978 ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
979 sizeof(struct ubi_wl_entry),
980 0, 0, NULL);
981 if (!ubi_wl_entry_slab)
982 goto out_dev_unreg;
708 983
709 /* Attach MTD devices */ 984 /* Attach MTD devices */
710 for (i = 0; i < mtd_devs; i++) { 985 for (i = 0; i < mtd_devs; i++) {
711 struct mtd_dev_param *p = &mtd_dev_param[i]; 986 struct mtd_dev_param *p = &mtd_dev_param[i];
987 struct mtd_info *mtd;
712 988
713 cond_resched(); 989 cond_resched();
714 err = attach_mtd_dev(p->name, p->vid_hdr_offs, p->data_offs); 990
715 if (err) 991 mtd = open_mtd_device(p->name);
992 if (IS_ERR(mtd)) {
993 err = PTR_ERR(mtd);
994 goto out_detach;
995 }
996
997 mutex_lock(&ubi_devices_mutex);
998 err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO,
999 p->vid_hdr_offs);
1000 mutex_unlock(&ubi_devices_mutex);
1001 if (err < 0) {
1002 put_mtd_device(mtd);
1003 printk(KERN_ERR "UBI error: cannot attach %s\n",
1004 p->name);
716 goto out_detach; 1005 goto out_detach;
1006 }
717 } 1007 }
718 1008
719 return 0; 1009 return 0;
720 1010
721out_detach: 1011out_detach:
722 for (k = 0; k < i; k++) 1012 for (k = 0; k < i; k++)
723 detach_mtd_dev(ubi_devices[k]); 1013 if (ubi_devices[k]) {
1014 mutex_lock(&ubi_devices_mutex);
1015 ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
1016 mutex_unlock(&ubi_devices_mutex);
1017 }
1018 kmem_cache_destroy(ubi_wl_entry_slab);
1019out_dev_unreg:
1020 misc_deregister(&ubi_ctrl_cdev);
1021out_version:
724 class_remove_file(ubi_class, &ubi_version); 1022 class_remove_file(ubi_class, &ubi_version);
725out_class: 1023out_class:
726 class_destroy(ubi_class); 1024 class_destroy(ubi_class);
1025out:
1026 printk(KERN_ERR "UBI error: cannot initialize UBI, error %d\n", err);
727 return err; 1027 return err;
728} 1028}
729module_init(ubi_init); 1029module_init(ubi_init);
730 1030
731static void __exit ubi_exit(void) 1031static void __exit ubi_exit(void)
732{ 1032{
733 int i, n = ubi_devices_cnt; 1033 int i;
734 1034
735 for (i = 0; i < n; i++) 1035 for (i = 0; i < UBI_MAX_DEVICES; i++)
736 detach_mtd_dev(ubi_devices[i]); 1036 if (ubi_devices[i]) {
1037 mutex_lock(&ubi_devices_mutex);
1038 ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1);
1039 mutex_unlock(&ubi_devices_mutex);
1040 }
1041 kmem_cache_destroy(ubi_wl_entry_slab);
1042 misc_deregister(&ubi_ctrl_cdev);
737 class_remove_file(ubi_class, &ubi_version); 1043 class_remove_file(ubi_class, &ubi_version);
738 class_destroy(ubi_class); 1044 class_destroy(ubi_class);
739} 1045}
@@ -754,7 +1060,8 @@ static int __init bytes_str_to_int(const char *str)
754 1060
755 result = simple_strtoul(str, &endp, 0); 1061 result = simple_strtoul(str, &endp, 0);
756 if (str == endp || result < 0) { 1062 if (str == endp || result < 0) {
757 printk("UBI error: incorrect bytes count: \"%s\"\n", str); 1063 printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
1064 str);
758 return -EINVAL; 1065 return -EINVAL;
759 } 1066 }
760 1067
@@ -764,15 +1071,14 @@ static int __init bytes_str_to_int(const char *str)
764 case 'M': 1071 case 'M':
765 result *= 1024; 1072 result *= 1024;
766 case 'K': 1073 case 'K':
767 case 'k':
768 result *= 1024; 1074 result *= 1024;
769 if (endp[1] == 'i' && (endp[2] == '\0' || 1075 if (endp[1] == 'i' && endp[2] == 'B')
770 endp[2] == 'B' || endp[2] == 'b'))
771 endp += 2; 1076 endp += 2;
772 case '\0': 1077 case '\0':
773 break; 1078 break;
774 default: 1079 default:
775 printk("UBI error: incorrect bytes count: \"%s\"\n", str); 1080 printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
1081 str);
776 return -EINVAL; 1082 return -EINVAL;
777 } 1083 }
778 1084
@@ -793,23 +1099,27 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
793 struct mtd_dev_param *p; 1099 struct mtd_dev_param *p;
794 char buf[MTD_PARAM_LEN_MAX]; 1100 char buf[MTD_PARAM_LEN_MAX];
795 char *pbuf = &buf[0]; 1101 char *pbuf = &buf[0];
796 char *tokens[3] = {NULL, NULL, NULL}; 1102 char *tokens[2] = {NULL, NULL};
1103
1104 if (!val)
1105 return -EINVAL;
797 1106
798 if (mtd_devs == UBI_MAX_DEVICES) { 1107 if (mtd_devs == UBI_MAX_DEVICES) {
799 printk("UBI error: too many parameters, max. is %d\n", 1108 printk(KERN_ERR "UBI error: too many parameters, max. is %d\n",
800 UBI_MAX_DEVICES); 1109 UBI_MAX_DEVICES);
801 return -EINVAL; 1110 return -EINVAL;
802 } 1111 }
803 1112
804 len = strnlen(val, MTD_PARAM_LEN_MAX); 1113 len = strnlen(val, MTD_PARAM_LEN_MAX);
805 if (len == MTD_PARAM_LEN_MAX) { 1114 if (len == MTD_PARAM_LEN_MAX) {
806 printk("UBI error: parameter \"%s\" is too long, max. is %d\n", 1115 printk(KERN_ERR "UBI error: parameter \"%s\" is too long, "
807 val, MTD_PARAM_LEN_MAX); 1116 "max. is %d\n", val, MTD_PARAM_LEN_MAX);
808 return -EINVAL; 1117 return -EINVAL;
809 } 1118 }
810 1119
811 if (len == 0) { 1120 if (len == 0) {
812 printk("UBI warning: empty 'mtd=' parameter - ignored\n"); 1121 printk(KERN_WARNING "UBI warning: empty 'mtd=' parameter - "
1122 "ignored\n");
813 return 0; 1123 return 0;
814 } 1124 }
815 1125
@@ -819,11 +1129,12 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
819 if (buf[len - 1] == '\n') 1129 if (buf[len - 1] == '\n')
820 buf[len - 1] = '\0'; 1130 buf[len - 1] = '\0';
821 1131
822 for (i = 0; i < 3; i++) 1132 for (i = 0; i < 2; i++)
823 tokens[i] = strsep(&pbuf, ","); 1133 tokens[i] = strsep(&pbuf, ",");
824 1134
825 if (pbuf) { 1135 if (pbuf) {
826 printk("UBI error: too many arguments at \"%s\"\n", val); 1136 printk(KERN_ERR "UBI error: too many arguments at \"%s\"\n",
1137 val);
827 return -EINVAL; 1138 return -EINVAL;
828 } 1139 }
829 1140
@@ -832,13 +1143,9 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
832 1143
833 if (tokens[1]) 1144 if (tokens[1])
834 p->vid_hdr_offs = bytes_str_to_int(tokens[1]); 1145 p->vid_hdr_offs = bytes_str_to_int(tokens[1]);
835 if (tokens[2])
836 p->data_offs = bytes_str_to_int(tokens[2]);
837 1146
838 if (p->vid_hdr_offs < 0) 1147 if (p->vid_hdr_offs < 0)
839 return p->vid_hdr_offs; 1148 return p->vid_hdr_offs;
840 if (p->data_offs < 0)
841 return p->data_offs;
842 1149
843 mtd_devs += 1; 1150 mtd_devs += 1;
844 return 0; 1151 return 0;
@@ -846,16 +1153,15 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
846 1153
847module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000); 1154module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
848MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: " 1155MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: "
849 "mtd=<name|num>[,<vid_hdr_offs>,<data_offs>]. " 1156 "mtd=<name|num>[,<vid_hdr_offs>].\n"
850 "Multiple \"mtd\" parameters may be specified.\n" 1157 "Multiple \"mtd\" parameters may be specified.\n"
851 "MTD devices may be specified by their number or name. " 1158 "MTD devices may be specified by their number or name.\n"
852 "Optional \"vid_hdr_offs\" and \"data_offs\" parameters " 1159 "Optional \"vid_hdr_offs\" parameter specifies UBI VID "
853 "specify UBI VID header position and data starting " 1160 "header position and data starting position to be used "
854 "position to be used by UBI.\n" 1161 "by UBI.\n"
855 "Example: mtd=content,1984,2048 mtd=4 - attach MTD device" 1162 "Example: mtd=content,1984 mtd=4 - attach MTD device"
856 "with name content using VID header offset 1984 and data " 1163 "with name \"content\" using VID header offset 1984, and "
857 "start 2048, and MTD device number 4 using default " 1164 "MTD device number 4 with default VID header offset.");
858 "offsets");
859 1165
860MODULE_VERSION(__stringify(UBI_VERSION)); 1166MODULE_VERSION(__stringify(UBI_VERSION));
861MODULE_DESCRIPTION("UBI - Unsorted Block Images"); 1167MODULE_DESCRIPTION("UBI - Unsorted Block Images");
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index fe4da1e96c52..9d6aae5449b6 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -28,6 +28,11 @@
28 * 28 *
29 * Major and minor numbers are assigned dynamically to both UBI and volume 29 * Major and minor numbers are assigned dynamically to both UBI and volume
30 * character devices. 30 * character devices.
31 *
32 * Well, there is the third kind of character devices - the UBI control
33 * character device, which allows to manipulate by UBI devices - create and
34 * delete them. In other words, it is used for attaching and detaching MTD
35 * devices.
31 */ 36 */
32 37
33#include <linux/module.h> 38#include <linux/module.h>
@@ -39,34 +44,6 @@
39#include <asm/div64.h> 44#include <asm/div64.h>
40#include "ubi.h" 45#include "ubi.h"
41 46
42/*
43 * Maximum sequence numbers of UBI and volume character device IOCTLs (direct
44 * logical eraseblock erase is a debug-only feature).
45 */
46#define UBI_CDEV_IOC_MAX_SEQ 2
47#ifndef CONFIG_MTD_UBI_DEBUG_USERSPACE_IO
48#define VOL_CDEV_IOC_MAX_SEQ 1
49#else
50#define VOL_CDEV_IOC_MAX_SEQ 2
51#endif
52
53/**
54 * major_to_device - get UBI device object by character device major number.
55 * @major: major number
56 *
57 * This function returns a pointer to the UBI device object.
58 */
59static struct ubi_device *major_to_device(int major)
60{
61 int i;
62
63 for (i = 0; i < ubi_devices_cnt; i++)
64 if (ubi_devices[i] && ubi_devices[i]->major == major)
65 return ubi_devices[i];
66 BUG();
67 return NULL;
68}
69
70/** 47/**
71 * get_exclusive - get exclusive access to an UBI volume. 48 * get_exclusive - get exclusive access to an UBI volume.
72 * @desc: volume descriptor 49 * @desc: volume descriptor
@@ -124,9 +101,11 @@ static void revoke_exclusive(struct ubi_volume_desc *desc, int mode)
124static int vol_cdev_open(struct inode *inode, struct file *file) 101static int vol_cdev_open(struct inode *inode, struct file *file)
125{ 102{
126 struct ubi_volume_desc *desc; 103 struct ubi_volume_desc *desc;
127 const struct ubi_device *ubi = major_to_device(imajor(inode)); 104 int vol_id = iminor(inode) - 1, mode, ubi_num;
128 int vol_id = iminor(inode) - 1; 105
129 int mode; 106 ubi_num = ubi_major2num(imajor(inode));
107 if (ubi_num < 0)
108 return ubi_num;
130 109
131 if (file->f_mode & FMODE_WRITE) 110 if (file->f_mode & FMODE_WRITE)
132 mode = UBI_READWRITE; 111 mode = UBI_READWRITE;
@@ -135,7 +114,7 @@ static int vol_cdev_open(struct inode *inode, struct file *file)
135 114
136 dbg_msg("open volume %d, mode %d", vol_id, mode); 115 dbg_msg("open volume %d, mode %d", vol_id, mode);
137 116
138 desc = ubi_open_volume(ubi->ubi_num, vol_id, mode); 117 desc = ubi_open_volume(ubi_num, vol_id, mode);
139 if (IS_ERR(desc)) 118 if (IS_ERR(desc))
140 return PTR_ERR(desc); 119 return PTR_ERR(desc);
141 120
@@ -153,8 +132,15 @@ static int vol_cdev_release(struct inode *inode, struct file *file)
153 if (vol->updating) { 132 if (vol->updating) {
154 ubi_warn("update of volume %d not finished, volume is damaged", 133 ubi_warn("update of volume %d not finished, volume is damaged",
155 vol->vol_id); 134 vol->vol_id);
135 ubi_assert(!vol->changing_leb);
156 vol->updating = 0; 136 vol->updating = 0;
157 vfree(vol->upd_buf); 137 vfree(vol->upd_buf);
138 } else if (vol->changing_leb) {
139 dbg_msg("only %lld of %lld bytes received for atomic LEB change"
140 " for volume %d:%d, cancel", vol->upd_received,
141 vol->upd_bytes, vol->ubi->ubi_num, vol->vol_id);
142 vol->changing_leb = 0;
143 vfree(vol->upd_buf);
158 } 144 }
159 145
160 ubi_close_volume(desc); 146 ubi_close_volume(desc);
@@ -205,13 +191,13 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
205 struct ubi_volume_desc *desc = file->private_data; 191 struct ubi_volume_desc *desc = file->private_data;
206 struct ubi_volume *vol = desc->vol; 192 struct ubi_volume *vol = desc->vol;
207 struct ubi_device *ubi = vol->ubi; 193 struct ubi_device *ubi = vol->ubi;
208 int err, lnum, off, len, vol_id = desc->vol->vol_id, tbuf_size; 194 int err, lnum, off, len, tbuf_size;
209 size_t count_save = count; 195 size_t count_save = count;
210 void *tbuf; 196 void *tbuf;
211 uint64_t tmp; 197 uint64_t tmp;
212 198
213 dbg_msg("read %zd bytes from offset %lld of volume %d", 199 dbg_msg("read %zd bytes from offset %lld of volume %d",
214 count, *offp, vol_id); 200 count, *offp, vol->vol_id);
215 201
216 if (vol->updating) { 202 if (vol->updating) {
217 dbg_err("updating"); 203 dbg_err("updating");
@@ -225,7 +211,7 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
225 return 0; 211 return 0;
226 212
227 if (vol->corrupted) 213 if (vol->corrupted)
228 dbg_msg("read from corrupted volume %d", vol_id); 214 dbg_msg("read from corrupted volume %d", vol->vol_id);
229 215
230 if (*offp + count > vol->used_bytes) 216 if (*offp + count > vol->used_bytes)
231 count_save = count = vol->used_bytes - *offp; 217 count_save = count = vol->used_bytes - *offp;
@@ -249,7 +235,7 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
249 if (off + len >= vol->usable_leb_size) 235 if (off + len >= vol->usable_leb_size)
250 len = vol->usable_leb_size - off; 236 len = vol->usable_leb_size - off;
251 237
252 err = ubi_eba_read_leb(ubi, vol_id, lnum, tbuf, off, len, 0); 238 err = ubi_eba_read_leb(ubi, vol, lnum, tbuf, off, len, 0);
253 if (err) 239 if (err)
254 break; 240 break;
255 241
@@ -289,13 +275,13 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
289 struct ubi_volume_desc *desc = file->private_data; 275 struct ubi_volume_desc *desc = file->private_data;
290 struct ubi_volume *vol = desc->vol; 276 struct ubi_volume *vol = desc->vol;
291 struct ubi_device *ubi = vol->ubi; 277 struct ubi_device *ubi = vol->ubi;
292 int lnum, off, len, tbuf_size, vol_id = vol->vol_id, err = 0; 278 int lnum, off, len, tbuf_size, err = 0;
293 size_t count_save = count; 279 size_t count_save = count;
294 char *tbuf; 280 char *tbuf;
295 uint64_t tmp; 281 uint64_t tmp;
296 282
297 dbg_msg("requested: write %zd bytes to offset %lld of volume %u", 283 dbg_msg("requested: write %zd bytes to offset %lld of volume %u",
298 count, *offp, desc->vol->vol_id); 284 count, *offp, vol->vol_id);
299 285
300 if (vol->vol_type == UBI_STATIC_VOLUME) 286 if (vol->vol_type == UBI_STATIC_VOLUME)
301 return -EROFS; 287 return -EROFS;
@@ -339,7 +325,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
339 break; 325 break;
340 } 326 }
341 327
342 err = ubi_eba_write_leb(ubi, vol_id, lnum, tbuf, off, len, 328 err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len,
343 UBI_UNKNOWN); 329 UBI_UNKNOWN);
344 if (err) 330 if (err)
345 break; 331 break;
@@ -372,22 +358,32 @@ static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
372 struct ubi_volume *vol = desc->vol; 358 struct ubi_volume *vol = desc->vol;
373 struct ubi_device *ubi = vol->ubi; 359 struct ubi_device *ubi = vol->ubi;
374 360
375 if (!vol->updating) 361 if (!vol->updating && !vol->changing_leb)
376 return vol_cdev_direct_write(file, buf, count, offp); 362 return vol_cdev_direct_write(file, buf, count, offp);
377 363
378 err = ubi_more_update_data(ubi, vol->vol_id, buf, count); 364 if (vol->updating)
365 err = ubi_more_update_data(ubi, vol, buf, count);
366 else
367 err = ubi_more_leb_change_data(ubi, vol, buf, count);
368
379 if (err < 0) { 369 if (err < 0) {
380 ubi_err("cannot write %zd bytes of update data", count); 370 ubi_err("cannot accept more %zd bytes of data, error %d",
371 count, err);
381 return err; 372 return err;
382 } 373 }
383 374
384 if (err) { 375 if (err) {
385 /* 376 /*
386 * Update is finished, @err contains number of actually written 377 * The operation is finished, @err contains number of actually
387 * bytes now. 378 * written bytes.
388 */ 379 */
389 count = err; 380 count = err;
390 381
382 if (vol->changing_leb) {
383 revoke_exclusive(desc, UBI_READWRITE);
384 return count;
385 }
386
391 err = ubi_check_volume(ubi, vol->vol_id); 387 err = ubi_check_volume(ubi, vol->vol_id);
392 if (err < 0) 388 if (err < 0)
393 return err; 389 return err;
@@ -402,7 +398,6 @@ static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
402 revoke_exclusive(desc, UBI_READWRITE); 398 revoke_exclusive(desc, UBI_READWRITE);
403 } 399 }
404 400
405 *offp += count;
406 return count; 401 return count;
407} 402}
408 403
@@ -447,11 +442,46 @@ static int vol_cdev_ioctl(struct inode *inode, struct file *file,
447 if (err < 0) 442 if (err < 0)
448 break; 443 break;
449 444
450 err = ubi_start_update(ubi, vol->vol_id, bytes); 445 err = ubi_start_update(ubi, vol, bytes);
451 if (bytes == 0) 446 if (bytes == 0)
452 revoke_exclusive(desc, UBI_READWRITE); 447 revoke_exclusive(desc, UBI_READWRITE);
448 break;
449 }
450
451 /* Atomic logical eraseblock change command */
452 case UBI_IOCEBCH:
453 {
454 struct ubi_leb_change_req req;
455
456 err = copy_from_user(&req, argp,
457 sizeof(struct ubi_leb_change_req));
458 if (err) {
459 err = -EFAULT;
460 break;
461 }
462
463 if (desc->mode == UBI_READONLY ||
464 vol->vol_type == UBI_STATIC_VOLUME) {
465 err = -EROFS;
466 break;
467 }
468
469 /* Validate the request */
470 err = -EINVAL;
471 if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
472 req.bytes < 0 || req.lnum >= vol->usable_leb_size)
473 break;
474 if (req.dtype != UBI_LONGTERM && req.dtype != UBI_SHORTTERM &&
475 req.dtype != UBI_UNKNOWN)
476 break;
477
478 err = get_exclusive(desc);
479 if (err < 0)
480 break;
453 481
454 file->f_pos = 0; 482 err = ubi_start_leb_change(ubi, vol, &req);
483 if (req.bytes == 0)
484 revoke_exclusive(desc, UBI_READWRITE);
455 break; 485 break;
456 } 486 }
457 487
@@ -467,7 +497,8 @@ static int vol_cdev_ioctl(struct inode *inode, struct file *file,
467 break; 497 break;
468 } 498 }
469 499
470 if (desc->mode == UBI_READONLY) { 500 if (desc->mode == UBI_READONLY ||
501 vol->vol_type == UBI_STATIC_VOLUME) {
471 err = -EROFS; 502 err = -EROFS;
472 break; 503 break;
473 } 504 }
@@ -477,13 +508,8 @@ static int vol_cdev_ioctl(struct inode *inode, struct file *file,
477 break; 508 break;
478 } 509 }
479 510
480 if (vol->vol_type != UBI_DYNAMIC_VOLUME) {
481 err = -EROFS;
482 break;
483 }
484
485 dbg_msg("erase LEB %d:%d", vol->vol_id, lnum); 511 dbg_msg("erase LEB %d:%d", vol->vol_id, lnum);
486 err = ubi_eba_unmap_leb(ubi, vol->vol_id, lnum); 512 err = ubi_eba_unmap_leb(ubi, vol, lnum);
487 if (err) 513 if (err)
488 break; 514 break;
489 515
@@ -580,9 +606,9 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
580 if (!capable(CAP_SYS_RESOURCE)) 606 if (!capable(CAP_SYS_RESOURCE))
581 return -EPERM; 607 return -EPERM;
582 608
583 ubi = major_to_device(imajor(inode)); 609 ubi = ubi_get_by_major(imajor(inode));
584 if (IS_ERR(ubi)) 610 if (!ubi)
585 return PTR_ERR(ubi); 611 return -ENODEV;
586 612
587 switch (cmd) { 613 switch (cmd) {
588 /* Create volume command */ 614 /* Create volume command */
@@ -591,8 +617,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
591 struct ubi_mkvol_req req; 617 struct ubi_mkvol_req req;
592 618
593 dbg_msg("create volume"); 619 dbg_msg("create volume");
594 err = copy_from_user(&req, argp, 620 err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req));
595 sizeof(struct ubi_mkvol_req));
596 if (err) { 621 if (err) {
597 err = -EFAULT; 622 err = -EFAULT;
598 break; 623 break;
@@ -604,7 +629,9 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
604 629
605 req.name[req.name_len] = '\0'; 630 req.name[req.name_len] = '\0';
606 631
632 mutex_lock(&ubi->volumes_mutex);
607 err = ubi_create_volume(ubi, &req); 633 err = ubi_create_volume(ubi, &req);
634 mutex_unlock(&ubi->volumes_mutex);
608 if (err) 635 if (err)
609 break; 636 break;
610 637
@@ -633,10 +660,16 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
633 break; 660 break;
634 } 661 }
635 662
663 mutex_lock(&ubi->volumes_mutex);
636 err = ubi_remove_volume(desc); 664 err = ubi_remove_volume(desc);
637 if (err) 665 mutex_unlock(&ubi->volumes_mutex);
638 ubi_close_volume(desc);
639 666
667 /*
668 * The volume is deleted (unless an error occurred), and the
669 * 'struct ubi_volume' object will be freed when
670 * 'ubi_close_volume()' will call 'put_device()'.
671 */
672 ubi_close_volume(desc);
640 break; 673 break;
641 } 674 }
642 675
@@ -648,8 +681,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
648 struct ubi_rsvol_req req; 681 struct ubi_rsvol_req req;
649 682
650 dbg_msg("re-size volume"); 683 dbg_msg("re-size volume");
651 err = copy_from_user(&req, argp, 684 err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req));
652 sizeof(struct ubi_rsvol_req));
653 if (err) { 685 if (err) {
654 err = -EFAULT; 686 err = -EFAULT;
655 break; 687 break;
@@ -669,7 +701,9 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
669 pebs = !!do_div(tmp, desc->vol->usable_leb_size); 701 pebs = !!do_div(tmp, desc->vol->usable_leb_size);
670 pebs += tmp; 702 pebs += tmp;
671 703
704 mutex_lock(&ubi->volumes_mutex);
672 err = ubi_resize_volume(desc, pebs); 705 err = ubi_resize_volume(desc, pebs);
706 mutex_unlock(&ubi->volumes_mutex);
673 ubi_close_volume(desc); 707 ubi_close_volume(desc);
674 break; 708 break;
675 } 709 }
@@ -679,9 +713,93 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
679 break; 713 break;
680 } 714 }
681 715
716 ubi_put_device(ubi);
682 return err; 717 return err;
683} 718}
684 719
720static int ctrl_cdev_ioctl(struct inode *inode, struct file *file,
721 unsigned int cmd, unsigned long arg)
722{
723 int err = 0;
724 void __user *argp = (void __user *)arg;
725
726 if (!capable(CAP_SYS_RESOURCE))
727 return -EPERM;
728
729 switch (cmd) {
730 /* Attach an MTD device command */
731 case UBI_IOCATT:
732 {
733 struct ubi_attach_req req;
734 struct mtd_info *mtd;
735
736 dbg_msg("attach MTD device");
737 err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req));
738 if (err) {
739 err = -EFAULT;
740 break;
741 }
742
743 if (req.mtd_num < 0 ||
744 (req.ubi_num < 0 && req.ubi_num != UBI_DEV_NUM_AUTO)) {
745 err = -EINVAL;
746 break;
747 }
748
749 mtd = get_mtd_device(NULL, req.mtd_num);
750 if (IS_ERR(mtd)) {
751 err = PTR_ERR(mtd);
752 break;
753 }
754
755 /*
756 * Note, further request verification is done by
757 * 'ubi_attach_mtd_dev()'.
758 */
759 mutex_lock(&ubi_devices_mutex);
760 err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset);
761 mutex_unlock(&ubi_devices_mutex);
762 if (err < 0)
763 put_mtd_device(mtd);
764 else
765 /* @err contains UBI device number */
766 err = put_user(err, (__user int32_t *)argp);
767
768 break;
769 }
770
771 /* Detach an MTD device command */
772 case UBI_IOCDET:
773 {
774 int ubi_num;
775
776 dbg_msg("dettach MTD device");
777 err = get_user(ubi_num, (__user int32_t *)argp);
778 if (err) {
779 err = -EFAULT;
780 break;
781 }
782
783 mutex_lock(&ubi_devices_mutex);
784 err = ubi_detach_mtd_dev(ubi_num, 0);
785 mutex_unlock(&ubi_devices_mutex);
786 break;
787 }
788
789 default:
790 err = -ENOTTY;
791 break;
792 }
793
794 return err;
795}
796
797/* UBI control character device operations */
798struct file_operations ubi_ctrl_cdev_operations = {
799 .ioctl = ctrl_cdev_ioctl,
800 .owner = THIS_MODULE,
801};
802
685/* UBI character device operations */ 803/* UBI character device operations */
686struct file_operations ubi_cdev_operations = { 804struct file_operations ubi_cdev_operations = {
687 .owner = THIS_MODULE, 805 .owner = THIS_MODULE,
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 467722eb618b..51c40b17f1ec 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -39,8 +39,9 @@
39 39
40#ifdef CONFIG_MTD_UBI_DEBUG_MSG 40#ifdef CONFIG_MTD_UBI_DEBUG_MSG
41/* Generic debugging message */ 41/* Generic debugging message */
42#define dbg_msg(fmt, ...) \ 42#define dbg_msg(fmt, ...) \
43 printk(KERN_DEBUG "UBI DBG: %s: " fmt "\n", __FUNCTION__, ##__VA_ARGS__) 43 printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \
44 current->pid, __FUNCTION__, ##__VA_ARGS__)
44 45
45#define ubi_dbg_dump_stack() dump_stack() 46#define ubi_dbg_dump_stack() dump_stack()
46 47
@@ -76,36 +77,28 @@ void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req);
76 77
77#ifdef CONFIG_MTD_UBI_DEBUG_MSG_EBA 78#ifdef CONFIG_MTD_UBI_DEBUG_MSG_EBA
78/* Messages from the eraseblock association unit */ 79/* Messages from the eraseblock association unit */
79#define dbg_eba(fmt, ...) \ 80#define dbg_eba(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
80 printk(KERN_DEBUG "UBI DBG eba: %s: " fmt "\n", __FUNCTION__, \
81 ##__VA_ARGS__)
82#else 81#else
83#define dbg_eba(fmt, ...) ({}) 82#define dbg_eba(fmt, ...) ({})
84#endif 83#endif
85 84
86#ifdef CONFIG_MTD_UBI_DEBUG_MSG_WL 85#ifdef CONFIG_MTD_UBI_DEBUG_MSG_WL
87/* Messages from the wear-leveling unit */ 86/* Messages from the wear-leveling unit */
88#define dbg_wl(fmt, ...) \ 87#define dbg_wl(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
89 printk(KERN_DEBUG "UBI DBG wl: %s: " fmt "\n", __FUNCTION__, \
90 ##__VA_ARGS__)
91#else 88#else
92#define dbg_wl(fmt, ...) ({}) 89#define dbg_wl(fmt, ...) ({})
93#endif 90#endif
94 91
95#ifdef CONFIG_MTD_UBI_DEBUG_MSG_IO 92#ifdef CONFIG_MTD_UBI_DEBUG_MSG_IO
96/* Messages from the input/output unit */ 93/* Messages from the input/output unit */
97#define dbg_io(fmt, ...) \ 94#define dbg_io(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
98 printk(KERN_DEBUG "UBI DBG io: %s: " fmt "\n", __FUNCTION__, \
99 ##__VA_ARGS__)
100#else 95#else
101#define dbg_io(fmt, ...) ({}) 96#define dbg_io(fmt, ...) ({})
102#endif 97#endif
103 98
104#ifdef CONFIG_MTD_UBI_DEBUG_MSG_BLD 99#ifdef CONFIG_MTD_UBI_DEBUG_MSG_BLD
105/* Initialization and build messages */ 100/* Initialization and build messages */
106#define dbg_bld(fmt, ...) \ 101#define dbg_bld(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
107 printk(KERN_DEBUG "UBI DBG bld: %s: " fmt "\n", __FUNCTION__, \
108 ##__VA_ARGS__)
109#else 102#else
110#define dbg_bld(fmt, ...) ({}) 103#define dbg_bld(fmt, ...) ({})
111#endif 104#endif
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 880fa3690352..7ce91ca742b1 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -31,7 +31,7 @@
31 * logical eraseblock it is locked for reading or writing. The per-logical 31 * logical eraseblock it is locked for reading or writing. The per-logical
32 * eraseblock locking is implemented by means of the lock tree. The lock tree 32 * eraseblock locking is implemented by means of the lock tree. The lock tree
33 * is an RB-tree which refers all the currently locked logical eraseblocks. The 33 * is an RB-tree which refers all the currently locked logical eraseblocks. The
34 * lock tree elements are &struct ltree_entry objects. They are indexed by 34 * lock tree elements are &struct ubi_ltree_entry objects. They are indexed by
35 * (@vol_id, @lnum) pairs. 35 * (@vol_id, @lnum) pairs.
36 * 36 *
37 * EBA also maintains the global sequence counter which is incremented each 37 * EBA also maintains the global sequence counter which is incremented each
@@ -50,29 +50,6 @@
50#define EBA_RESERVED_PEBS 1 50#define EBA_RESERVED_PEBS 1
51 51
52/** 52/**
53 * struct ltree_entry - an entry in the lock tree.
54 * @rb: links RB-tree nodes
55 * @vol_id: volume ID of the locked logical eraseblock
56 * @lnum: locked logical eraseblock number
57 * @users: how many tasks are using this logical eraseblock or wait for it
58 * @mutex: read/write mutex to implement read/write access serialization to
59 * the (@vol_id, @lnum) logical eraseblock
60 *
61 * When a logical eraseblock is being locked - corresponding &struct ltree_entry
62 * object is inserted to the lock tree (@ubi->ltree).
63 */
64struct ltree_entry {
65 struct rb_node rb;
66 int vol_id;
67 int lnum;
68 int users;
69 struct rw_semaphore mutex;
70};
71
72/* Slab cache for lock-tree entries */
73static struct kmem_cache *ltree_slab;
74
75/**
76 * next_sqnum - get next sequence number. 53 * next_sqnum - get next sequence number.
77 * @ubi: UBI device description object 54 * @ubi: UBI device description object
78 * 55 *
@@ -101,7 +78,7 @@ static unsigned long long next_sqnum(struct ubi_device *ubi)
101 */ 78 */
102static int ubi_get_compat(const struct ubi_device *ubi, int vol_id) 79static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
103{ 80{
104 if (vol_id == UBI_LAYOUT_VOL_ID) 81 if (vol_id == UBI_LAYOUT_VOLUME_ID)
105 return UBI_LAYOUT_VOLUME_COMPAT; 82 return UBI_LAYOUT_VOLUME_COMPAT;
106 return 0; 83 return 0;
107} 84}
@@ -112,20 +89,20 @@ static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
112 * @vol_id: volume ID 89 * @vol_id: volume ID
113 * @lnum: logical eraseblock number 90 * @lnum: logical eraseblock number
114 * 91 *
115 * This function returns a pointer to the corresponding &struct ltree_entry 92 * This function returns a pointer to the corresponding &struct ubi_ltree_entry
116 * object if the logical eraseblock is locked and %NULL if it is not. 93 * object if the logical eraseblock is locked and %NULL if it is not.
117 * @ubi->ltree_lock has to be locked. 94 * @ubi->ltree_lock has to be locked.
118 */ 95 */
119static struct ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id, 96static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
120 int lnum) 97 int lnum)
121{ 98{
122 struct rb_node *p; 99 struct rb_node *p;
123 100
124 p = ubi->ltree.rb_node; 101 p = ubi->ltree.rb_node;
125 while (p) { 102 while (p) {
126 struct ltree_entry *le; 103 struct ubi_ltree_entry *le;
127 104
128 le = rb_entry(p, struct ltree_entry, rb); 105 le = rb_entry(p, struct ubi_ltree_entry, rb);
129 106
130 if (vol_id < le->vol_id) 107 if (vol_id < le->vol_id)
131 p = p->rb_left; 108 p = p->rb_left;
@@ -155,15 +132,17 @@ static struct ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
155 * Returns pointer to the lock tree entry or %-ENOMEM if memory allocation 132 * Returns pointer to the lock tree entry or %-ENOMEM if memory allocation
156 * failed. 133 * failed.
157 */ 134 */
158static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id, 135static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
159 int lnum) 136 int vol_id, int lnum)
160{ 137{
161 struct ltree_entry *le, *le1, *le_free; 138 struct ubi_ltree_entry *le, *le1, *le_free;
162 139
163 le = kmem_cache_alloc(ltree_slab, GFP_NOFS); 140 le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS);
164 if (!le) 141 if (!le)
165 return ERR_PTR(-ENOMEM); 142 return ERR_PTR(-ENOMEM);
166 143
144 le->users = 0;
145 init_rwsem(&le->mutex);
167 le->vol_id = vol_id; 146 le->vol_id = vol_id;
168 le->lnum = lnum; 147 le->lnum = lnum;
169 148
@@ -189,7 +168,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
189 p = &ubi->ltree.rb_node; 168 p = &ubi->ltree.rb_node;
190 while (*p) { 169 while (*p) {
191 parent = *p; 170 parent = *p;
192 le1 = rb_entry(parent, struct ltree_entry, rb); 171 le1 = rb_entry(parent, struct ubi_ltree_entry, rb);
193 172
194 if (vol_id < le1->vol_id) 173 if (vol_id < le1->vol_id)
195 p = &(*p)->rb_left; 174 p = &(*p)->rb_left;
@@ -211,7 +190,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
211 spin_unlock(&ubi->ltree_lock); 190 spin_unlock(&ubi->ltree_lock);
212 191
213 if (le_free) 192 if (le_free)
214 kmem_cache_free(ltree_slab, le_free); 193 kfree(le_free);
215 194
216 return le; 195 return le;
217} 196}
@@ -227,7 +206,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
227 */ 206 */
228static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum) 207static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
229{ 208{
230 struct ltree_entry *le; 209 struct ubi_ltree_entry *le;
231 210
232 le = ltree_add_entry(ubi, vol_id, lnum); 211 le = ltree_add_entry(ubi, vol_id, lnum);
233 if (IS_ERR(le)) 212 if (IS_ERR(le))
@@ -245,7 +224,7 @@ static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
245static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum) 224static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
246{ 225{
247 int free = 0; 226 int free = 0;
248 struct ltree_entry *le; 227 struct ubi_ltree_entry *le;
249 228
250 spin_lock(&ubi->ltree_lock); 229 spin_lock(&ubi->ltree_lock);
251 le = ltree_lookup(ubi, vol_id, lnum); 230 le = ltree_lookup(ubi, vol_id, lnum);
@@ -259,7 +238,7 @@ static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
259 238
260 up_read(&le->mutex); 239 up_read(&le->mutex);
261 if (free) 240 if (free)
262 kmem_cache_free(ltree_slab, le); 241 kfree(le);
263} 242}
264 243
265/** 244/**
@@ -273,7 +252,7 @@ static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
273 */ 252 */
274static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum) 253static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
275{ 254{
276 struct ltree_entry *le; 255 struct ubi_ltree_entry *le;
277 256
278 le = ltree_add_entry(ubi, vol_id, lnum); 257 le = ltree_add_entry(ubi, vol_id, lnum);
279 if (IS_ERR(le)) 258 if (IS_ERR(le))
@@ -283,6 +262,44 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
283} 262}
284 263
285/** 264/**
265 * leb_write_lock - lock logical eraseblock for writing.
266 * @ubi: UBI device description object
267 * @vol_id: volume ID
268 * @lnum: logical eraseblock number
269 *
270 * This function locks a logical eraseblock for writing if there is no
271 * contention and does nothing if there is contention. Returns %0 in case of
272 * success, %1 in case of contention, and and a negative error code in case of
273 * failure.
274 */
275static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
276{
277 int free;
278 struct ubi_ltree_entry *le;
279
280 le = ltree_add_entry(ubi, vol_id, lnum);
281 if (IS_ERR(le))
282 return PTR_ERR(le);
283 if (down_write_trylock(&le->mutex))
284 return 0;
285
286 /* Contention, cancel */
287 spin_lock(&ubi->ltree_lock);
288 le->users -= 1;
289 ubi_assert(le->users >= 0);
290 if (le->users == 0) {
291 rb_erase(&le->rb, &ubi->ltree);
292 free = 1;
293 } else
294 free = 0;
295 spin_unlock(&ubi->ltree_lock);
296 if (free)
297 kfree(le);
298
299 return 1;
300}
301
302/**
286 * leb_write_unlock - unlock logical eraseblock. 303 * leb_write_unlock - unlock logical eraseblock.
287 * @ubi: UBI device description object 304 * @ubi: UBI device description object
288 * @vol_id: volume ID 305 * @vol_id: volume ID
@@ -291,7 +308,7 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
291static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum) 308static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
292{ 309{
293 int free; 310 int free;
294 struct ltree_entry *le; 311 struct ubi_ltree_entry *le;
295 312
296 spin_lock(&ubi->ltree_lock); 313 spin_lock(&ubi->ltree_lock);
297 le = ltree_lookup(ubi, vol_id, lnum); 314 le = ltree_lookup(ubi, vol_id, lnum);
@@ -306,23 +323,23 @@ static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
306 323
307 up_write(&le->mutex); 324 up_write(&le->mutex);
308 if (free) 325 if (free)
309 kmem_cache_free(ltree_slab, le); 326 kfree(le);
310} 327}
311 328
312/** 329/**
313 * ubi_eba_unmap_leb - un-map logical eraseblock. 330 * ubi_eba_unmap_leb - un-map logical eraseblock.
314 * @ubi: UBI device description object 331 * @ubi: UBI device description object
315 * @vol_id: volume ID 332 * @vol: volume description object
316 * @lnum: logical eraseblock number 333 * @lnum: logical eraseblock number
317 * 334 *
318 * This function un-maps logical eraseblock @lnum and schedules corresponding 335 * This function un-maps logical eraseblock @lnum and schedules corresponding
319 * physical eraseblock for erasure. Returns zero in case of success and a 336 * physical eraseblock for erasure. Returns zero in case of success and a
320 * negative error code in case of failure. 337 * negative error code in case of failure.
321 */ 338 */
322int ubi_eba_unmap_leb(struct ubi_device *ubi, int vol_id, int lnum) 339int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
340 int lnum)
323{ 341{
324 int idx = vol_id2idx(ubi, vol_id), err, pnum; 342 int err, pnum, vol_id = vol->vol_id;
325 struct ubi_volume *vol = ubi->volumes[idx];
326 343
327 if (ubi->ro_mode) 344 if (ubi->ro_mode)
328 return -EROFS; 345 return -EROFS;
@@ -349,7 +366,7 @@ out_unlock:
349/** 366/**
350 * ubi_eba_read_leb - read data. 367 * ubi_eba_read_leb - read data.
351 * @ubi: UBI device description object 368 * @ubi: UBI device description object
352 * @vol_id: volume ID 369 * @vol: volume description object
353 * @lnum: logical eraseblock number 370 * @lnum: logical eraseblock number
354 * @buf: buffer to store the read data 371 * @buf: buffer to store the read data
355 * @offset: offset from where to read 372 * @offset: offset from where to read
@@ -365,12 +382,11 @@ out_unlock:
365 * returned for any volume type if an ECC error was detected by the MTD device 382 * returned for any volume type if an ECC error was detected by the MTD device
366 * driver. Other negative error cored may be returned in case of other errors. 383 * driver. Other negative error cored may be returned in case of other errors.
367 */ 384 */
368int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf, 385int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
369 int offset, int len, int check) 386 void *buf, int offset, int len, int check)
370{ 387{
371 int err, pnum, scrub = 0, idx = vol_id2idx(ubi, vol_id); 388 int err, pnum, scrub = 0, vol_id = vol->vol_id;
372 struct ubi_vid_hdr *vid_hdr; 389 struct ubi_vid_hdr *vid_hdr;
373 struct ubi_volume *vol = ubi->volumes[idx];
374 uint32_t uninitialized_var(crc); 390 uint32_t uninitialized_var(crc);
375 391
376 err = leb_read_lock(ubi, vol_id, lnum); 392 err = leb_read_lock(ubi, vol_id, lnum);
@@ -578,7 +594,7 @@ write_error:
578/** 594/**
579 * ubi_eba_write_leb - write data to dynamic volume. 595 * ubi_eba_write_leb - write data to dynamic volume.
580 * @ubi: UBI device description object 596 * @ubi: UBI device description object
581 * @vol_id: volume ID 597 * @vol: volume description object
582 * @lnum: logical eraseblock number 598 * @lnum: logical eraseblock number
583 * @buf: the data to write 599 * @buf: the data to write
584 * @offset: offset within the logical eraseblock where to write 600 * @offset: offset within the logical eraseblock where to write
@@ -586,15 +602,14 @@ write_error:
586 * @dtype: data type 602 * @dtype: data type
587 * 603 *
588 * This function writes data to logical eraseblock @lnum of a dynamic volume 604 * This function writes data to logical eraseblock @lnum of a dynamic volume
589 * @vol_id. Returns zero in case of success and a negative error code in case 605 * @vol. Returns zero in case of success and a negative error code in case
590 * of failure. In case of error, it is possible that something was still 606 * of failure. In case of error, it is possible that something was still
591 * written to the flash media, but may be some garbage. 607 * written to the flash media, but may be some garbage.
592 */ 608 */
593int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum, 609int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
594 const void *buf, int offset, int len, int dtype) 610 const void *buf, int offset, int len, int dtype)
595{ 611{
596 int idx = vol_id2idx(ubi, vol_id), err, pnum, tries = 0; 612 int err, pnum, tries = 0, vol_id = vol->vol_id;
597 struct ubi_volume *vol = ubi->volumes[idx];
598 struct ubi_vid_hdr *vid_hdr; 613 struct ubi_vid_hdr *vid_hdr;
599 614
600 if (ubi->ro_mode) 615 if (ubi->ro_mode)
@@ -613,7 +628,8 @@ int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum,
613 if (err) { 628 if (err) {
614 ubi_warn("failed to write data to PEB %d", pnum); 629 ubi_warn("failed to write data to PEB %d", pnum);
615 if (err == -EIO && ubi->bad_allowed) 630 if (err == -EIO && ubi->bad_allowed)
616 err = recover_peb(ubi, pnum, vol_id, lnum, buf, offset, len); 631 err = recover_peb(ubi, pnum, vol_id, lnum, buf,
632 offset, len);
617 if (err) 633 if (err)
618 ubi_ro_mode(ubi); 634 ubi_ro_mode(ubi);
619 } 635 }
@@ -656,11 +672,14 @@ retry:
656 goto write_error; 672 goto write_error;
657 } 673 }
658 674
659 err = ubi_io_write_data(ubi, buf, pnum, offset, len); 675 if (len) {
660 if (err) { 676 err = ubi_io_write_data(ubi, buf, pnum, offset, len);
661 ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, " 677 if (err) {
662 "PEB %d", len, offset, vol_id, lnum, pnum); 678 ubi_warn("failed to write %d bytes at offset %d of "
663 goto write_error; 679 "LEB %d:%d, PEB %d", len, offset, vol_id,
680 lnum, pnum);
681 goto write_error;
682 }
664 } 683 }
665 684
666 vol->eba_tbl[lnum] = pnum; 685 vol->eba_tbl[lnum] = pnum;
@@ -698,7 +717,7 @@ write_error:
698/** 717/**
699 * ubi_eba_write_leb_st - write data to static volume. 718 * ubi_eba_write_leb_st - write data to static volume.
700 * @ubi: UBI device description object 719 * @ubi: UBI device description object
701 * @vol_id: volume ID 720 * @vol: volume description object
702 * @lnum: logical eraseblock number 721 * @lnum: logical eraseblock number
703 * @buf: data to write 722 * @buf: data to write
704 * @len: how many bytes to write 723 * @len: how many bytes to write
@@ -706,7 +725,7 @@ write_error:
706 * @used_ebs: how many logical eraseblocks will this volume contain 725 * @used_ebs: how many logical eraseblocks will this volume contain
707 * 726 *
708 * This function writes data to logical eraseblock @lnum of static volume 727 * This function writes data to logical eraseblock @lnum of static volume
709 * @vol_id. The @used_ebs argument should contain total number of logical 728 * @vol. The @used_ebs argument should contain total number of logical
710 * eraseblock in this static volume. 729 * eraseblock in this static volume.
711 * 730 *
712 * When writing to the last logical eraseblock, the @len argument doesn't have 731 * When writing to the last logical eraseblock, the @len argument doesn't have
@@ -718,12 +737,11 @@ write_error:
718 * volumes. This function returns zero in case of success and a negative error 737 * volumes. This function returns zero in case of success and a negative error
719 * code in case of failure. 738 * code in case of failure.
720 */ 739 */
721int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum, 740int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
722 const void *buf, int len, int dtype, int used_ebs) 741 int lnum, const void *buf, int len, int dtype,
742 int used_ebs)
723{ 743{
724 int err, pnum, tries = 0, data_size = len; 744 int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id;
725 int idx = vol_id2idx(ubi, vol_id);
726 struct ubi_volume *vol = ubi->volumes[idx];
727 struct ubi_vid_hdr *vid_hdr; 745 struct ubi_vid_hdr *vid_hdr;
728 uint32_t crc; 746 uint32_t crc;
729 747
@@ -819,7 +837,7 @@ write_error:
819/* 837/*
820 * ubi_eba_atomic_leb_change - change logical eraseblock atomically. 838 * ubi_eba_atomic_leb_change - change logical eraseblock atomically.
821 * @ubi: UBI device description object 839 * @ubi: UBI device description object
822 * @vol_id: volume ID 840 * @vol: volume description object
823 * @lnum: logical eraseblock number 841 * @lnum: logical eraseblock number
824 * @buf: data to write 842 * @buf: data to write
825 * @len: how many bytes to write 843 * @len: how many bytes to write
@@ -834,17 +852,27 @@ write_error:
834 * UBI reserves one LEB for the "atomic LEB change" operation, so only one 852 * UBI reserves one LEB for the "atomic LEB change" operation, so only one
835 * LEB change may be done at a time. This is ensured by @ubi->alc_mutex. 853 * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
836 */ 854 */
837int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum, 855int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
838 const void *buf, int len, int dtype) 856 int lnum, const void *buf, int len, int dtype)
839{ 857{
840 int err, pnum, tries = 0, idx = vol_id2idx(ubi, vol_id); 858 int err, pnum, tries = 0, vol_id = vol->vol_id;
841 struct ubi_volume *vol = ubi->volumes[idx];
842 struct ubi_vid_hdr *vid_hdr; 859 struct ubi_vid_hdr *vid_hdr;
843 uint32_t crc; 860 uint32_t crc;
844 861
845 if (ubi->ro_mode) 862 if (ubi->ro_mode)
846 return -EROFS; 863 return -EROFS;
847 864
865 if (len == 0) {
866 /*
867 * Special case when data length is zero. In this case the LEB
868 * has to be unmapped and mapped somewhere else.
869 */
870 err = ubi_eba_unmap_leb(ubi, vol, lnum);
871 if (err)
872 return err;
873 return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
874 }
875
848 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); 876 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
849 if (!vid_hdr) 877 if (!vid_hdr)
850 return -ENOMEM; 878 return -ENOMEM;
@@ -928,20 +956,6 @@ write_error:
928} 956}
929 957
930/** 958/**
931 * ltree_entry_ctor - lock tree entries slab cache constructor.
932 * @obj: the lock-tree entry to construct
933 * @cache: the lock tree entry slab cache
934 * @flags: constructor flags
935 */
936static void ltree_entry_ctor(struct kmem_cache *cache, void *obj)
937{
938 struct ltree_entry *le = obj;
939
940 le->users = 0;
941 init_rwsem(&le->mutex);
942}
943
944/**
945 * ubi_eba_copy_leb - copy logical eraseblock. 959 * ubi_eba_copy_leb - copy logical eraseblock.
946 * @ubi: UBI device description object 960 * @ubi: UBI device description object
947 * @from: physical eraseblock number from where to copy 961 * @from: physical eraseblock number from where to copy
@@ -950,14 +964,16 @@ static void ltree_entry_ctor(struct kmem_cache *cache, void *obj)
950 * 964 *
951 * This function copies logical eraseblock from physical eraseblock @from to 965 * This function copies logical eraseblock from physical eraseblock @from to
952 * physical eraseblock @to. The @vid_hdr buffer may be changed by this 966 * physical eraseblock @to. The @vid_hdr buffer may be changed by this
953 * function. Returns zero in case of success, %UBI_IO_BITFLIPS if the operation 967 * function. Returns:
954 * was canceled because bit-flips were detected at the target PEB, and a 968 * o %0 in case of success;
955 * negative error code in case of failure. 969 * o %1 if the operation was canceled and should be tried later (e.g.,
970 * because a bit-flip was detected at the target PEB);
971 * o %2 if the volume is being deleted and this LEB should not be moved.
956 */ 972 */
957int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, 973int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
958 struct ubi_vid_hdr *vid_hdr) 974 struct ubi_vid_hdr *vid_hdr)
959{ 975{
960 int err, vol_id, lnum, data_size, aldata_size, pnum, idx; 976 int err, vol_id, lnum, data_size, aldata_size, idx;
961 struct ubi_volume *vol; 977 struct ubi_volume *vol;
962 uint32_t crc; 978 uint32_t crc;
963 979
@@ -973,51 +989,67 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
973 data_size = aldata_size = 989 data_size = aldata_size =
974 ubi->leb_size - be32_to_cpu(vid_hdr->data_pad); 990 ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);
975 991
976 /*
977 * We do not want anybody to write to this logical eraseblock while we
978 * are moving it, so we lock it.
979 */
980 err = leb_write_lock(ubi, vol_id, lnum);
981 if (err)
982 return err;
983
984 mutex_lock(&ubi->buf_mutex);
985
986 /*
987 * But the logical eraseblock might have been put by this time.
988 * Cancel if it is true.
989 */
990 idx = vol_id2idx(ubi, vol_id); 992 idx = vol_id2idx(ubi, vol_id);
991 993 spin_lock(&ubi->volumes_lock);
992 /* 994 /*
993 * We may race with volume deletion/re-size, so we have to hold 995 * Note, we may race with volume deletion, which means that the volume
994 * @ubi->volumes_lock. 996 * this logical eraseblock belongs to might be being deleted. Since the
997 * volume deletion unmaps all the volume's logical eraseblocks, it will
998 * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
995 */ 999 */
996 spin_lock(&ubi->volumes_lock);
997 vol = ubi->volumes[idx]; 1000 vol = ubi->volumes[idx];
998 if (!vol) { 1001 if (!vol) {
999 dbg_eba("volume %d was removed meanwhile", vol_id); 1002 /* No need to do further work, cancel */
1003 dbg_eba("volume %d is being removed, cancel", vol_id);
1000 spin_unlock(&ubi->volumes_lock); 1004 spin_unlock(&ubi->volumes_lock);
1001 goto out_unlock; 1005 return 2;
1002 } 1006 }
1007 spin_unlock(&ubi->volumes_lock);
1003 1008
1004 pnum = vol->eba_tbl[lnum]; 1009 /*
1005 if (pnum != from) { 1010 * We do not want anybody to write to this logical eraseblock while we
1006 dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to " 1011 * are moving it, so lock it.
1007 "PEB %d, cancel", vol_id, lnum, from, pnum); 1012 *
1008 spin_unlock(&ubi->volumes_lock); 1013 * Note, we are using non-waiting locking here, because we cannot sleep
1009 goto out_unlock; 1014 * on the LEB, since it may cause deadlocks. Indeed, imagine a task is
1015 * unmapping the LEB which is mapped to the PEB we are going to move
1016 * (@from). This task locks the LEB and goes sleep in the
1017 * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
1018 * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
1019 * LEB is already locked, we just do not move it and return %1.
1020 */
1021 err = leb_write_trylock(ubi, vol_id, lnum);
1022 if (err) {
1023 dbg_eba("contention on LEB %d:%d, cancel", vol_id, lnum);
1024 return err;
1010 } 1025 }
1011 spin_unlock(&ubi->volumes_lock);
1012 1026
1013 /* OK, now the LEB is locked and we can safely start moving it */ 1027 /*
1028 * The LEB might have been put meanwhile, and the task which put it is
1029 * probably waiting on @ubi->move_mutex. No need to continue the work,
1030 * cancel it.
1031 */
1032 if (vol->eba_tbl[lnum] != from) {
1033 dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to "
1034 "PEB %d, cancel", vol_id, lnum, from,
1035 vol->eba_tbl[lnum]);
1036 err = 1;
1037 goto out_unlock_leb;
1038 }
1014 1039
1040 /*
1041 * OK, now the LEB is locked and we can safely start moving iy. Since
1042 * this function utilizes thie @ubi->peb1_buf buffer which is shared
1043 * with some other functions, so lock the buffer by taking the
1044 * @ubi->buf_mutex.
1045 */
1046 mutex_lock(&ubi->buf_mutex);
1015 dbg_eba("read %d bytes of data", aldata_size); 1047 dbg_eba("read %d bytes of data", aldata_size);
1016 err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size); 1048 err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size);
1017 if (err && err != UBI_IO_BITFLIPS) { 1049 if (err && err != UBI_IO_BITFLIPS) {
1018 ubi_warn("error %d while reading data from PEB %d", 1050 ubi_warn("error %d while reading data from PEB %d",
1019 err, from); 1051 err, from);
1020 goto out_unlock; 1052 goto out_unlock_buf;
1021 } 1053 }
1022 1054
1023 /* 1055 /*
@@ -1053,7 +1085,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1053 1085
1054 err = ubi_io_write_vid_hdr(ubi, to, vid_hdr); 1086 err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
1055 if (err) 1087 if (err)
1056 goto out_unlock; 1088 goto out_unlock_buf;
1057 1089
1058 cond_resched(); 1090 cond_resched();
1059 1091
@@ -1062,13 +1094,15 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1062 if (err) { 1094 if (err) {
1063 if (err != UBI_IO_BITFLIPS) 1095 if (err != UBI_IO_BITFLIPS)
1064 ubi_warn("cannot read VID header back from PEB %d", to); 1096 ubi_warn("cannot read VID header back from PEB %d", to);
1065 goto out_unlock; 1097 else
1098 err = 1;
1099 goto out_unlock_buf;
1066 } 1100 }
1067 1101
1068 if (data_size > 0) { 1102 if (data_size > 0) {
1069 err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size); 1103 err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size);
1070 if (err) 1104 if (err)
1071 goto out_unlock; 1105 goto out_unlock_buf;
1072 1106
1073 cond_resched(); 1107 cond_resched();
1074 1108
@@ -1082,7 +1116,9 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1082 if (err != UBI_IO_BITFLIPS) 1116 if (err != UBI_IO_BITFLIPS)
1083 ubi_warn("cannot read data back from PEB %d", 1117 ubi_warn("cannot read data back from PEB %d",
1084 to); 1118 to);
1085 goto out_unlock; 1119 else
1120 err = 1;
1121 goto out_unlock_buf;
1086 } 1122 }
1087 1123
1088 cond_resched(); 1124 cond_resched();
@@ -1090,15 +1126,16 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1090 if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) { 1126 if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) {
1091 ubi_warn("read data back from PEB %d - it is different", 1127 ubi_warn("read data back from PEB %d - it is different",
1092 to); 1128 to);
1093 goto out_unlock; 1129 goto out_unlock_buf;
1094 } 1130 }
1095 } 1131 }
1096 1132
1097 ubi_assert(vol->eba_tbl[lnum] == from); 1133 ubi_assert(vol->eba_tbl[lnum] == from);
1098 vol->eba_tbl[lnum] = to; 1134 vol->eba_tbl[lnum] = to;
1099 1135
1100out_unlock: 1136out_unlock_buf:
1101 mutex_unlock(&ubi->buf_mutex); 1137 mutex_unlock(&ubi->buf_mutex);
1138out_unlock_leb:
1102 leb_write_unlock(ubi, vol_id, lnum); 1139 leb_write_unlock(ubi, vol_id, lnum);
1103 return err; 1140 return err;
1104} 1141}
@@ -1125,14 +1162,6 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1125 mutex_init(&ubi->alc_mutex); 1162 mutex_init(&ubi->alc_mutex);
1126 ubi->ltree = RB_ROOT; 1163 ubi->ltree = RB_ROOT;
1127 1164
1128 if (ubi_devices_cnt == 0) {
1129 ltree_slab = kmem_cache_create("ubi_ltree_slab",
1130 sizeof(struct ltree_entry), 0,
1131 0, &ltree_entry_ctor);
1132 if (!ltree_slab)
1133 return -ENOMEM;
1134 }
1135
1136 ubi->global_sqnum = si->max_sqnum + 1; 1165 ubi->global_sqnum = si->max_sqnum + 1;
1137 num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT; 1166 num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
1138 1167
@@ -1168,6 +1197,15 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1168 } 1197 }
1169 } 1198 }
1170 1199
1200 if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
1201 ubi_err("no enough physical eraseblocks (%d, need %d)",
1202 ubi->avail_pebs, EBA_RESERVED_PEBS);
1203 err = -ENOSPC;
1204 goto out_free;
1205 }
1206 ubi->avail_pebs -= EBA_RESERVED_PEBS;
1207 ubi->rsvd_pebs += EBA_RESERVED_PEBS;
1208
1171 if (ubi->bad_allowed) { 1209 if (ubi->bad_allowed) {
1172 ubi_calculate_reserved(ubi); 1210 ubi_calculate_reserved(ubi);
1173 1211
@@ -1184,15 +1222,6 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1184 ubi->rsvd_pebs += ubi->beb_rsvd_pebs; 1222 ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
1185 } 1223 }
1186 1224
1187 if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
1188 ubi_err("no enough physical eraseblocks (%d, need %d)",
1189 ubi->avail_pebs, EBA_RESERVED_PEBS);
1190 err = -ENOSPC;
1191 goto out_free;
1192 }
1193 ubi->avail_pebs -= EBA_RESERVED_PEBS;
1194 ubi->rsvd_pebs += EBA_RESERVED_PEBS;
1195
1196 dbg_eba("EBA unit is initialized"); 1225 dbg_eba("EBA unit is initialized");
1197 return 0; 1226 return 0;
1198 1227
@@ -1202,8 +1231,6 @@ out_free:
1202 continue; 1231 continue;
1203 kfree(ubi->volumes[i]->eba_tbl); 1232 kfree(ubi->volumes[i]->eba_tbl);
1204 } 1233 }
1205 if (ubi_devices_cnt == 0)
1206 kmem_cache_destroy(ltree_slab);
1207 return err; 1234 return err;
1208} 1235}
1209 1236
@@ -1222,6 +1249,4 @@ void ubi_eba_close(const struct ubi_device *ubi)
1222 continue; 1249 continue;
1223 kfree(ubi->volumes[i]->eba_tbl); 1250 kfree(ubi->volumes[i]->eba_tbl);
1224 } 1251 }
1225 if (ubi_devices_cnt == 1)
1226 kmem_cache_destroy(ltree_slab);
1227} 1252}
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 41ff74c60e14..d397219238d3 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -129,8 +129,7 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
129 if (to_read > total_read) 129 if (to_read > total_read)
130 to_read = total_read; 130 to_read = total_read;
131 131
132 err = ubi_eba_read_leb(ubi, vol->vol_id, lnum, buf, offs, 132 err = ubi_eba_read_leb(ubi, vol, lnum, buf, offs, to_read, 0);
133 to_read, 0);
134 if (err) 133 if (err)
135 break; 134 break;
136 135
@@ -187,8 +186,8 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
187 if (to_write > total_written) 186 if (to_write > total_written)
188 to_write = total_written; 187 to_write = total_written;
189 188
190 err = ubi_eba_write_leb(ubi, vol->vol_id, lnum, buf, offs, 189 err = ubi_eba_write_leb(ubi, vol, lnum, buf, offs, to_write,
191 to_write, UBI_UNKNOWN); 190 UBI_UNKNOWN);
192 if (err) 191 if (err)
193 break; 192 break;
194 193
@@ -237,7 +236,7 @@ static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr)
237 return -EROFS; 236 return -EROFS;
238 237
239 for (i = 0; i < count; i++) { 238 for (i = 0; i < count; i++) {
240 err = ubi_eba_unmap_leb(ubi, vol->vol_id, lnum + i); 239 err = ubi_eba_unmap_leb(ubi, vol, lnum + i);
241 if (err) 240 if (err)
242 goto out_err; 241 goto out_err;
243 } 242 }
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 7c304eec78b5..db3efdef2433 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -173,6 +173,16 @@ retry:
173 ubi_err("error %d while reading %d bytes from PEB %d:%d, " 173 ubi_err("error %d while reading %d bytes from PEB %d:%d, "
174 "read %zd bytes", err, len, pnum, offset, read); 174 "read %zd bytes", err, len, pnum, offset, read);
175 ubi_dbg_dump_stack(); 175 ubi_dbg_dump_stack();
176
177 /*
178 * The driver should never return -EBADMSG if it failed to read
179 * all the requested data. But some buggy drivers might do
180 * this, so we change it to -EIO.
181 */
182 if (read != len && err == -EBADMSG) {
183 ubi_assert(0);
184 err = -EIO;
185 }
176 } else { 186 } else {
177 ubi_assert(len == read); 187 ubi_assert(len == read);
178 188
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 03c774f41549..a70d58823f8d 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -30,23 +30,27 @@
30 * @ubi_num: UBI device number 30 * @ubi_num: UBI device number
31 * @di: the information is stored here 31 * @di: the information is stored here
32 * 32 *
33 * This function returns %0 in case of success and a %-ENODEV if there is no 33 * This function returns %0 in case of success, %-EINVAL if the UBI device
34 * such UBI device. 34 * number is invalid, and %-ENODEV if there is no such UBI device.
35 */ 35 */
36int ubi_get_device_info(int ubi_num, struct ubi_device_info *di) 36int ubi_get_device_info(int ubi_num, struct ubi_device_info *di)
37{ 37{
38 const struct ubi_device *ubi; 38 struct ubi_device *ubi;
39
40 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
41 return -EINVAL;
39 42
40 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES || 43 ubi = ubi_get_device(ubi_num);
41 !ubi_devices[ubi_num]) 44 if (!ubi)
42 return -ENODEV; 45 return -ENODEV;
43 46
44 ubi = ubi_devices[ubi_num];
45 di->ubi_num = ubi->ubi_num; 47 di->ubi_num = ubi->ubi_num;
46 di->leb_size = ubi->leb_size; 48 di->leb_size = ubi->leb_size;
47 di->min_io_size = ubi->min_io_size; 49 di->min_io_size = ubi->min_io_size;
48 di->ro_mode = ubi->ro_mode; 50 di->ro_mode = ubi->ro_mode;
49 di->cdev = MKDEV(ubi->major, 0); 51 di->cdev = ubi->cdev.dev;
52
53 ubi_put_device(ubi);
50 return 0; 54 return 0;
51} 55}
52EXPORT_SYMBOL_GPL(ubi_get_device_info); 56EXPORT_SYMBOL_GPL(ubi_get_device_info);
@@ -73,7 +77,7 @@ void ubi_get_volume_info(struct ubi_volume_desc *desc,
73 vi->usable_leb_size = vol->usable_leb_size; 77 vi->usable_leb_size = vol->usable_leb_size;
74 vi->name_len = vol->name_len; 78 vi->name_len = vol->name_len;
75 vi->name = vol->name; 79 vi->name = vol->name;
76 vi->cdev = MKDEV(ubi->major, vi->vol_id + 1); 80 vi->cdev = vol->cdev.dev;
77} 81}
78EXPORT_SYMBOL_GPL(ubi_get_volume_info); 82EXPORT_SYMBOL_GPL(ubi_get_volume_info);
79 83
@@ -104,37 +108,39 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
104 108
105 dbg_msg("open device %d volume %d, mode %d", ubi_num, vol_id, mode); 109 dbg_msg("open device %d volume %d, mode %d", ubi_num, vol_id, mode);
106 110
107 err = -ENODEV; 111 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
108 if (ubi_num < 0) 112 return ERR_PTR(-EINVAL);
109 return ERR_PTR(err);
110
111 ubi = ubi_devices[ubi_num];
112
113 if (!try_module_get(THIS_MODULE))
114 return ERR_PTR(err);
115
116 if (ubi_num >= UBI_MAX_DEVICES || !ubi)
117 goto out_put;
118 113
119 err = -EINVAL;
120 if (vol_id < 0 || vol_id >= ubi->vtbl_slots)
121 goto out_put;
122 if (mode != UBI_READONLY && mode != UBI_READWRITE && 114 if (mode != UBI_READONLY && mode != UBI_READWRITE &&
123 mode != UBI_EXCLUSIVE) 115 mode != UBI_EXCLUSIVE)
124 goto out_put; 116 return ERR_PTR(-EINVAL);
117
118 /*
119 * First of all, we have to get the UBI device to prevent its removal.
120 */
121 ubi = ubi_get_device(ubi_num);
122 if (!ubi)
123 return ERR_PTR(-ENODEV);
124
125 if (vol_id < 0 || vol_id >= ubi->vtbl_slots) {
126 err = -EINVAL;
127 goto out_put_ubi;
128 }
125 129
126 desc = kmalloc(sizeof(struct ubi_volume_desc), GFP_KERNEL); 130 desc = kmalloc(sizeof(struct ubi_volume_desc), GFP_KERNEL);
127 if (!desc) { 131 if (!desc) {
128 err = -ENOMEM; 132 err = -ENOMEM;
129 goto out_put; 133 goto out_put_ubi;
130 } 134 }
131 135
136 err = -ENODEV;
137 if (!try_module_get(THIS_MODULE))
138 goto out_free;
139
132 spin_lock(&ubi->volumes_lock); 140 spin_lock(&ubi->volumes_lock);
133 vol = ubi->volumes[vol_id]; 141 vol = ubi->volumes[vol_id];
134 if (!vol) { 142 if (!vol)
135 err = -ENODEV;
136 goto out_unlock; 143 goto out_unlock;
137 }
138 144
139 err = -EBUSY; 145 err = -EBUSY;
140 switch (mode) { 146 switch (mode) {
@@ -156,21 +162,19 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
156 vol->exclusive = 1; 162 vol->exclusive = 1;
157 break; 163 break;
158 } 164 }
165 get_device(&vol->dev);
166 vol->ref_count += 1;
159 spin_unlock(&ubi->volumes_lock); 167 spin_unlock(&ubi->volumes_lock);
160 168
161 desc->vol = vol; 169 desc->vol = vol;
162 desc->mode = mode; 170 desc->mode = mode;
163 171
164 /* 172 mutex_lock(&ubi->ckvol_mutex);
165 * To prevent simultaneous checks of the same volume we use @vtbl_mutex,
166 * although it is not the purpose it was introduced for.
167 */
168 mutex_lock(&ubi->vtbl_mutex);
169 if (!vol->checked) { 173 if (!vol->checked) {
170 /* This is the first open - check the volume */ 174 /* This is the first open - check the volume */
171 err = ubi_check_volume(ubi, vol_id); 175 err = ubi_check_volume(ubi, vol_id);
172 if (err < 0) { 176 if (err < 0) {
173 mutex_unlock(&ubi->vtbl_mutex); 177 mutex_unlock(&ubi->ckvol_mutex);
174 ubi_close_volume(desc); 178 ubi_close_volume(desc);
175 return ERR_PTR(err); 179 return ERR_PTR(err);
176 } 180 }
@@ -181,14 +185,17 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
181 } 185 }
182 vol->checked = 1; 186 vol->checked = 1;
183 } 187 }
184 mutex_unlock(&ubi->vtbl_mutex); 188 mutex_unlock(&ubi->ckvol_mutex);
189
185 return desc; 190 return desc;
186 191
187out_unlock: 192out_unlock:
188 spin_unlock(&ubi->volumes_lock); 193 spin_unlock(&ubi->volumes_lock);
189 kfree(desc);
190out_put:
191 module_put(THIS_MODULE); 194 module_put(THIS_MODULE);
195out_free:
196 kfree(desc);
197out_put_ubi:
198 ubi_put_device(ubi);
192 return ERR_PTR(err); 199 return ERR_PTR(err);
193} 200}
194EXPORT_SYMBOL_GPL(ubi_open_volume); 201EXPORT_SYMBOL_GPL(ubi_open_volume);
@@ -205,8 +212,8 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
205 int mode) 212 int mode)
206{ 213{
207 int i, vol_id = -1, len; 214 int i, vol_id = -1, len;
208 struct ubi_volume_desc *ret;
209 struct ubi_device *ubi; 215 struct ubi_device *ubi;
216 struct ubi_volume_desc *ret;
210 217
211 dbg_msg("open volume %s, mode %d", name, mode); 218 dbg_msg("open volume %s, mode %d", name, mode);
212 219
@@ -217,14 +224,12 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
217 if (len > UBI_VOL_NAME_MAX) 224 if (len > UBI_VOL_NAME_MAX)
218 return ERR_PTR(-EINVAL); 225 return ERR_PTR(-EINVAL);
219 226
220 ret = ERR_PTR(-ENODEV); 227 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
221 if (!try_module_get(THIS_MODULE)) 228 return ERR_PTR(-EINVAL);
222 return ret;
223
224 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES || !ubi_devices[ubi_num])
225 goto out_put;
226 229
227 ubi = ubi_devices[ubi_num]; 230 ubi = ubi_get_device(ubi_num);
231 if (!ubi)
232 return ERR_PTR(-ENODEV);
228 233
229 spin_lock(&ubi->volumes_lock); 234 spin_lock(&ubi->volumes_lock);
230 /* Walk all volumes of this UBI device */ 235 /* Walk all volumes of this UBI device */
@@ -238,13 +243,16 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
238 } 243 }
239 spin_unlock(&ubi->volumes_lock); 244 spin_unlock(&ubi->volumes_lock);
240 245
241 if (vol_id < 0) 246 if (vol_id >= 0)
242 goto out_put; 247 ret = ubi_open_volume(ubi_num, vol_id, mode);
248 else
249 ret = ERR_PTR(-ENODEV);
243 250
244 ret = ubi_open_volume(ubi_num, vol_id, mode); 251 /*
245 252 * We should put the UBI device even in case of success, because
246out_put: 253 * 'ubi_open_volume()' took a reference as well.
247 module_put(THIS_MODULE); 254 */
255 ubi_put_device(ubi);
248 return ret; 256 return ret;
249} 257}
250EXPORT_SYMBOL_GPL(ubi_open_volume_nm); 258EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
@@ -256,10 +264,11 @@ EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
256void ubi_close_volume(struct ubi_volume_desc *desc) 264void ubi_close_volume(struct ubi_volume_desc *desc)
257{ 265{
258 struct ubi_volume *vol = desc->vol; 266 struct ubi_volume *vol = desc->vol;
267 struct ubi_device *ubi = vol->ubi;
259 268
260 dbg_msg("close volume %d, mode %d", vol->vol_id, desc->mode); 269 dbg_msg("close volume %d, mode %d", vol->vol_id, desc->mode);
261 270
262 spin_lock(&vol->ubi->volumes_lock); 271 spin_lock(&ubi->volumes_lock);
263 switch (desc->mode) { 272 switch (desc->mode) {
264 case UBI_READONLY: 273 case UBI_READONLY:
265 vol->readers -= 1; 274 vol->readers -= 1;
@@ -270,9 +279,12 @@ void ubi_close_volume(struct ubi_volume_desc *desc)
270 case UBI_EXCLUSIVE: 279 case UBI_EXCLUSIVE:
271 vol->exclusive = 0; 280 vol->exclusive = 0;
272 } 281 }
273 spin_unlock(&vol->ubi->volumes_lock); 282 vol->ref_count -= 1;
283 spin_unlock(&ubi->volumes_lock);
274 284
275 kfree(desc); 285 kfree(desc);
286 put_device(&vol->dev);
287 ubi_put_device(ubi);
276 module_put(THIS_MODULE); 288 module_put(THIS_MODULE);
277} 289}
278EXPORT_SYMBOL_GPL(ubi_close_volume); 290EXPORT_SYMBOL_GPL(ubi_close_volume);
@@ -332,7 +344,7 @@ int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
332 if (len == 0) 344 if (len == 0)
333 return 0; 345 return 0;
334 346
335 err = ubi_eba_read_leb(ubi, vol_id, lnum, buf, offset, len, check); 347 err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check);
336 if (err && err == -EBADMSG && vol->vol_type == UBI_STATIC_VOLUME) { 348 if (err && err == -EBADMSG && vol->vol_type == UBI_STATIC_VOLUME) {
337 ubi_warn("mark volume %d as corrupted", vol_id); 349 ubi_warn("mark volume %d as corrupted", vol_id);
338 vol->corrupted = 1; 350 vol->corrupted = 1;
@@ -399,7 +411,7 @@ int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
399 if (len == 0) 411 if (len == 0)
400 return 0; 412 return 0;
401 413
402 return ubi_eba_write_leb(ubi, vol_id, lnum, buf, offset, len, dtype); 414 return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len, dtype);
403} 415}
404EXPORT_SYMBOL_GPL(ubi_leb_write); 416EXPORT_SYMBOL_GPL(ubi_leb_write);
405 417
@@ -448,7 +460,7 @@ int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
448 if (len == 0) 460 if (len == 0)
449 return 0; 461 return 0;
450 462
451 return ubi_eba_atomic_leb_change(ubi, vol_id, lnum, buf, len, dtype); 463 return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len, dtype);
452} 464}
453EXPORT_SYMBOL_GPL(ubi_leb_change); 465EXPORT_SYMBOL_GPL(ubi_leb_change);
454 466
@@ -468,9 +480,9 @@ int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum)
468{ 480{
469 struct ubi_volume *vol = desc->vol; 481 struct ubi_volume *vol = desc->vol;
470 struct ubi_device *ubi = vol->ubi; 482 struct ubi_device *ubi = vol->ubi;
471 int err, vol_id = vol->vol_id; 483 int err;
472 484
473 dbg_msg("erase LEB %d:%d", vol_id, lnum); 485 dbg_msg("erase LEB %d:%d", vol->vol_id, lnum);
474 486
475 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) 487 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
476 return -EROFS; 488 return -EROFS;
@@ -481,7 +493,7 @@ int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum)
481 if (vol->upd_marker) 493 if (vol->upd_marker)
482 return -EBADF; 494 return -EBADF;
483 495
484 err = ubi_eba_unmap_leb(ubi, vol_id, lnum); 496 err = ubi_eba_unmap_leb(ubi, vol, lnum);
485 if (err) 497 if (err)
486 return err; 498 return err;
487 499
@@ -529,9 +541,8 @@ int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum)
529{ 541{
530 struct ubi_volume *vol = desc->vol; 542 struct ubi_volume *vol = desc->vol;
531 struct ubi_device *ubi = vol->ubi; 543 struct ubi_device *ubi = vol->ubi;
532 int vol_id = vol->vol_id;
533 544
534 dbg_msg("unmap LEB %d:%d", vol_id, lnum); 545 dbg_msg("unmap LEB %d:%d", vol->vol_id, lnum);
535 546
536 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) 547 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
537 return -EROFS; 548 return -EROFS;
@@ -542,11 +553,55 @@ int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum)
542 if (vol->upd_marker) 553 if (vol->upd_marker)
543 return -EBADF; 554 return -EBADF;
544 555
545 return ubi_eba_unmap_leb(ubi, vol_id, lnum); 556 return ubi_eba_unmap_leb(ubi, vol, lnum);
546} 557}
547EXPORT_SYMBOL_GPL(ubi_leb_unmap); 558EXPORT_SYMBOL_GPL(ubi_leb_unmap);
548 559
549/** 560/**
561 * ubi_leb_map - map logical eraseblock to a physical eraseblock.
562 * @desc: volume descriptor
563 * @lnum: logical eraseblock number
564 * @dtype: expected data type
565 *
566 * This function maps an un-mapped logical eraseblock @lnum to a physical
567 * eraseblock. This means that after a successful invocation of this
568 * function the logical eraseblock @lnum will be empty (contain only %0xFF
569 * bytes) and be mapped to a physical eraseblock, even if an unclean reboot
570 * happens.
571 *
572 * This function returns zero in case of success, %-EBADF if the volume is
573 * damaged because of an interrupted update, %-EBADMSG if the logical
574 * eraseblock is already mapped, and other negative error codes in case of
575 * other failures.
576 */
577int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype)
578{
579 struct ubi_volume *vol = desc->vol;
580 struct ubi_device *ubi = vol->ubi;
581
582 dbg_msg("unmap LEB %d:%d", vol->vol_id, lnum);
583
584 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
585 return -EROFS;
586
587 if (lnum < 0 || lnum >= vol->reserved_pebs)
588 return -EINVAL;
589
590 if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
591 dtype != UBI_UNKNOWN)
592 return -EINVAL;
593
594 if (vol->upd_marker)
595 return -EBADF;
596
597 if (vol->eba_tbl[lnum] >= 0)
598 return -EBADMSG;
599
600 return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
601}
602EXPORT_SYMBOL_GPL(ubi_leb_map);
603
604/**
550 * ubi_is_mapped - check if logical eraseblock is mapped. 605 * ubi_is_mapped - check if logical eraseblock is mapped.
551 * @desc: volume descriptor 606 * @desc: volume descriptor
552 * @lnum: logical eraseblock number 607 * @lnum: logical eraseblock number
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
index 9e2338c8e2cf..93e052812012 100644
--- a/drivers/mtd/ubi/misc.c
+++ b/drivers/mtd/ubi/misc.c
@@ -79,7 +79,7 @@ int ubi_check_volume(struct ubi_device *ubi, int vol_id)
79 else 79 else
80 size = vol->usable_leb_size; 80 size = vol->usable_leb_size;
81 81
82 err = ubi_eba_read_leb(ubi, vol_id, i, buf, 0, size, 1); 82 err = ubi_eba_read_leb(ubi, vol, i, buf, 0, size, 1);
83 if (err) { 83 if (err) {
84 if (err == -EBADMSG) 84 if (err == -EBADMSG)
85 err = 1; 85 err = 1;
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index c7b0afc9d280..05aa3e7daba1 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -286,9 +286,14 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
286 * FIXME: but this is anyway obsolete and will be removed at 286 * FIXME: but this is anyway obsolete and will be removed at
287 * some point. 287 * some point.
288 */ 288 */
289
290 dbg_bld("using old crappy leb_ver stuff"); 289 dbg_bld("using old crappy leb_ver stuff");
291 290
291 if (v1 == v2) {
292 ubi_err("PEB %d and PEB %d have the same version %lld",
293 seb->pnum, pnum, v1);
294 return -EINVAL;
295 }
296
292 abs = v1 - v2; 297 abs = v1 - v2;
293 if (abs < 0) 298 if (abs < 0)
294 abs = -abs; 299 abs = -abs;
@@ -390,7 +395,6 @@ out_free_buf:
390 vfree(buf); 395 vfree(buf);
391out_free_vidh: 396out_free_vidh:
392 ubi_free_vid_hdr(ubi, vh); 397 ubi_free_vid_hdr(ubi, vh);
393 ubi_assert(err < 0);
394 return err; 398 return err;
395} 399}
396 400
@@ -769,7 +773,7 @@ struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
769 */ 773 */
770static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum) 774static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum)
771{ 775{
772 long long ec; 776 long long uninitialized_var(ec);
773 int err, bitflips = 0, vol_id, ec_corr = 0; 777 int err, bitflips = 0, vol_id, ec_corr = 0;
774 778
775 dbg_bld("scan PEB %d", pnum); 779 dbg_bld("scan PEB %d", pnum);
@@ -854,7 +858,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum
854 } 858 }
855 859
856 vol_id = be32_to_cpu(vidh->vol_id); 860 vol_id = be32_to_cpu(vidh->vol_id);
857 if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOL_ID) { 861 if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
858 int lnum = be32_to_cpu(vidh->lnum); 862 int lnum = be32_to_cpu(vidh->lnum);
859 863
860 /* Unsupported internal volume */ 864 /* Unsupported internal volume */
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 5e941a633030..457710615261 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -94,8 +94,43 @@ enum {
94 UBI_IO_BITFLIPS 94 UBI_IO_BITFLIPS
95}; 95};
96 96
97extern int ubi_devices_cnt; 97/**
98extern struct ubi_device *ubi_devices[]; 98 * struct ubi_wl_entry - wear-leveling entry.
99 * @rb: link in the corresponding RB-tree
100 * @ec: erase counter
101 * @pnum: physical eraseblock number
102 *
103 * This data structure is used in the WL unit. Each physical eraseblock has a
104 * corresponding &struct wl_entry object which may be kept in different
105 * RB-trees. See WL unit for details.
106 */
107struct ubi_wl_entry {
108 struct rb_node rb;
109 int ec;
110 int pnum;
111};
112
113/**
114 * struct ubi_ltree_entry - an entry in the lock tree.
115 * @rb: links RB-tree nodes
116 * @vol_id: volume ID of the locked logical eraseblock
117 * @lnum: locked logical eraseblock number
118 * @users: how many tasks are using this logical eraseblock or wait for it
119 * @mutex: read/write mutex to implement read/write access serialization to
120 * the (@vol_id, @lnum) logical eraseblock
121 *
122 * This data structure is used in the EBA unit to implement per-LEB locking.
123 * When a logical eraseblock is being locked - corresponding
124 * &struct ubi_ltree_entry object is inserted to the lock tree (@ubi->ltree).
125 * See EBA unit for details.
126 */
127struct ubi_ltree_entry {
128 struct rb_node rb;
129 int vol_id;
130 int lnum;
131 int users;
132 struct rw_semaphore mutex;
133};
99 134
100struct ubi_volume_desc; 135struct ubi_volume_desc;
101 136
@@ -105,11 +140,10 @@ struct ubi_volume_desc;
105 * @cdev: character device object to create character device 140 * @cdev: character device object to create character device
106 * @ubi: reference to the UBI device description object 141 * @ubi: reference to the UBI device description object
107 * @vol_id: volume ID 142 * @vol_id: volume ID
143 * @ref_count: volume reference count
108 * @readers: number of users holding this volume in read-only mode 144 * @readers: number of users holding this volume in read-only mode
109 * @writers: number of users holding this volume in read-write mode 145 * @writers: number of users holding this volume in read-write mode
110 * @exclusive: whether somebody holds this volume in exclusive mode 146 * @exclusive: whether somebody holds this volume in exclusive mode
111 * @removed: if the volume was removed
112 * @checked: if this static volume was checked
113 * 147 *
114 * @reserved_pebs: how many physical eraseblocks are reserved for this volume 148 * @reserved_pebs: how many physical eraseblocks are reserved for this volume
115 * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME) 149 * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME)
@@ -117,21 +151,30 @@ struct ubi_volume_desc;
117 * @used_ebs: how many logical eraseblocks in this volume contain data 151 * @used_ebs: how many logical eraseblocks in this volume contain data
118 * @last_eb_bytes: how many bytes are stored in the last logical eraseblock 152 * @last_eb_bytes: how many bytes are stored in the last logical eraseblock
119 * @used_bytes: how many bytes of data this volume contains 153 * @used_bytes: how many bytes of data this volume contains
120 * @upd_marker: non-zero if the update marker is set for this volume
121 * @corrupted: non-zero if the volume is corrupted (static volumes only)
122 * @alignment: volume alignment 154 * @alignment: volume alignment
123 * @data_pad: how many bytes are not used at the end of physical eraseblocks to 155 * @data_pad: how many bytes are not used at the end of physical eraseblocks to
124 * satisfy the requested alignment 156 * satisfy the requested alignment
125 * @name_len: volume name length 157 * @name_len: volume name length
126 * @name: volume name 158 * @name: volume name
127 * 159 *
128 * @updating: whether the volume is being updated
129 * @upd_ebs: how many eraseblocks are expected to be updated 160 * @upd_ebs: how many eraseblocks are expected to be updated
130 * @upd_bytes: how many bytes are expected to be received 161 * @ch_lnum: LEB number which is being changing by the atomic LEB change
131 * @upd_received: how many update bytes were already received 162 * operation
132 * @upd_buf: update buffer which is used to collect update data 163 * @ch_dtype: data persistency type which is being changing by the atomic LEB
164 * change operation
165 * @upd_bytes: how many bytes are expected to be received for volume update or
166 * atomic LEB change
167 * @upd_received: how many bytes were already received for volume update or
168 * atomic LEB change
169 * @upd_buf: update buffer which is used to collect update data or data for
170 * atomic LEB change
133 * 171 *
134 * @eba_tbl: EBA table of this volume (LEB->PEB mapping) 172 * @eba_tbl: EBA table of this volume (LEB->PEB mapping)
173 * @checked: %1 if this static volume was checked
174 * @corrupted: %1 if the volume is corrupted (static volumes only)
175 * @upd_marker: %1 if the update marker is set for this volume
176 * @updating: %1 if the volume is being updated
177 * @changing_leb: %1 if the atomic LEB change ioctl command is in progress
135 * 178 *
136 * @gluebi_desc: gluebi UBI volume descriptor 179 * @gluebi_desc: gluebi UBI volume descriptor
137 * @gluebi_refcount: reference count of the gluebi MTD device 180 * @gluebi_refcount: reference count of the gluebi MTD device
@@ -150,11 +193,10 @@ struct ubi_volume {
150 struct cdev cdev; 193 struct cdev cdev;
151 struct ubi_device *ubi; 194 struct ubi_device *ubi;
152 int vol_id; 195 int vol_id;
196 int ref_count;
153 int readers; 197 int readers;
154 int writers; 198 int writers;
155 int exclusive; 199 int exclusive;
156 int removed;
157 int checked;
158 200
159 int reserved_pebs; 201 int reserved_pebs;
160 int vol_type; 202 int vol_type;
@@ -162,23 +204,31 @@ struct ubi_volume {
162 int used_ebs; 204 int used_ebs;
163 int last_eb_bytes; 205 int last_eb_bytes;
164 long long used_bytes; 206 long long used_bytes;
165 int upd_marker;
166 int corrupted;
167 int alignment; 207 int alignment;
168 int data_pad; 208 int data_pad;
169 int name_len; 209 int name_len;
170 char name[UBI_VOL_NAME_MAX+1]; 210 char name[UBI_VOL_NAME_MAX+1];
171 211
172 int updating;
173 int upd_ebs; 212 int upd_ebs;
213 int ch_lnum;
214 int ch_dtype;
174 long long upd_bytes; 215 long long upd_bytes;
175 long long upd_received; 216 long long upd_received;
176 void *upd_buf; 217 void *upd_buf;
177 218
178 int *eba_tbl; 219 int *eba_tbl;
220 int checked:1;
221 int corrupted:1;
222 int upd_marker:1;
223 int updating:1;
224 int changing_leb:1;
179 225
180#ifdef CONFIG_MTD_UBI_GLUEBI 226#ifdef CONFIG_MTD_UBI_GLUEBI
181 /* Gluebi-related stuff may be compiled out */ 227 /*
228 * Gluebi-related stuff may be compiled out.
229 * TODO: this should not be built into UBI but should be a separate
230 * ubimtd driver which works on top of UBI and emulates MTD devices.
231 */
182 struct ubi_volume_desc *gluebi_desc; 232 struct ubi_volume_desc *gluebi_desc;
183 int gluebi_refcount; 233 int gluebi_refcount;
184 struct mtd_info gluebi_mtd; 234 struct mtd_info gluebi_mtd;
@@ -200,28 +250,31 @@ struct ubi_wl_entry;
200 250
201/** 251/**
202 * struct ubi_device - UBI device description structure 252 * struct ubi_device - UBI device description structure
203 * @dev: class device object to use the Linux device model 253 * @dev: UBI device object to use the Linux device model
204 * @cdev: character device object to create character device 254 * @cdev: character device object to create character device
205 * @ubi_num: UBI device number 255 * @ubi_num: UBI device number
206 * @ubi_name: UBI device name 256 * @ubi_name: UBI device name
207 * @major: character device major number
208 * @vol_count: number of volumes in this UBI device 257 * @vol_count: number of volumes in this UBI device
209 * @volumes: volumes of this UBI device 258 * @volumes: volumes of this UBI device
210 * @volumes_lock: protects @volumes, @rsvd_pebs, @avail_pebs, beb_rsvd_pebs, 259 * @volumes_lock: protects @volumes, @rsvd_pebs, @avail_pebs, beb_rsvd_pebs,
211 * @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count, @vol->readers, 260 * @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count,
212 * @vol->writers, @vol->exclusive, @vol->removed, @vol->mapping and 261 * @vol->readers, @vol->writers, @vol->exclusive,
213 * @vol->eba_tbl. 262 * @vol->ref_count, @vol->mapping and @vol->eba_tbl.
263 * @ref_count: count of references on the UBI device
214 * 264 *
215 * @rsvd_pebs: count of reserved physical eraseblocks 265 * @rsvd_pebs: count of reserved physical eraseblocks
216 * @avail_pebs: count of available physical eraseblocks 266 * @avail_pebs: count of available physical eraseblocks
217 * @beb_rsvd_pebs: how many physical eraseblocks are reserved for bad PEB 267 * @beb_rsvd_pebs: how many physical eraseblocks are reserved for bad PEB
218 * handling 268 * handling
219 * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling 269 * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling
220 * 270 *
271 * @autoresize_vol_id: ID of the volume which has to be auto-resized at the end
272 * of UBI initialization
221 * @vtbl_slots: how many slots are available in the volume table 273 * @vtbl_slots: how many slots are available in the volume table
222 * @vtbl_size: size of the volume table in bytes 274 * @vtbl_size: size of the volume table in bytes
223 * @vtbl: in-RAM volume table copy 275 * @vtbl: in-RAM volume table copy
224 * @vtbl_mutex: protects on-flash volume table 276 * @volumes_mutex: protects on-flash volume table and serializes volume
277 * changes, like creation, deletion, update, resize
225 * 278 *
226 * @max_ec: current highest erase counter value 279 * @max_ec: current highest erase counter value
227 * @mean_ec: current mean erase counter value 280 * @mean_ec: current mean erase counter value
@@ -238,15 +291,15 @@ struct ubi_wl_entry;
238 * @prot.pnum: protection tree indexed by physical eraseblock numbers 291 * @prot.pnum: protection tree indexed by physical eraseblock numbers
239 * @prot.aec: protection tree indexed by absolute erase counter value 292 * @prot.aec: protection tree indexed by absolute erase counter value
240 * @wl_lock: protects the @used, @free, @prot, @lookuptbl, @abs_ec, @move_from, 293 * @wl_lock: protects the @used, @free, @prot, @lookuptbl, @abs_ec, @move_from,
241 * @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works 294 * @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works
242 * fields 295 * fields
296 * @move_mutex: serializes eraseblock moves
243 * @wl_scheduled: non-zero if the wear-leveling was scheduled 297 * @wl_scheduled: non-zero if the wear-leveling was scheduled
244 * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any 298 * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any
245 * physical eraseblock 299 * physical eraseblock
246 * @abs_ec: absolute erase counter 300 * @abs_ec: absolute erase counter
247 * @move_from: physical eraseblock from where the data is being moved 301 * @move_from: physical eraseblock from where the data is being moved
248 * @move_to: physical eraseblock where the data is being moved to 302 * @move_to: physical eraseblock where the data is being moved to
249 * @move_from_put: if the "from" PEB was put
250 * @move_to_put: if the "to" PEB was put 303 * @move_to_put: if the "to" PEB was put
251 * @works: list of pending works 304 * @works: list of pending works
252 * @works_count: count of pending works 305 * @works_count: count of pending works
@@ -273,13 +326,13 @@ struct ubi_wl_entry;
273 * @hdrs_min_io_size 326 * @hdrs_min_io_size
274 * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset 327 * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset
275 * @bad_allowed: whether the MTD device admits of bad physical eraseblocks or 328 * @bad_allowed: whether the MTD device admits of bad physical eraseblocks or
276 * not 329 * not
277 * @mtd: MTD device descriptor 330 * @mtd: MTD device descriptor
278 * 331 *
279 * @peb_buf1: a buffer of PEB size used for different purposes 332 * @peb_buf1: a buffer of PEB size used for different purposes
280 * @peb_buf2: another buffer of PEB size used for different purposes 333 * @peb_buf2: another buffer of PEB size used for different purposes
281 * @buf_mutex: protects @peb_buf1 and @peb_buf2 334 * @buf_mutex: protects @peb_buf1 and @peb_buf2
282 * @dbg_peb_buf: buffer of PEB size used for debugging 335 * @dbg_peb_buf: buffer of PEB size used for debugging
283 * @dbg_buf_mutex: protects @dbg_peb_buf 336 * @dbg_buf_mutex: protects @dbg_peb_buf
284 */ 337 */
285struct ubi_device { 338struct ubi_device {
@@ -287,22 +340,24 @@ struct ubi_device {
287 struct device dev; 340 struct device dev;
288 int ubi_num; 341 int ubi_num;
289 char ubi_name[sizeof(UBI_NAME_STR)+5]; 342 char ubi_name[sizeof(UBI_NAME_STR)+5];
290 int major;
291 int vol_count; 343 int vol_count;
292 struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT]; 344 struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT];
293 spinlock_t volumes_lock; 345 spinlock_t volumes_lock;
346 int ref_count;
294 347
295 int rsvd_pebs; 348 int rsvd_pebs;
296 int avail_pebs; 349 int avail_pebs;
297 int beb_rsvd_pebs; 350 int beb_rsvd_pebs;
298 int beb_rsvd_level; 351 int beb_rsvd_level;
299 352
353 int autoresize_vol_id;
300 int vtbl_slots; 354 int vtbl_slots;
301 int vtbl_size; 355 int vtbl_size;
302 struct ubi_vtbl_record *vtbl; 356 struct ubi_vtbl_record *vtbl;
303 struct mutex vtbl_mutex; 357 struct mutex volumes_mutex;
304 358
305 int max_ec; 359 int max_ec;
360 /* TODO: mean_ec is not updated run-time, fix */
306 int mean_ec; 361 int mean_ec;
307 362
308 /* EBA unit's stuff */ 363 /* EBA unit's stuff */
@@ -320,12 +375,13 @@ struct ubi_device {
320 struct rb_root aec; 375 struct rb_root aec;
321 } prot; 376 } prot;
322 spinlock_t wl_lock; 377 spinlock_t wl_lock;
378 struct mutex move_mutex;
379 struct rw_semaphore work_sem;
323 int wl_scheduled; 380 int wl_scheduled;
324 struct ubi_wl_entry **lookuptbl; 381 struct ubi_wl_entry **lookuptbl;
325 unsigned long long abs_ec; 382 unsigned long long abs_ec;
326 struct ubi_wl_entry *move_from; 383 struct ubi_wl_entry *move_from;
327 struct ubi_wl_entry *move_to; 384 struct ubi_wl_entry *move_to;
328 int move_from_put;
329 int move_to_put; 385 int move_to_put;
330 struct list_head works; 386 struct list_head works;
331 int works_count; 387 int works_count;
@@ -355,15 +411,19 @@ struct ubi_device {
355 void *peb_buf1; 411 void *peb_buf1;
356 void *peb_buf2; 412 void *peb_buf2;
357 struct mutex buf_mutex; 413 struct mutex buf_mutex;
414 struct mutex ckvol_mutex;
358#ifdef CONFIG_MTD_UBI_DEBUG 415#ifdef CONFIG_MTD_UBI_DEBUG
359 void *dbg_peb_buf; 416 void *dbg_peb_buf;
360 struct mutex dbg_buf_mutex; 417 struct mutex dbg_buf_mutex;
361#endif 418#endif
362}; 419};
363 420
421extern struct kmem_cache *ubi_wl_entry_slab;
422extern struct file_operations ubi_ctrl_cdev_operations;
364extern struct file_operations ubi_cdev_operations; 423extern struct file_operations ubi_cdev_operations;
365extern struct file_operations ubi_vol_cdev_operations; 424extern struct file_operations ubi_vol_cdev_operations;
366extern struct class *ubi_class; 425extern struct class *ubi_class;
426extern struct mutex ubi_devices_mutex;
367 427
368/* vtbl.c */ 428/* vtbl.c */
369int ubi_change_vtbl_record(struct ubi_device *ubi, int idx, 429int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
@@ -374,13 +434,18 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si);
374int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req); 434int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req);
375int ubi_remove_volume(struct ubi_volume_desc *desc); 435int ubi_remove_volume(struct ubi_volume_desc *desc);
376int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs); 436int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs);
377int ubi_add_volume(struct ubi_device *ubi, int vol_id); 437int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol);
378void ubi_free_volume(struct ubi_device *ubi, int vol_id); 438void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol);
379 439
380/* upd.c */ 440/* upd.c */
381int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes); 441int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
382int ubi_more_update_data(struct ubi_device *ubi, int vol_id, 442 long long bytes);
443int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
383 const void __user *buf, int count); 444 const void __user *buf, int count);
445int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
446 const struct ubi_leb_change_req *req);
447int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
448 const void __user *buf, int count);
384 449
385/* misc.c */ 450/* misc.c */
386int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, int length); 451int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, int length);
@@ -399,16 +464,17 @@ void ubi_gluebi_updated(struct ubi_volume *vol);
399#endif 464#endif
400 465
401/* eba.c */ 466/* eba.c */
402int ubi_eba_unmap_leb(struct ubi_device *ubi, int vol_id, int lnum); 467int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
403int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf, 468 int lnum);
404 int offset, int len, int check); 469int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
405int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum, 470 void *buf, int offset, int len, int check);
471int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
406 const void *buf, int offset, int len, int dtype); 472 const void *buf, int offset, int len, int dtype);
407int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum, 473int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
408 const void *buf, int len, int dtype, 474 int lnum, const void *buf, int len, int dtype,
409 int used_ebs); 475 int used_ebs);
410int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum, 476int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
411 const void *buf, int len, int dtype); 477 int lnum, const void *buf, int len, int dtype);
412int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, 478int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
413 struct ubi_vid_hdr *vid_hdr); 479 struct ubi_vid_hdr *vid_hdr);
414int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si); 480int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
@@ -421,6 +487,7 @@ int ubi_wl_flush(struct ubi_device *ubi);
421int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum); 487int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum);
422int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si); 488int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
423void ubi_wl_close(struct ubi_device *ubi); 489void ubi_wl_close(struct ubi_device *ubi);
490int ubi_thread(void *u);
424 491
425/* io.c */ 492/* io.c */
426int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset, 493int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
@@ -439,6 +506,14 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
439int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum, 506int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
440 struct ubi_vid_hdr *vid_hdr); 507 struct ubi_vid_hdr *vid_hdr);
441 508
509/* build.c */
510int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset);
511int ubi_detach_mtd_dev(int ubi_num, int anyway);
512struct ubi_device *ubi_get_device(int ubi_num);
513void ubi_put_device(struct ubi_device *ubi);
514struct ubi_device *ubi_get_by_major(int major);
515int ubi_major2num(int major);
516
442/* 517/*
443 * ubi_rb_for_each_entry - walk an RB-tree. 518 * ubi_rb_for_each_entry - walk an RB-tree.
444 * @rb: a pointer to type 'struct rb_node' to use as a loop counter 519 * @rb: a pointer to type 'struct rb_node' to use as a loop counter
@@ -523,8 +598,10 @@ static inline int ubi_io_write_data(struct ubi_device *ubi, const void *buf,
523 */ 598 */
524static inline void ubi_ro_mode(struct ubi_device *ubi) 599static inline void ubi_ro_mode(struct ubi_device *ubi)
525{ 600{
526 ubi->ro_mode = 1; 601 if (!ubi->ro_mode) {
527 ubi_warn("switch to read-only mode"); 602 ubi->ro_mode = 1;
603 ubi_warn("switch to read-only mode");
604 }
528} 605}
529 606
530/** 607/**
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 0efc586a8328..ddaa1a56cc69 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -22,7 +22,8 @@
22 */ 22 */
23 23
24/* 24/*
25 * This file contains implementation of the volume update functionality. 25 * This file contains implementation of the volume update and atomic LEB change
26 * functionality.
26 * 27 *
27 * The update operation is based on the per-volume update marker which is 28 * The update operation is based on the per-volume update marker which is
28 * stored in the volume table. The update marker is set before the update 29 * stored in the volume table. The update marker is set before the update
@@ -45,29 +46,31 @@
45/** 46/**
46 * set_update_marker - set update marker. 47 * set_update_marker - set update marker.
47 * @ubi: UBI device description object 48 * @ubi: UBI device description object
48 * @vol_id: volume ID 49 * @vol: volume description object
49 * 50 *
50 * This function sets the update marker flag for volume @vol_id. Returns zero 51 * This function sets the update marker flag for volume @vol. Returns zero
51 * in case of success and a negative error code in case of failure. 52 * in case of success and a negative error code in case of failure.
52 */ 53 */
53static int set_update_marker(struct ubi_device *ubi, int vol_id) 54static int set_update_marker(struct ubi_device *ubi, struct ubi_volume *vol)
54{ 55{
55 int err; 56 int err;
56 struct ubi_vtbl_record vtbl_rec; 57 struct ubi_vtbl_record vtbl_rec;
57 struct ubi_volume *vol = ubi->volumes[vol_id];
58 58
59 dbg_msg("set update marker for volume %d", vol_id); 59 dbg_msg("set update marker for volume %d", vol->vol_id);
60 60
61 if (vol->upd_marker) { 61 if (vol->upd_marker) {
62 ubi_assert(ubi->vtbl[vol_id].upd_marker); 62 ubi_assert(ubi->vtbl[vol->vol_id].upd_marker);
63 dbg_msg("already set"); 63 dbg_msg("already set");
64 return 0; 64 return 0;
65 } 65 }
66 66
67 memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record)); 67 memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id],
68 sizeof(struct ubi_vtbl_record));
68 vtbl_rec.upd_marker = 1; 69 vtbl_rec.upd_marker = 1;
69 70
70 err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); 71 mutex_lock(&ubi->volumes_mutex);
72 err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
73 mutex_unlock(&ubi->volumes_mutex);
71 vol->upd_marker = 1; 74 vol->upd_marker = 1;
72 return err; 75 return err;
73} 76}
@@ -75,23 +78,24 @@ static int set_update_marker(struct ubi_device *ubi, int vol_id)
75/** 78/**
76 * clear_update_marker - clear update marker. 79 * clear_update_marker - clear update marker.
77 * @ubi: UBI device description object 80 * @ubi: UBI device description object
78 * @vol_id: volume ID 81 * @vol: volume description object
79 * @bytes: new data size in bytes 82 * @bytes: new data size in bytes
80 * 83 *
81 * This function clears the update marker for volume @vol_id, sets new volume 84 * This function clears the update marker for volume @vol, sets new volume
82 * data size and clears the "corrupted" flag (static volumes only). Returns 85 * data size and clears the "corrupted" flag (static volumes only). Returns
83 * zero in case of success and a negative error code in case of failure. 86 * zero in case of success and a negative error code in case of failure.
84 */ 87 */
85static int clear_update_marker(struct ubi_device *ubi, int vol_id, long long bytes) 88static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol,
89 long long bytes)
86{ 90{
87 int err; 91 int err;
88 uint64_t tmp; 92 uint64_t tmp;
89 struct ubi_vtbl_record vtbl_rec; 93 struct ubi_vtbl_record vtbl_rec;
90 struct ubi_volume *vol = ubi->volumes[vol_id];
91 94
92 dbg_msg("clear update marker for volume %d", vol_id); 95 dbg_msg("clear update marker for volume %d", vol->vol_id);
93 96
94 memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record)); 97 memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id],
98 sizeof(struct ubi_vtbl_record));
95 ubi_assert(vol->upd_marker && vtbl_rec.upd_marker); 99 ubi_assert(vol->upd_marker && vtbl_rec.upd_marker);
96 vtbl_rec.upd_marker = 0; 100 vtbl_rec.upd_marker = 0;
97 101
@@ -106,7 +110,9 @@ static int clear_update_marker(struct ubi_device *ubi, int vol_id, long long byt
106 vol->last_eb_bytes = vol->usable_leb_size; 110 vol->last_eb_bytes = vol->usable_leb_size;
107 } 111 }
108 112
109 err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); 113 mutex_lock(&ubi->volumes_mutex);
114 err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
115 mutex_unlock(&ubi->volumes_mutex);
110 vol->upd_marker = 0; 116 vol->upd_marker = 0;
111 return err; 117 return err;
112} 118}
@@ -114,35 +120,36 @@ static int clear_update_marker(struct ubi_device *ubi, int vol_id, long long byt
114/** 120/**
115 * ubi_start_update - start volume update. 121 * ubi_start_update - start volume update.
116 * @ubi: UBI device description object 122 * @ubi: UBI device description object
117 * @vol_id: volume ID 123 * @vol: volume description object
118 * @bytes: update bytes 124 * @bytes: update bytes
119 * 125 *
120 * This function starts volume update operation. If @bytes is zero, the volume 126 * This function starts volume update operation. If @bytes is zero, the volume
121 * is just wiped out. Returns zero in case of success and a negative error code 127 * is just wiped out. Returns zero in case of success and a negative error code
122 * in case of failure. 128 * in case of failure.
123 */ 129 */
124int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes) 130int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
131 long long bytes)
125{ 132{
126 int i, err; 133 int i, err;
127 uint64_t tmp; 134 uint64_t tmp;
128 struct ubi_volume *vol = ubi->volumes[vol_id];
129 135
130 dbg_msg("start update of volume %d, %llu bytes", vol_id, bytes); 136 dbg_msg("start update of volume %d, %llu bytes", vol->vol_id, bytes);
137 ubi_assert(!vol->updating && !vol->changing_leb);
131 vol->updating = 1; 138 vol->updating = 1;
132 139
133 err = set_update_marker(ubi, vol_id); 140 err = set_update_marker(ubi, vol);
134 if (err) 141 if (err)
135 return err; 142 return err;
136 143
137 /* Before updating - wipe out the volume */ 144 /* Before updating - wipe out the volume */
138 for (i = 0; i < vol->reserved_pebs; i++) { 145 for (i = 0; i < vol->reserved_pebs; i++) {
139 err = ubi_eba_unmap_leb(ubi, vol_id, i); 146 err = ubi_eba_unmap_leb(ubi, vol, i);
140 if (err) 147 if (err)
141 return err; 148 return err;
142 } 149 }
143 150
144 if (bytes == 0) { 151 if (bytes == 0) {
145 err = clear_update_marker(ubi, vol_id, 0); 152 err = clear_update_marker(ubi, vol, 0);
146 if (err) 153 if (err)
147 return err; 154 return err;
148 err = ubi_wl_flush(ubi); 155 err = ubi_wl_flush(ubi);
@@ -163,9 +170,42 @@ int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes)
163} 170}
164 171
165/** 172/**
173 * ubi_start_leb_change - start atomic LEB change.
174 * @ubi: UBI device description object
175 * @vol: volume description object
176 * @req: operation request
177 *
178 * This function starts atomic LEB change operation. Returns zero in case of
179 * success and a negative error code in case of failure.
180 */
181int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
182 const struct ubi_leb_change_req *req)
183{
184 ubi_assert(!vol->updating && !vol->changing_leb);
185
186 dbg_msg("start changing LEB %d:%d, %u bytes",
187 vol->vol_id, req->lnum, req->bytes);
188 if (req->bytes == 0)
189 return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0,
190 req->dtype);
191
192 vol->upd_bytes = req->bytes;
193 vol->upd_received = 0;
194 vol->changing_leb = 1;
195 vol->ch_lnum = req->lnum;
196 vol->ch_dtype = req->dtype;
197
198 vol->upd_buf = vmalloc(req->bytes);
199 if (!vol->upd_buf)
200 return -ENOMEM;
201
202 return 0;
203}
204
205/**
166 * write_leb - write update data. 206 * write_leb - write update data.
167 * @ubi: UBI device description object 207 * @ubi: UBI device description object
168 * @vol_id: volume ID 208 * @vol: volume description object
169 * @lnum: logical eraseblock number 209 * @lnum: logical eraseblock number
170 * @buf: data to write 210 * @buf: data to write
171 * @len: data size 211 * @len: data size
@@ -191,26 +231,22 @@ int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes)
191 * This function returns zero in case of success and a negative error code in 231 * This function returns zero in case of success and a negative error code in
192 * case of failure. 232 * case of failure.
193 */ 233 */
194static int write_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf, 234static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
195 int len, int used_ebs) 235 void *buf, int len, int used_ebs)
196{ 236{
197 int err, l; 237 int err;
198 struct ubi_volume *vol = ubi->volumes[vol_id];
199 238
200 if (vol->vol_type == UBI_DYNAMIC_VOLUME) { 239 if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
201 l = ALIGN(len, ubi->min_io_size); 240 len = ALIGN(len, ubi->min_io_size);
202 memset(buf + len, 0xFF, l - len); 241 memset(buf + len, 0xFF, len - len);
203 242
204 l = ubi_calc_data_len(ubi, buf, l); 243 len = ubi_calc_data_len(ubi, buf, len);
205 if (l == 0) { 244 if (len == 0) {
206 dbg_msg("all %d bytes contain 0xFF - skip", len); 245 dbg_msg("all %d bytes contain 0xFF - skip", len);
207 return 0; 246 return 0;
208 } 247 }
209 if (len != l)
210 dbg_msg("skip last %d bytes (0xFF)", len - l);
211 248
212 err = ubi_eba_write_leb(ubi, vol_id, lnum, buf, 0, l, 249 err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len, UBI_UNKNOWN);
213 UBI_UNKNOWN);
214 } else { 250 } else {
215 /* 251 /*
216 * When writing static volume, and this is the last logical 252 * When writing static volume, and this is the last logical
@@ -222,7 +258,7 @@ static int write_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
222 * contain zeros, not random trash. 258 * contain zeros, not random trash.
223 */ 259 */
224 memset(buf + len, 0, vol->usable_leb_size - len); 260 memset(buf + len, 0, vol->usable_leb_size - len);
225 err = ubi_eba_write_leb_st(ubi, vol_id, lnum, buf, len, 261 err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len,
226 UBI_UNKNOWN, used_ebs); 262 UBI_UNKNOWN, used_ebs);
227 } 263 }
228 264
@@ -236,16 +272,15 @@ static int write_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
236 * @count: how much bytes to write 272 * @count: how much bytes to write
237 * 273 *
238 * This function writes more data to the volume which is being updated. It may 274 * This function writes more data to the volume which is being updated. It may
239 * be called arbitrary number of times until all of the update data arrive. 275 * be called arbitrary number of times until all the update data arrives. This
240 * This function returns %0 in case of success, number of bytes written during 276 * function returns %0 in case of success, number of bytes written during the
241 * the last call if the whole volume update was successfully finished, and a 277 * last call if the whole volume update has been successfully finished, and a
242 * negative error code in case of failure. 278 * negative error code in case of failure.
243 */ 279 */
244int ubi_more_update_data(struct ubi_device *ubi, int vol_id, 280int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
245 const void __user *buf, int count) 281 const void __user *buf, int count)
246{ 282{
247 uint64_t tmp; 283 uint64_t tmp;
248 struct ubi_volume *vol = ubi->volumes[vol_id];
249 int lnum, offs, err = 0, len, to_write = count; 284 int lnum, offs, err = 0, len, to_write = count;
250 285
251 dbg_msg("write %d of %lld bytes, %lld already passed", 286 dbg_msg("write %d of %lld bytes, %lld already passed",
@@ -290,8 +325,8 @@ int ubi_more_update_data(struct ubi_device *ubi, int vol_id,
290 * is the last chunk, it's time to flush the buffer. 325 * is the last chunk, it's time to flush the buffer.
291 */ 326 */
292 ubi_assert(flush_len <= vol->usable_leb_size); 327 ubi_assert(flush_len <= vol->usable_leb_size);
293 err = write_leb(ubi, vol_id, lnum, vol->upd_buf, 328 err = write_leb(ubi, vol, lnum, vol->upd_buf, flush_len,
294 flush_len, vol->upd_ebs); 329 vol->upd_ebs);
295 if (err) 330 if (err)
296 return err; 331 return err;
297 } 332 }
@@ -318,8 +353,8 @@ int ubi_more_update_data(struct ubi_device *ubi, int vol_id,
318 353
319 if (len == vol->usable_leb_size || 354 if (len == vol->usable_leb_size ||
320 vol->upd_received + len == vol->upd_bytes) { 355 vol->upd_received + len == vol->upd_bytes) {
321 err = write_leb(ubi, vol_id, lnum, vol->upd_buf, len, 356 err = write_leb(ubi, vol, lnum, vol->upd_buf,
322 vol->upd_ebs); 357 len, vol->upd_ebs);
323 if (err) 358 if (err)
324 break; 359 break;
325 } 360 }
@@ -333,16 +368,70 @@ int ubi_more_update_data(struct ubi_device *ubi, int vol_id,
333 ubi_assert(vol->upd_received <= vol->upd_bytes); 368 ubi_assert(vol->upd_received <= vol->upd_bytes);
334 if (vol->upd_received == vol->upd_bytes) { 369 if (vol->upd_received == vol->upd_bytes) {
335 /* The update is finished, clear the update marker */ 370 /* The update is finished, clear the update marker */
336 err = clear_update_marker(ubi, vol_id, vol->upd_bytes); 371 err = clear_update_marker(ubi, vol, vol->upd_bytes);
337 if (err) 372 if (err)
338 return err; 373 return err;
339 err = ubi_wl_flush(ubi); 374 err = ubi_wl_flush(ubi);
340 if (err == 0) { 375 if (err == 0) {
376 vol->updating = 0;
341 err = to_write; 377 err = to_write;
342 vfree(vol->upd_buf); 378 vfree(vol->upd_buf);
343 vol->updating = 0;
344 } 379 }
345 } 380 }
346 381
347 return err; 382 return err;
348} 383}
384
385/**
386 * ubi_more_leb_change_data - accept more data for atomic LEB change.
387 * @vol: volume description object
388 * @buf: write data (user-space memory buffer)
389 * @count: how much bytes to write
390 *
391 * This function accepts more data for the volume which is under the
392 * "atomic LEB change" operation. It may be called an arbitrary number of times
393 * until all data arrives. This function returns %0 in case of success, number
394 * of bytes written during the last call if the whole "atomic LEB change"
395 * operation has been successfully finished, and a negative error code in case
396 * of failure.
397 */
398int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
399 const void __user *buf, int count)
400{
401 int err;
402
403 dbg_msg("write %d of %lld bytes, %lld already passed",
404 count, vol->upd_bytes, vol->upd_received);
405
406 if (ubi->ro_mode)
407 return -EROFS;
408
409 if (vol->upd_received + count > vol->upd_bytes)
410 count = vol->upd_bytes - vol->upd_received;
411
412 err = copy_from_user(vol->upd_buf + vol->upd_received, buf, count);
413 if (err)
414 return -EFAULT;
415
416 vol->upd_received += count;
417
418 if (vol->upd_received == vol->upd_bytes) {
419 int len = ALIGN((int)vol->upd_bytes, ubi->min_io_size);
420
421 memset(vol->upd_buf + vol->upd_bytes, 0xFF, len - vol->upd_bytes);
422 len = ubi_calc_data_len(ubi, vol->upd_buf, len);
423 err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum,
424 vol->upd_buf, len, UBI_UNKNOWN);
425 if (err)
426 return err;
427 }
428
429 ubi_assert(vol->upd_received <= vol->upd_bytes);
430 if (vol->upd_received == vol->upd_bytes) {
431 vol->changing_leb = 0;
432 err = count;
433 vfree(vol->upd_buf);
434 }
435
436 return err;
437}
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 88629a320c2b..a3ca2257e601 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -63,21 +63,30 @@ static struct device_attribute attr_vol_upd_marker =
63 * B. process 2 removes volume Y; 63 * B. process 2 removes volume Y;
64 * C. process 1 starts reading the /<sysfs>/class/ubi/ubiX_Y/reserved_ebs file; 64 * C. process 1 starts reading the /<sysfs>/class/ubi/ubiX_Y/reserved_ebs file;
65 * 65 *
66 * What we want to do in a situation like that is to return error when the file 66 * In this situation, this function will return %-ENODEV because it will find
67 * is read. This is done by means of the 'removed' flag and the 'vol_lock' of 67 * out that the volume was removed from the @ubi->volumes array.
68 * the UBI volume description object.
69 */ 68 */
70static ssize_t vol_attribute_show(struct device *dev, 69static ssize_t vol_attribute_show(struct device *dev,
71 struct device_attribute *attr, char *buf) 70 struct device_attribute *attr, char *buf)
72{ 71{
73 int ret; 72 int ret;
74 struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev); 73 struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
74 struct ubi_device *ubi;
75 75
76 spin_lock(&vol->ubi->volumes_lock); 76 ubi = ubi_get_device(vol->ubi->ubi_num);
77 if (vol->removed) { 77 if (!ubi)
78 spin_unlock(&vol->ubi->volumes_lock); 78 return -ENODEV;
79
80 spin_lock(&ubi->volumes_lock);
81 if (!ubi->volumes[vol->vol_id]) {
82 spin_unlock(&ubi->volumes_lock);
83 ubi_put_device(ubi);
79 return -ENODEV; 84 return -ENODEV;
80 } 85 }
86 /* Take a reference to prevent volume removal */
87 vol->ref_count += 1;
88 spin_unlock(&ubi->volumes_lock);
89
81 if (attr == &attr_vol_reserved_ebs) 90 if (attr == &attr_vol_reserved_ebs)
82 ret = sprintf(buf, "%d\n", vol->reserved_pebs); 91 ret = sprintf(buf, "%d\n", vol->reserved_pebs);
83 else if (attr == &attr_vol_type) { 92 else if (attr == &attr_vol_type) {
@@ -94,15 +103,22 @@ static ssize_t vol_attribute_show(struct device *dev,
94 ret = sprintf(buf, "%d\n", vol->corrupted); 103 ret = sprintf(buf, "%d\n", vol->corrupted);
95 else if (attr == &attr_vol_alignment) 104 else if (attr == &attr_vol_alignment)
96 ret = sprintf(buf, "%d\n", vol->alignment); 105 ret = sprintf(buf, "%d\n", vol->alignment);
97 else if (attr == &attr_vol_usable_eb_size) { 106 else if (attr == &attr_vol_usable_eb_size)
98 ret = sprintf(buf, "%d\n", vol->usable_leb_size); 107 ret = sprintf(buf, "%d\n", vol->usable_leb_size);
99 } else if (attr == &attr_vol_data_bytes) 108 else if (attr == &attr_vol_data_bytes)
100 ret = sprintf(buf, "%lld\n", vol->used_bytes); 109 ret = sprintf(buf, "%lld\n", vol->used_bytes);
101 else if (attr == &attr_vol_upd_marker) 110 else if (attr == &attr_vol_upd_marker)
102 ret = sprintf(buf, "%d\n", vol->upd_marker); 111 ret = sprintf(buf, "%d\n", vol->upd_marker);
103 else 112 else
104 BUG(); 113 /* This must be a bug */
105 spin_unlock(&vol->ubi->volumes_lock); 114 ret = -EINVAL;
115
116 /* We've done the operation, drop volume and UBI device references */
117 spin_lock(&ubi->volumes_lock);
118 vol->ref_count -= 1;
119 ubi_assert(vol->ref_count >= 0);
120 spin_unlock(&ubi->volumes_lock);
121 ubi_put_device(ubi);
106 return ret; 122 return ret;
107} 123}
108 124
@@ -110,7 +126,7 @@ static ssize_t vol_attribute_show(struct device *dev,
110static void vol_release(struct device *dev) 126static void vol_release(struct device *dev)
111{ 127{
112 struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev); 128 struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
113 ubi_assert(vol->removed); 129
114 kfree(vol); 130 kfree(vol);
115} 131}
116 132
@@ -152,9 +168,7 @@ static int volume_sysfs_init(struct ubi_device *ubi, struct ubi_volume *vol)
152 if (err) 168 if (err)
153 return err; 169 return err;
154 err = device_create_file(&vol->dev, &attr_vol_upd_marker); 170 err = device_create_file(&vol->dev, &attr_vol_upd_marker);
155 if (err) 171 return err;
156 return err;
157 return 0;
158} 172}
159 173
160/** 174/**
@@ -180,16 +194,18 @@ static void volume_sysfs_close(struct ubi_volume *vol)
180 * @req: volume creation request 194 * @req: volume creation request
181 * 195 *
182 * This function creates volume described by @req. If @req->vol_id id 196 * This function creates volume described by @req. If @req->vol_id id
183 * %UBI_VOL_NUM_AUTO, this function automatically assigne ID to the new volume 197 * %UBI_VOL_NUM_AUTO, this function automatically assigns an ID to the new volume
184 * and saves it in @req->vol_id. Returns zero in case of success and a negative 198 * and saves it in @req->vol_id. Returns zero in case of success and a negative
185 * error code in case of failure. 199 * error code in case of failure. Note, the caller has to have the
200 * @ubi->volumes_mutex locked.
186 */ 201 */
187int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) 202int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
188{ 203{
189 int i, err, vol_id = req->vol_id; 204 int i, err, vol_id = req->vol_id, dont_free = 0;
190 struct ubi_volume *vol; 205 struct ubi_volume *vol;
191 struct ubi_vtbl_record vtbl_rec; 206 struct ubi_vtbl_record vtbl_rec;
192 uint64_t bytes; 207 uint64_t bytes;
208 dev_t dev;
193 209
194 if (ubi->ro_mode) 210 if (ubi->ro_mode)
195 return -EROFS; 211 return -EROFS;
@@ -199,7 +215,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
199 return -ENOMEM; 215 return -ENOMEM;
200 216
201 spin_lock(&ubi->volumes_lock); 217 spin_lock(&ubi->volumes_lock);
202
203 if (vol_id == UBI_VOL_NUM_AUTO) { 218 if (vol_id == UBI_VOL_NUM_AUTO) {
204 /* Find unused volume ID */ 219 /* Find unused volume ID */
205 dbg_msg("search for vacant volume ID"); 220 dbg_msg("search for vacant volume ID");
@@ -252,6 +267,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
252 } 267 }
253 ubi->avail_pebs -= vol->reserved_pebs; 268 ubi->avail_pebs -= vol->reserved_pebs;
254 ubi->rsvd_pebs += vol->reserved_pebs; 269 ubi->rsvd_pebs += vol->reserved_pebs;
270 spin_unlock(&ubi->volumes_lock);
255 271
256 vol->vol_id = vol_id; 272 vol->vol_id = vol_id;
257 vol->alignment = req->alignment; 273 vol->alignment = req->alignment;
@@ -259,10 +275,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
259 vol->vol_type = req->vol_type; 275 vol->vol_type = req->vol_type;
260 vol->name_len = req->name_len; 276 vol->name_len = req->name_len;
261 memcpy(vol->name, req->name, vol->name_len + 1); 277 memcpy(vol->name, req->name, vol->name_len + 1);
262 vol->exclusive = 1;
263 vol->ubi = ubi; 278 vol->ubi = ubi;
264 ubi->volumes[vol_id] = vol;
265 spin_unlock(&ubi->volumes_lock);
266 279
267 /* 280 /*
268 * Finish all pending erases because there may be some LEBs belonging 281 * Finish all pending erases because there may be some LEBs belonging
@@ -299,9 +312,10 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
299 /* Register character device for the volume */ 312 /* Register character device for the volume */
300 cdev_init(&vol->cdev, &ubi_vol_cdev_operations); 313 cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
301 vol->cdev.owner = THIS_MODULE; 314 vol->cdev.owner = THIS_MODULE;
302 err = cdev_add(&vol->cdev, MKDEV(ubi->major, vol_id + 1), 1); 315 dev = MKDEV(MAJOR(ubi->cdev.dev), vol_id + 1);
316 err = cdev_add(&vol->cdev, dev, 1);
303 if (err) { 317 if (err) {
304 ubi_err("cannot add character device for volume %d", vol_id); 318 ubi_err("cannot add character device");
305 goto out_mapping; 319 goto out_mapping;
306 } 320 }
307 321
@@ -311,12 +325,15 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
311 325
312 vol->dev.release = vol_release; 326 vol->dev.release = vol_release;
313 vol->dev.parent = &ubi->dev; 327 vol->dev.parent = &ubi->dev;
314 vol->dev.devt = MKDEV(ubi->major, vol->vol_id + 1); 328 vol->dev.devt = dev;
315 vol->dev.class = ubi_class; 329 vol->dev.class = ubi_class;
330
316 sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id); 331 sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id);
317 err = device_register(&vol->dev); 332 err = device_register(&vol->dev);
318 if (err) 333 if (err) {
334 ubi_err("cannot register device");
319 goto out_gluebi; 335 goto out_gluebi;
336 }
320 337
321 err = volume_sysfs_init(ubi, vol); 338 err = volume_sysfs_init(ubi, vol);
322 if (err) 339 if (err)
@@ -339,15 +356,27 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
339 goto out_sysfs; 356 goto out_sysfs;
340 357
341 spin_lock(&ubi->volumes_lock); 358 spin_lock(&ubi->volumes_lock);
359 ubi->volumes[vol_id] = vol;
342 ubi->vol_count += 1; 360 ubi->vol_count += 1;
343 vol->exclusive = 0;
344 spin_unlock(&ubi->volumes_lock); 361 spin_unlock(&ubi->volumes_lock);
345 362
346 paranoid_check_volumes(ubi); 363 paranoid_check_volumes(ubi);
347 return 0; 364 return 0;
348 365
366out_sysfs:
367 /*
368 * We have registered our device, we should not free the volume*
369 * description object in this function in case of an error - it is
370 * freed by the release function.
371 *
372 * Get device reference to prevent the release function from being
373 * called just after sysfs has been closed.
374 */
375 dont_free = 1;
376 get_device(&vol->dev);
377 volume_sysfs_close(vol);
349out_gluebi: 378out_gluebi:
350 err = ubi_destroy_gluebi(vol); 379 ubi_destroy_gluebi(vol);
351out_cdev: 380out_cdev:
352 cdev_del(&vol->cdev); 381 cdev_del(&vol->cdev);
353out_mapping: 382out_mapping:
@@ -356,26 +385,13 @@ out_acc:
356 spin_lock(&ubi->volumes_lock); 385 spin_lock(&ubi->volumes_lock);
357 ubi->rsvd_pebs -= vol->reserved_pebs; 386 ubi->rsvd_pebs -= vol->reserved_pebs;
358 ubi->avail_pebs += vol->reserved_pebs; 387 ubi->avail_pebs += vol->reserved_pebs;
359 ubi->volumes[vol_id] = NULL;
360out_unlock: 388out_unlock:
361 spin_unlock(&ubi->volumes_lock); 389 spin_unlock(&ubi->volumes_lock);
362 kfree(vol); 390 if (dont_free)
363 return err; 391 put_device(&vol->dev);
364 392 else
365 /* 393 kfree(vol);
366 * We are registered, so @vol is destroyed in the release function and 394 ubi_err("cannot create volume %d, error %d", vol_id, err);
367 * we have to de-initialize differently.
368 */
369out_sysfs:
370 err = ubi_destroy_gluebi(vol);
371 cdev_del(&vol->cdev);
372 kfree(vol->eba_tbl);
373 spin_lock(&ubi->volumes_lock);
374 ubi->rsvd_pebs -= vol->reserved_pebs;
375 ubi->avail_pebs += vol->reserved_pebs;
376 ubi->volumes[vol_id] = NULL;
377 spin_unlock(&ubi->volumes_lock);
378 volume_sysfs_close(vol);
379 return err; 395 return err;
380} 396}
381 397
@@ -385,7 +401,8 @@ out_sysfs:
385 * 401 *
386 * This function removes volume described by @desc. The volume has to be opened 402 * This function removes volume described by @desc. The volume has to be opened
387 * in "exclusive" mode. Returns zero in case of success and a negative error 403 * in "exclusive" mode. Returns zero in case of success and a negative error
388 * code in case of failure. 404 * code in case of failure. The caller has to have the @ubi->volumes_mutex
405 * locked.
389 */ 406 */
390int ubi_remove_volume(struct ubi_volume_desc *desc) 407int ubi_remove_volume(struct ubi_volume_desc *desc)
391{ 408{
@@ -400,30 +417,36 @@ int ubi_remove_volume(struct ubi_volume_desc *desc)
400 if (ubi->ro_mode) 417 if (ubi->ro_mode)
401 return -EROFS; 418 return -EROFS;
402 419
420 spin_lock(&ubi->volumes_lock);
421 if (vol->ref_count > 1) {
422 /*
423 * The volume is busy, probably someone is reading one of its
424 * sysfs files.
425 */
426 err = -EBUSY;
427 goto out_unlock;
428 }
429 ubi->volumes[vol_id] = NULL;
430 spin_unlock(&ubi->volumes_lock);
431
403 err = ubi_destroy_gluebi(vol); 432 err = ubi_destroy_gluebi(vol);
404 if (err) 433 if (err)
405 return err; 434 goto out_err;
406 435
407 err = ubi_change_vtbl_record(ubi, vol_id, NULL); 436 err = ubi_change_vtbl_record(ubi, vol_id, NULL);
408 if (err) 437 if (err)
409 return err; 438 goto out_err;
410 439
411 for (i = 0; i < vol->reserved_pebs; i++) { 440 for (i = 0; i < vol->reserved_pebs; i++) {
412 err = ubi_eba_unmap_leb(ubi, vol_id, i); 441 err = ubi_eba_unmap_leb(ubi, vol, i);
413 if (err) 442 if (err)
414 return err; 443 goto out_err;
415 } 444 }
416 445
417 spin_lock(&ubi->volumes_lock);
418 vol->removed = 1;
419 ubi->volumes[vol_id] = NULL;
420 spin_unlock(&ubi->volumes_lock);
421
422 kfree(vol->eba_tbl); 446 kfree(vol->eba_tbl);
423 vol->eba_tbl = NULL; 447 vol->eba_tbl = NULL;
424 cdev_del(&vol->cdev); 448 cdev_del(&vol->cdev);
425 volume_sysfs_close(vol); 449 volume_sysfs_close(vol);
426 kfree(desc);
427 450
428 spin_lock(&ubi->volumes_lock); 451 spin_lock(&ubi->volumes_lock);
429 ubi->rsvd_pebs -= reserved_pebs; 452 ubi->rsvd_pebs -= reserved_pebs;
@@ -441,8 +464,15 @@ int ubi_remove_volume(struct ubi_volume_desc *desc)
441 spin_unlock(&ubi->volumes_lock); 464 spin_unlock(&ubi->volumes_lock);
442 465
443 paranoid_check_volumes(ubi); 466 paranoid_check_volumes(ubi);
444 module_put(THIS_MODULE);
445 return 0; 467 return 0;
468
469out_err:
470 ubi_err("cannot remove volume %d, error %d", vol_id, err);
471 spin_lock(&ubi->volumes_lock);
472 ubi->volumes[vol_id] = vol;
473out_unlock:
474 spin_unlock(&ubi->volumes_lock);
475 return err;
446} 476}
447 477
448/** 478/**
@@ -450,8 +480,9 @@ int ubi_remove_volume(struct ubi_volume_desc *desc)
450 * @desc: volume descriptor 480 * @desc: volume descriptor
451 * @reserved_pebs: new size in physical eraseblocks 481 * @reserved_pebs: new size in physical eraseblocks
452 * 482 *
453 * This function returns zero in case of success, and a negative error code in 483 * This function re-sizes the volume and returns zero in case of success, and a
454 * case of failure. 484 * negative error code in case of failure. The caller has to have the
485 * @ubi->volumes_mutex locked.
455 */ 486 */
456int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) 487int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
457{ 488{
@@ -466,8 +497,6 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
466 497
467 dbg_msg("re-size volume %d to from %d to %d PEBs", 498 dbg_msg("re-size volume %d to from %d to %d PEBs",
468 vol_id, vol->reserved_pebs, reserved_pebs); 499 vol_id, vol->reserved_pebs, reserved_pebs);
469 ubi_assert(desc->mode == UBI_EXCLUSIVE);
470 ubi_assert(vol == ubi->volumes[vol_id]);
471 500
472 if (vol->vol_type == UBI_STATIC_VOLUME && 501 if (vol->vol_type == UBI_STATIC_VOLUME &&
473 reserved_pebs < vol->used_ebs) { 502 reserved_pebs < vol->used_ebs) {
@@ -487,6 +516,14 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
487 for (i = 0; i < reserved_pebs; i++) 516 for (i = 0; i < reserved_pebs; i++)
488 new_mapping[i] = UBI_LEB_UNMAPPED; 517 new_mapping[i] = UBI_LEB_UNMAPPED;
489 518
519 spin_lock(&ubi->volumes_lock);
520 if (vol->ref_count > 1) {
521 spin_unlock(&ubi->volumes_lock);
522 err = -EBUSY;
523 goto out_free;
524 }
525 spin_unlock(&ubi->volumes_lock);
526
490 /* Reserve physical eraseblocks */ 527 /* Reserve physical eraseblocks */
491 pebs = reserved_pebs - vol->reserved_pebs; 528 pebs = reserved_pebs - vol->reserved_pebs;
492 if (pebs > 0) { 529 if (pebs > 0) {
@@ -516,7 +553,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
516 553
517 if (pebs < 0) { 554 if (pebs < 0) {
518 for (i = 0; i < -pebs; i++) { 555 for (i = 0; i < -pebs; i++) {
519 err = ubi_eba_unmap_leb(ubi, vol_id, reserved_pebs + i); 556 err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
520 if (err) 557 if (err)
521 goto out_acc; 558 goto out_acc;
522 } 559 }
@@ -565,27 +602,28 @@ out_free:
565/** 602/**
566 * ubi_add_volume - add volume. 603 * ubi_add_volume - add volume.
567 * @ubi: UBI device description object 604 * @ubi: UBI device description object
568 * @vol_id: volume ID 605 * @vol: volume description object
569 * 606 *
570 * This function adds an existin volume and initializes all its data 607 * This function adds an existing volume and initializes all its data
571 * structures. Returnes zero in case of success and a negative error code in 608 * structures. Returns zero in case of success and a negative error code in
572 * case of failure. 609 * case of failure.
573 */ 610 */
574int ubi_add_volume(struct ubi_device *ubi, int vol_id) 611int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
575{ 612{
576 int err; 613 int err, vol_id = vol->vol_id;
577 struct ubi_volume *vol = ubi->volumes[vol_id]; 614 dev_t dev;
578 615
579 dbg_msg("add volume %d", vol_id); 616 dbg_msg("add volume %d", vol_id);
580 ubi_dbg_dump_vol_info(vol); 617 ubi_dbg_dump_vol_info(vol);
581 ubi_assert(vol);
582 618
583 /* Register character device for the volume */ 619 /* Register character device for the volume */
584 cdev_init(&vol->cdev, &ubi_vol_cdev_operations); 620 cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
585 vol->cdev.owner = THIS_MODULE; 621 vol->cdev.owner = THIS_MODULE;
586 err = cdev_add(&vol->cdev, MKDEV(ubi->major, vol->vol_id + 1), 1); 622 dev = MKDEV(MAJOR(ubi->cdev.dev), vol->vol_id + 1);
623 err = cdev_add(&vol->cdev, dev, 1);
587 if (err) { 624 if (err) {
588 ubi_err("cannot add character device for volume %d", vol_id); 625 ubi_err("cannot add character device for volume %d, error %d",
626 vol_id, err);
589 return err; 627 return err;
590 } 628 }
591 629
@@ -595,7 +633,7 @@ int ubi_add_volume(struct ubi_device *ubi, int vol_id)
595 633
596 vol->dev.release = vol_release; 634 vol->dev.release = vol_release;
597 vol->dev.parent = &ubi->dev; 635 vol->dev.parent = &ubi->dev;
598 vol->dev.devt = MKDEV(ubi->major, vol->vol_id + 1); 636 vol->dev.devt = dev;
599 vol->dev.class = ubi_class; 637 vol->dev.class = ubi_class;
600 sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id); 638 sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id);
601 err = device_register(&vol->dev); 639 err = device_register(&vol->dev);
@@ -623,22 +661,19 @@ out_cdev:
623/** 661/**
624 * ubi_free_volume - free volume. 662 * ubi_free_volume - free volume.
625 * @ubi: UBI device description object 663 * @ubi: UBI device description object
626 * @vol_id: volume ID 664 * @vol: volume description object
627 * 665 *
628 * This function frees all resources for volume @vol_id but does not remove it. 666 * This function frees all resources for volume @vol but does not remove it.
629 * Used only when the UBI device is detached. 667 * Used only when the UBI device is detached.
630 */ 668 */
631void ubi_free_volume(struct ubi_device *ubi, int vol_id) 669void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
632{ 670{
633 int err; 671 int err;
634 struct ubi_volume *vol = ubi->volumes[vol_id];
635 672
636 dbg_msg("free volume %d", vol_id); 673 dbg_msg("free volume %d", vol->vol_id);
637 ubi_assert(vol);
638 674
639 vol->removed = 1; 675 ubi->volumes[vol->vol_id] = NULL;
640 err = ubi_destroy_gluebi(vol); 676 err = ubi_destroy_gluebi(vol);
641 ubi->volumes[vol_id] = NULL;
642 cdev_del(&vol->cdev); 677 cdev_del(&vol->cdev);
643 volume_sysfs_close(vol); 678 volume_sysfs_close(vol);
644} 679}
@@ -708,11 +743,6 @@ static void paranoid_check_volume(struct ubi_device *ubi, int vol_id)
708 goto fail; 743 goto fail;
709 } 744 }
710 745
711 if (vol->upd_marker != 0 && vol->upd_marker != 1) {
712 ubi_err("bad upd_marker");
713 goto fail;
714 }
715
716 if (vol->upd_marker && vol->corrupted) { 746 if (vol->upd_marker && vol->corrupted) {
717 dbg_err("update marker and corrupted simultaneously"); 747 dbg_err("update marker and corrupted simultaneously");
718 goto fail; 748 goto fail;
@@ -747,7 +777,7 @@ static void paranoid_check_volume(struct ubi_device *ubi, int vol_id)
747 777
748 n = (long long)vol->used_ebs * vol->usable_leb_size; 778 n = (long long)vol->used_ebs * vol->usable_leb_size;
749 if (vol->vol_type == UBI_DYNAMIC_VOLUME) { 779 if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
750 if (vol->corrupted != 0) { 780 if (vol->corrupted) {
751 ubi_err("corrupted dynamic volume"); 781 ubi_err("corrupted dynamic volume");
752 goto fail; 782 goto fail;
753 } 783 }
@@ -764,10 +794,6 @@ static void paranoid_check_volume(struct ubi_device *ubi, int vol_id)
764 goto fail; 794 goto fail;
765 } 795 }
766 } else { 796 } else {
767 if (vol->corrupted != 0 && vol->corrupted != 1) {
768 ubi_err("bad corrupted");
769 goto fail;
770 }
771 if (vol->used_ebs < 0 || vol->used_ebs > vol->reserved_pebs) { 797 if (vol->used_ebs < 0 || vol->used_ebs > vol->reserved_pebs) {
772 ubi_err("bad used_ebs"); 798 ubi_err("bad used_ebs");
773 goto fail; 799 goto fail;
@@ -820,9 +846,7 @@ static void paranoid_check_volumes(struct ubi_device *ubi)
820{ 846{
821 int i; 847 int i;
822 848
823 mutex_lock(&ubi->vtbl_mutex);
824 for (i = 0; i < ubi->vtbl_slots; i++) 849 for (i = 0; i < ubi->vtbl_slots; i++)
825 paranoid_check_volume(ubi, i); 850 paranoid_check_volume(ubi, i);
826 mutex_unlock(&ubi->vtbl_mutex);
827} 851}
828#endif 852#endif
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 25b3bd61c7ec..56fc3fbce838 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -86,8 +86,10 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
86{ 86{
87 int i, err; 87 int i, err;
88 uint32_t crc; 88 uint32_t crc;
89 struct ubi_volume *layout_vol;
89 90
90 ubi_assert(idx >= 0 && idx < ubi->vtbl_slots); 91 ubi_assert(idx >= 0 && idx < ubi->vtbl_slots);
92 layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)];
91 93
92 if (!vtbl_rec) 94 if (!vtbl_rec)
93 vtbl_rec = &empty_vtbl_record; 95 vtbl_rec = &empty_vtbl_record;
@@ -96,31 +98,25 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
96 vtbl_rec->crc = cpu_to_be32(crc); 98 vtbl_rec->crc = cpu_to_be32(crc);
97 } 99 }
98 100
99 mutex_lock(&ubi->vtbl_mutex);
100 memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record)); 101 memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record));
101 for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) { 102 for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
102 err = ubi_eba_unmap_leb(ubi, UBI_LAYOUT_VOL_ID, i); 103 err = ubi_eba_unmap_leb(ubi, layout_vol, i);
103 if (err) { 104 if (err)
104 mutex_unlock(&ubi->vtbl_mutex);
105 return err; 105 return err;
106 } 106
107 err = ubi_eba_write_leb(ubi, UBI_LAYOUT_VOL_ID, i, ubi->vtbl, 0, 107 err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
108 ubi->vtbl_size, UBI_LONGTERM); 108 ubi->vtbl_size, UBI_LONGTERM);
109 if (err) { 109 if (err)
110 mutex_unlock(&ubi->vtbl_mutex);
111 return err; 110 return err;
112 }
113 } 111 }
114 112
115 paranoid_vtbl_check(ubi); 113 paranoid_vtbl_check(ubi);
116 mutex_unlock(&ubi->vtbl_mutex); 114 return 0;
117 return ubi_wl_flush(ubi);
118} 115}
119 116
120/** 117/**
121 * vol_til_check - check if volume table is not corrupted and contains sensible 118 * vtbl_check - check if volume table is not corrupted and contains sensible
122 * data. 119 * data.
123 *
124 * @ubi: UBI device description object 120 * @ubi: UBI device description object
125 * @vtbl: volume table 121 * @vtbl: volume table
126 * 122 *
@@ -273,7 +269,7 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si,
273 * this volume table copy was found during scanning. It has to be wiped 269 * this volume table copy was found during scanning. It has to be wiped
274 * out. 270 * out.
275 */ 271 */
276 sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOL_ID); 272 sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOLUME_ID);
277 if (sv) 273 if (sv)
278 old_seb = ubi_scan_find_seb(sv, copy); 274 old_seb = ubi_scan_find_seb(sv, copy);
279 275
@@ -285,7 +281,7 @@ retry:
285 } 281 }
286 282
287 vid_hdr->vol_type = UBI_VID_DYNAMIC; 283 vid_hdr->vol_type = UBI_VID_DYNAMIC;
288 vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOL_ID); 284 vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOLUME_ID);
289 vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT; 285 vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT;
290 vid_hdr->data_size = vid_hdr->used_ebs = 286 vid_hdr->data_size = vid_hdr->used_ebs =
291 vid_hdr->data_pad = cpu_to_be32(0); 287 vid_hdr->data_pad = cpu_to_be32(0);
@@ -518,6 +514,17 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
518 vol->name[vol->name_len] = '\0'; 514 vol->name[vol->name_len] = '\0';
519 vol->vol_id = i; 515 vol->vol_id = i;
520 516
517 if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) {
518 /* Auto re-size flag may be set only for one volume */
519 if (ubi->autoresize_vol_id != -1) {
520 ubi_err("more then one auto-resize volume (%d "
521 "and %d)", ubi->autoresize_vol_id, i);
522 return -EINVAL;
523 }
524
525 ubi->autoresize_vol_id = i;
526 }
527
521 ubi_assert(!ubi->volumes[i]); 528 ubi_assert(!ubi->volumes[i]);
522 ubi->volumes[i] = vol; 529 ubi->volumes[i] = vol;
523 ubi->vol_count += 1; 530 ubi->vol_count += 1;
@@ -568,6 +575,7 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
568 vol->last_eb_bytes = sv->last_data_size; 575 vol->last_eb_bytes = sv->last_data_size;
569 } 576 }
570 577
578 /* And add the layout volume */
571 vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL); 579 vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
572 if (!vol) 580 if (!vol)
573 return -ENOMEM; 581 return -ENOMEM;
@@ -582,7 +590,8 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
582 vol->last_eb_bytes = vol->reserved_pebs; 590 vol->last_eb_bytes = vol->reserved_pebs;
583 vol->used_bytes = 591 vol->used_bytes =
584 (long long)vol->used_ebs * (ubi->leb_size - vol->data_pad); 592 (long long)vol->used_ebs * (ubi->leb_size - vol->data_pad);
585 vol->vol_id = UBI_LAYOUT_VOL_ID; 593 vol->vol_id = UBI_LAYOUT_VOLUME_ID;
594 vol->ref_count = 1;
586 595
587 ubi_assert(!ubi->volumes[i]); 596 ubi_assert(!ubi->volumes[i]);
588 ubi->volumes[vol_id2idx(ubi, vol->vol_id)] = vol; 597 ubi->volumes[vol_id2idx(ubi, vol->vol_id)] = vol;
@@ -734,7 +743,7 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
734 ubi->vtbl_size = ubi->vtbl_slots * UBI_VTBL_RECORD_SIZE; 743 ubi->vtbl_size = ubi->vtbl_slots * UBI_VTBL_RECORD_SIZE;
735 ubi->vtbl_size = ALIGN(ubi->vtbl_size, ubi->min_io_size); 744 ubi->vtbl_size = ALIGN(ubi->vtbl_size, ubi->min_io_size);
736 745
737 sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOL_ID); 746 sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOLUME_ID);
738 if (!sv) { 747 if (!sv) {
739 /* 748 /*
740 * No logical eraseblocks belonging to the layout volume were 749 * No logical eraseblocks belonging to the layout volume were
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 6330c8cc72b5..a471a491f0ab 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -117,21 +117,6 @@
117#define WL_MAX_FAILURES 32 117#define WL_MAX_FAILURES 32
118 118
119/** 119/**
120 * struct ubi_wl_entry - wear-leveling entry.
121 * @rb: link in the corresponding RB-tree
122 * @ec: erase counter
123 * @pnum: physical eraseblock number
124 *
125 * Each physical eraseblock has a corresponding &struct wl_entry object which
126 * may be kept in different RB-trees.
127 */
128struct ubi_wl_entry {
129 struct rb_node rb;
130 int ec;
131 int pnum;
132};
133
134/**
135 * struct ubi_wl_prot_entry - PEB protection entry. 120 * struct ubi_wl_prot_entry - PEB protection entry.
136 * @rb_pnum: link in the @wl->prot.pnum RB-tree 121 * @rb_pnum: link in the @wl->prot.pnum RB-tree
137 * @rb_aec: link in the @wl->prot.aec RB-tree 122 * @rb_aec: link in the @wl->prot.aec RB-tree
@@ -216,9 +201,6 @@ static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
216#define paranoid_check_in_wl_tree(e, root) 201#define paranoid_check_in_wl_tree(e, root)
217#endif 202#endif
218 203
219/* Slab cache for wear-leveling entries */
220static struct kmem_cache *wl_entries_slab;
221
222/** 204/**
223 * wl_tree_add - add a wear-leveling entry to a WL RB-tree. 205 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
224 * @e: the wear-leveling entry to add 206 * @e: the wear-leveling entry to add
@@ -267,15 +249,26 @@ static int do_work(struct ubi_device *ubi)
267 int err; 249 int err;
268 struct ubi_work *wrk; 250 struct ubi_work *wrk;
269 251
270 spin_lock(&ubi->wl_lock); 252 cond_resched();
271 253
254 /*
255 * @ubi->work_sem is used to synchronize with the workers. Workers take
256 * it in read mode, so many of them may be doing works at a time. But
257 * the queue flush code has to be sure the whole queue of works is
258 * done, and it takes the mutex in write mode.
259 */
260 down_read(&ubi->work_sem);
261 spin_lock(&ubi->wl_lock);
272 if (list_empty(&ubi->works)) { 262 if (list_empty(&ubi->works)) {
273 spin_unlock(&ubi->wl_lock); 263 spin_unlock(&ubi->wl_lock);
264 up_read(&ubi->work_sem);
274 return 0; 265 return 0;
275 } 266 }
276 267
277 wrk = list_entry(ubi->works.next, struct ubi_work, list); 268 wrk = list_entry(ubi->works.next, struct ubi_work, list);
278 list_del(&wrk->list); 269 list_del(&wrk->list);
270 ubi->works_count -= 1;
271 ubi_assert(ubi->works_count >= 0);
279 spin_unlock(&ubi->wl_lock); 272 spin_unlock(&ubi->wl_lock);
280 273
281 /* 274 /*
@@ -286,11 +279,8 @@ static int do_work(struct ubi_device *ubi)
286 err = wrk->func(ubi, wrk, 0); 279 err = wrk->func(ubi, wrk, 0);
287 if (err) 280 if (err)
288 ubi_err("work failed with error code %d", err); 281 ubi_err("work failed with error code %d", err);
282 up_read(&ubi->work_sem);
289 283
290 spin_lock(&ubi->wl_lock);
291 ubi->works_count -= 1;
292 ubi_assert(ubi->works_count >= 0);
293 spin_unlock(&ubi->wl_lock);
294 return err; 284 return err;
295} 285}
296 286
@@ -549,8 +539,12 @@ retry:
549 * prot_tree_del - remove a physical eraseblock from the protection trees 539 * prot_tree_del - remove a physical eraseblock from the protection trees
550 * @ubi: UBI device description object 540 * @ubi: UBI device description object
551 * @pnum: the physical eraseblock to remove 541 * @pnum: the physical eraseblock to remove
542 *
543 * This function returns PEB @pnum from the protection trees and returns zero
544 * in case of success and %-ENODEV if the PEB was not found in the protection
545 * trees.
552 */ 546 */
553static void prot_tree_del(struct ubi_device *ubi, int pnum) 547static int prot_tree_del(struct ubi_device *ubi, int pnum)
554{ 548{
555 struct rb_node *p; 549 struct rb_node *p;
556 struct ubi_wl_prot_entry *pe = NULL; 550 struct ubi_wl_prot_entry *pe = NULL;
@@ -561,7 +555,7 @@ static void prot_tree_del(struct ubi_device *ubi, int pnum)
561 pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum); 555 pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum);
562 556
563 if (pnum == pe->e->pnum) 557 if (pnum == pe->e->pnum)
564 break; 558 goto found;
565 559
566 if (pnum < pe->e->pnum) 560 if (pnum < pe->e->pnum)
567 p = p->rb_left; 561 p = p->rb_left;
@@ -569,10 +563,14 @@ static void prot_tree_del(struct ubi_device *ubi, int pnum)
569 p = p->rb_right; 563 p = p->rb_right;
570 } 564 }
571 565
566 return -ENODEV;
567
568found:
572 ubi_assert(pe->e->pnum == pnum); 569 ubi_assert(pe->e->pnum == pnum);
573 rb_erase(&pe->rb_aec, &ubi->prot.aec); 570 rb_erase(&pe->rb_aec, &ubi->prot.aec);
574 rb_erase(&pe->rb_pnum, &ubi->prot.pnum); 571 rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
575 kfree(pe); 572 kfree(pe);
573 return 0;
576} 574}
577 575
578/** 576/**
@@ -744,7 +742,8 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
744static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, 742static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
745 int cancel) 743 int cancel)
746{ 744{
747 int err, put = 0; 745 int err, put = 0, scrubbing = 0, protect = 0;
746 struct ubi_wl_prot_entry *uninitialized_var(pe);
748 struct ubi_wl_entry *e1, *e2; 747 struct ubi_wl_entry *e1, *e2;
749 struct ubi_vid_hdr *vid_hdr; 748 struct ubi_vid_hdr *vid_hdr;
750 749
@@ -757,21 +756,17 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
757 if (!vid_hdr) 756 if (!vid_hdr)
758 return -ENOMEM; 757 return -ENOMEM;
759 758
759 mutex_lock(&ubi->move_mutex);
760 spin_lock(&ubi->wl_lock); 760 spin_lock(&ubi->wl_lock);
761 ubi_assert(!ubi->move_from && !ubi->move_to);
762 ubi_assert(!ubi->move_to_put);
761 763
762 /* 764 if (!ubi->free.rb_node ||
763 * Only one WL worker at a time is supported at this implementation, so
764 * make sure a PEB is not being moved already.
765 */
766 if (ubi->move_to || !ubi->free.rb_node ||
767 (!ubi->used.rb_node && !ubi->scrub.rb_node)) { 765 (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
768 /* 766 /*
769 * Only one WL worker at a time is supported at this 767 * No free physical eraseblocks? Well, they must be waiting in
770 * implementation, so if a LEB is already being moved, cancel. 768 * the queue to be erased. Cancel movement - it will be
771 * 769 * triggered again when a free physical eraseblock appears.
772 * No free physical eraseblocks? Well, we cancel wear-leveling
773 * then. It will be triggered again when a free physical
774 * eraseblock appears.
775 * 770 *
776 * No used physical eraseblocks? They must be temporarily 771 * No used physical eraseblocks? They must be temporarily
777 * protected from being moved. They will be moved to the 772 * protected from being moved. They will be moved to the
@@ -780,10 +775,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
780 */ 775 */
781 dbg_wl("cancel WL, a list is empty: free %d, used %d", 776 dbg_wl("cancel WL, a list is empty: free %d, used %d",
782 !ubi->free.rb_node, !ubi->used.rb_node); 777 !ubi->free.rb_node, !ubi->used.rb_node);
783 ubi->wl_scheduled = 0; 778 goto out_cancel;
784 spin_unlock(&ubi->wl_lock);
785 ubi_free_vid_hdr(ubi, vid_hdr);
786 return 0;
787 } 779 }
788 780
789 if (!ubi->scrub.rb_node) { 781 if (!ubi->scrub.rb_node) {
@@ -798,27 +790,24 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
798 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) { 790 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
799 dbg_wl("no WL needed: min used EC %d, max free EC %d", 791 dbg_wl("no WL needed: min used EC %d, max free EC %d",
800 e1->ec, e2->ec); 792 e1->ec, e2->ec);
801 ubi->wl_scheduled = 0; 793 goto out_cancel;
802 spin_unlock(&ubi->wl_lock);
803 ubi_free_vid_hdr(ubi, vid_hdr);
804 return 0;
805 } 794 }
806 paranoid_check_in_wl_tree(e1, &ubi->used); 795 paranoid_check_in_wl_tree(e1, &ubi->used);
807 rb_erase(&e1->rb, &ubi->used); 796 rb_erase(&e1->rb, &ubi->used);
808 dbg_wl("move PEB %d EC %d to PEB %d EC %d", 797 dbg_wl("move PEB %d EC %d to PEB %d EC %d",
809 e1->pnum, e1->ec, e2->pnum, e2->ec); 798 e1->pnum, e1->ec, e2->pnum, e2->ec);
810 } else { 799 } else {
800 /* Perform scrubbing */
801 scrubbing = 1;
811 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb); 802 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
812 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); 803 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
813 paranoid_check_in_wl_tree(e1, &ubi->scrub); 804 paranoid_check_in_wl_tree(e1, &ubi->scrub);
814 rb_erase(&e1->rb, &ubi->scrub); 805 rb_erase(&e1->rb, &ubi->scrub);
815 dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum); 806 dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
816 } 807 }
817 808
818 paranoid_check_in_wl_tree(e2, &ubi->free); 809 paranoid_check_in_wl_tree(e2, &ubi->free);
819 rb_erase(&e2->rb, &ubi->free); 810 rb_erase(&e2->rb, &ubi->free);
820 ubi_assert(!ubi->move_from && !ubi->move_to);
821 ubi_assert(!ubi->move_to_put && !ubi->move_from_put);
822 ubi->move_from = e1; 811 ubi->move_from = e1;
823 ubi->move_to = e2; 812 ubi->move_to = e2;
824 spin_unlock(&ubi->wl_lock); 813 spin_unlock(&ubi->wl_lock);
@@ -828,6 +817,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
828 * We so far do not know which logical eraseblock our physical 817 * We so far do not know which logical eraseblock our physical
829 * eraseblock (@e1) belongs to. We have to read the volume identifier 818 * eraseblock (@e1) belongs to. We have to read the volume identifier
830 * header first. 819 * header first.
820 *
821 * Note, we are protected from this PEB being unmapped and erased. The
822 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
823 * which is being moved was unmapped.
831 */ 824 */
832 825
833 err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0); 826 err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
@@ -842,32 +835,51 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
842 * likely have the VID header in place. 835 * likely have the VID header in place.
843 */ 836 */
844 dbg_wl("PEB %d has no VID header", e1->pnum); 837 dbg_wl("PEB %d has no VID header", e1->pnum);
845 err = 0; 838 goto out_not_moved;
846 } else {
847 ubi_err("error %d while reading VID header from PEB %d",
848 err, e1->pnum);
849 if (err > 0)
850 err = -EIO;
851 } 839 }
852 goto error; 840
841 ubi_err("error %d while reading VID header from PEB %d",
842 err, e1->pnum);
843 if (err > 0)
844 err = -EIO;
845 goto out_error;
853 } 846 }
854 847
855 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr); 848 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
856 if (err) { 849 if (err) {
857 if (err == UBI_IO_BITFLIPS) 850
858 err = 0; 851 if (err < 0)
859 goto error; 852 goto out_error;
853 if (err == 1)
854 goto out_not_moved;
855
856 /*
857 * For some reason the LEB was not moved - it might be because
858 * the volume is being deleted. We should prevent this PEB from
859 * being selected for wear-levelling movement for some "time",
860 * so put it to the protection tree.
861 */
862
863 dbg_wl("cancelled moving PEB %d", e1->pnum);
864 pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
865 if (!pe) {
866 err = -ENOMEM;
867 goto out_error;
868 }
869
870 protect = 1;
860 } 871 }
861 872
862 ubi_free_vid_hdr(ubi, vid_hdr); 873 ubi_free_vid_hdr(ubi, vid_hdr);
863 spin_lock(&ubi->wl_lock); 874 spin_lock(&ubi->wl_lock);
875 if (protect)
876 prot_tree_add(ubi, e1, pe, protect);
864 if (!ubi->move_to_put) 877 if (!ubi->move_to_put)
865 wl_tree_add(e2, &ubi->used); 878 wl_tree_add(e2, &ubi->used);
866 else 879 else
867 put = 1; 880 put = 1;
868 ubi->move_from = ubi->move_to = NULL; 881 ubi->move_from = ubi->move_to = NULL;
869 ubi->move_from_put = ubi->move_to_put = 0; 882 ubi->move_to_put = ubi->wl_scheduled = 0;
870 ubi->wl_scheduled = 0;
871 spin_unlock(&ubi->wl_lock); 883 spin_unlock(&ubi->wl_lock);
872 884
873 if (put) { 885 if (put) {
@@ -877,62 +889,67 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
877 */ 889 */
878 dbg_wl("PEB %d was put meanwhile, erase", e2->pnum); 890 dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
879 err = schedule_erase(ubi, e2, 0); 891 err = schedule_erase(ubi, e2, 0);
880 if (err) { 892 if (err)
881 kmem_cache_free(wl_entries_slab, e2); 893 goto out_error;
882 ubi_ro_mode(ubi);
883 }
884 } 894 }
885 895
886 err = schedule_erase(ubi, e1, 0); 896 if (!protect) {
887 if (err) { 897 err = schedule_erase(ubi, e1, 0);
888 kmem_cache_free(wl_entries_slab, e1); 898 if (err)
889 ubi_ro_mode(ubi); 899 goto out_error;
890 } 900 }
891 901
902
892 dbg_wl("done"); 903 dbg_wl("done");
893 return err; 904 mutex_unlock(&ubi->move_mutex);
905 return 0;
894 906
895 /* 907 /*
896 * Some error occurred. @e1 was not changed, so return it back. @e2 908 * For some reasons the LEB was not moved, might be an error, might be
897 * might be changed, schedule it for erasure. 909 * something else. @e1 was not changed, so return it back. @e2 might
910 * be changed, schedule it for erasure.
898 */ 911 */
899error: 912out_not_moved:
900 if (err)
901 dbg_wl("error %d occurred, cancel operation", err);
902 ubi_assert(err <= 0);
903
904 ubi_free_vid_hdr(ubi, vid_hdr); 913 ubi_free_vid_hdr(ubi, vid_hdr);
905 spin_lock(&ubi->wl_lock); 914 spin_lock(&ubi->wl_lock);
906 ubi->wl_scheduled = 0; 915 if (scrubbing)
907 if (ubi->move_from_put) 916 wl_tree_add(e1, &ubi->scrub);
908 put = 1;
909 else 917 else
910 wl_tree_add(e1, &ubi->used); 918 wl_tree_add(e1, &ubi->used);
911 ubi->move_from = ubi->move_to = NULL; 919 ubi->move_from = ubi->move_to = NULL;
912 ubi->move_from_put = ubi->move_to_put = 0; 920 ubi->move_to_put = ubi->wl_scheduled = 0;
913 spin_unlock(&ubi->wl_lock); 921 spin_unlock(&ubi->wl_lock);
914 922
915 if (put) {
916 /*
917 * Well, the target PEB was put meanwhile, schedule it for
918 * erasure.
919 */
920 dbg_wl("PEB %d was put meanwhile, erase", e1->pnum);
921 err = schedule_erase(ubi, e1, 0);
922 if (err) {
923 kmem_cache_free(wl_entries_slab, e1);
924 ubi_ro_mode(ubi);
925 }
926 }
927
928 err = schedule_erase(ubi, e2, 0); 923 err = schedule_erase(ubi, e2, 0);
929 if (err) { 924 if (err)
930 kmem_cache_free(wl_entries_slab, e2); 925 goto out_error;
931 ubi_ro_mode(ubi); 926
932 } 927 mutex_unlock(&ubi->move_mutex);
928 return 0;
929
930out_error:
931 ubi_err("error %d while moving PEB %d to PEB %d",
932 err, e1->pnum, e2->pnum);
933 933
934 yield(); 934 ubi_free_vid_hdr(ubi, vid_hdr);
935 spin_lock(&ubi->wl_lock);
936 ubi->move_from = ubi->move_to = NULL;
937 ubi->move_to_put = ubi->wl_scheduled = 0;
938 spin_unlock(&ubi->wl_lock);
939
940 kmem_cache_free(ubi_wl_entry_slab, e1);
941 kmem_cache_free(ubi_wl_entry_slab, e2);
942 ubi_ro_mode(ubi);
943
944 mutex_unlock(&ubi->move_mutex);
935 return err; 945 return err;
946
947out_cancel:
948 ubi->wl_scheduled = 0;
949 spin_unlock(&ubi->wl_lock);
950 mutex_unlock(&ubi->move_mutex);
951 ubi_free_vid_hdr(ubi, vid_hdr);
952 return 0;
936} 953}
937 954
938/** 955/**
@@ -1020,7 +1037,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1020 if (cancel) { 1037 if (cancel) {
1021 dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec); 1038 dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
1022 kfree(wl_wrk); 1039 kfree(wl_wrk);
1023 kmem_cache_free(wl_entries_slab, e); 1040 kmem_cache_free(ubi_wl_entry_slab, e);
1024 return 0; 1041 return 0;
1025 } 1042 }
1026 1043
@@ -1049,7 +1066,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1049 1066
1050 ubi_err("failed to erase PEB %d, error %d", pnum, err); 1067 ubi_err("failed to erase PEB %d, error %d", pnum, err);
1051 kfree(wl_wrk); 1068 kfree(wl_wrk);
1052 kmem_cache_free(wl_entries_slab, e); 1069 kmem_cache_free(ubi_wl_entry_slab, e);
1053 1070
1054 if (err == -EINTR || err == -ENOMEM || err == -EAGAIN || 1071 if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
1055 err == -EBUSY) { 1072 err == -EBUSY) {
@@ -1119,8 +1136,7 @@ out_ro:
1119} 1136}
1120 1137
1121/** 1138/**
1122 * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling 1139 * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling unit.
1123 * unit.
1124 * @ubi: UBI device description object 1140 * @ubi: UBI device description object
1125 * @pnum: physical eraseblock to return 1141 * @pnum: physical eraseblock to return
1126 * @torture: if this physical eraseblock has to be tortured 1142 * @torture: if this physical eraseblock has to be tortured
@@ -1128,7 +1144,7 @@ out_ro:
1128 * This function is called to return physical eraseblock @pnum to the pool of 1144 * This function is called to return physical eraseblock @pnum to the pool of
1129 * free physical eraseblocks. The @torture flag has to be set if an I/O error 1145 * free physical eraseblocks. The @torture flag has to be set if an I/O error
1130 * occurred to this @pnum and it has to be tested. This function returns zero 1146 * occurred to this @pnum and it has to be tested. This function returns zero
1131 * in case of success and a negative error code in case of failure. 1147 * in case of success, and a negative error code in case of failure.
1132 */ 1148 */
1133int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture) 1149int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
1134{ 1150{
@@ -1139,8 +1155,8 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
1139 ubi_assert(pnum >= 0); 1155 ubi_assert(pnum >= 0);
1140 ubi_assert(pnum < ubi->peb_count); 1156 ubi_assert(pnum < ubi->peb_count);
1141 1157
1158retry:
1142 spin_lock(&ubi->wl_lock); 1159 spin_lock(&ubi->wl_lock);
1143
1144 e = ubi->lookuptbl[pnum]; 1160 e = ubi->lookuptbl[pnum];
1145 if (e == ubi->move_from) { 1161 if (e == ubi->move_from) {
1146 /* 1162 /*
@@ -1148,17 +1164,22 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
1148 * be moved. It will be scheduled for erasure in the 1164 * be moved. It will be scheduled for erasure in the
1149 * wear-leveling worker. 1165 * wear-leveling worker.
1150 */ 1166 */
1151 dbg_wl("PEB %d is being moved", pnum); 1167 dbg_wl("PEB %d is being moved, wait", pnum);
1152 ubi_assert(!ubi->move_from_put);
1153 ubi->move_from_put = 1;
1154 spin_unlock(&ubi->wl_lock); 1168 spin_unlock(&ubi->wl_lock);
1155 return 0; 1169
1170 /* Wait for the WL worker by taking the @ubi->move_mutex */
1171 mutex_lock(&ubi->move_mutex);
1172 mutex_unlock(&ubi->move_mutex);
1173 goto retry;
1156 } else if (e == ubi->move_to) { 1174 } else if (e == ubi->move_to) {
1157 /* 1175 /*
1158 * User is putting the physical eraseblock which was selected 1176 * User is putting the physical eraseblock which was selected
1159 * as the target the data is moved to. It may happen if the EBA 1177 * as the target the data is moved to. It may happen if the EBA
1160 * unit already re-mapped the LEB but the WL unit did has not 1178 * unit already re-mapped the LEB in 'ubi_eba_copy_leb()' but
1161 * put the PEB to the "used" tree. 1179 * the WL unit has not put the PEB to the "used" tree yet, but
1180 * it is about to do this. So we just set a flag which will
1181 * tell the WL worker that the PEB is not needed anymore and
1182 * should be scheduled for erasure.
1162 */ 1183 */
1163 dbg_wl("PEB %d is the target of data moving", pnum); 1184 dbg_wl("PEB %d is the target of data moving", pnum);
1164 ubi_assert(!ubi->move_to_put); 1185 ubi_assert(!ubi->move_to_put);
@@ -1172,8 +1193,15 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
1172 } else if (in_wl_tree(e, &ubi->scrub)) { 1193 } else if (in_wl_tree(e, &ubi->scrub)) {
1173 paranoid_check_in_wl_tree(e, &ubi->scrub); 1194 paranoid_check_in_wl_tree(e, &ubi->scrub);
1174 rb_erase(&e->rb, &ubi->scrub); 1195 rb_erase(&e->rb, &ubi->scrub);
1175 } else 1196 } else {
1176 prot_tree_del(ubi, e->pnum); 1197 err = prot_tree_del(ubi, e->pnum);
1198 if (err) {
1199 ubi_err("PEB %d not found", pnum);
1200 ubi_ro_mode(ubi);
1201 spin_unlock(&ubi->wl_lock);
1202 return err;
1203 }
1204 }
1177 } 1205 }
1178 spin_unlock(&ubi->wl_lock); 1206 spin_unlock(&ubi->wl_lock);
1179 1207
@@ -1227,8 +1255,17 @@ retry:
1227 if (in_wl_tree(e, &ubi->used)) { 1255 if (in_wl_tree(e, &ubi->used)) {
1228 paranoid_check_in_wl_tree(e, &ubi->used); 1256 paranoid_check_in_wl_tree(e, &ubi->used);
1229 rb_erase(&e->rb, &ubi->used); 1257 rb_erase(&e->rb, &ubi->used);
1230 } else 1258 } else {
1231 prot_tree_del(ubi, pnum); 1259 int err;
1260
1261 err = prot_tree_del(ubi, e->pnum);
1262 if (err) {
1263 ubi_err("PEB %d not found", pnum);
1264 ubi_ro_mode(ubi);
1265 spin_unlock(&ubi->wl_lock);
1266 return err;
1267 }
1268 }
1232 1269
1233 wl_tree_add(e, &ubi->scrub); 1270 wl_tree_add(e, &ubi->scrub);
1234 spin_unlock(&ubi->wl_lock); 1271 spin_unlock(&ubi->wl_lock);
@@ -1249,17 +1286,32 @@ retry:
1249 */ 1286 */
1250int ubi_wl_flush(struct ubi_device *ubi) 1287int ubi_wl_flush(struct ubi_device *ubi)
1251{ 1288{
1252 int err, pending_count; 1289 int err;
1253
1254 pending_count = ubi->works_count;
1255
1256 dbg_wl("flush (%d pending works)", pending_count);
1257 1290
1258 /* 1291 /*
1259 * Erase while the pending works queue is not empty, but not more then 1292 * Erase while the pending works queue is not empty, but not more then
1260 * the number of currently pending works. 1293 * the number of currently pending works.
1261 */ 1294 */
1262 while (pending_count-- > 0) { 1295 dbg_wl("flush (%d pending works)", ubi->works_count);
1296 while (ubi->works_count) {
1297 err = do_work(ubi);
1298 if (err)
1299 return err;
1300 }
1301
1302 /*
1303 * Make sure all the works which have been done in parallel are
1304 * finished.
1305 */
1306 down_write(&ubi->work_sem);
1307 up_write(&ubi->work_sem);
1308
1309 /*
1310 * And in case last was the WL worker and it cancelled the LEB
1311 * movement, flush again.
1312 */
1313 while (ubi->works_count) {
1314 dbg_wl("flush more (%d pending works)", ubi->works_count);
1263 err = do_work(ubi); 1315 err = do_work(ubi);
1264 if (err) 1316 if (err)
1265 return err; 1317 return err;
@@ -1294,7 +1346,7 @@ static void tree_destroy(struct rb_root *root)
1294 rb->rb_right = NULL; 1346 rb->rb_right = NULL;
1295 } 1347 }
1296 1348
1297 kmem_cache_free(wl_entries_slab, e); 1349 kmem_cache_free(ubi_wl_entry_slab, e);
1298 } 1350 }
1299 } 1351 }
1300} 1352}
@@ -1303,7 +1355,7 @@ static void tree_destroy(struct rb_root *root)
1303 * ubi_thread - UBI background thread. 1355 * ubi_thread - UBI background thread.
1304 * @u: the UBI device description object pointer 1356 * @u: the UBI device description object pointer
1305 */ 1357 */
1306static int ubi_thread(void *u) 1358int ubi_thread(void *u)
1307{ 1359{
1308 int failures = 0; 1360 int failures = 0;
1309 struct ubi_device *ubi = u; 1361 struct ubi_device *ubi = u;
@@ -1394,36 +1446,22 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1394 ubi->used = ubi->free = ubi->scrub = RB_ROOT; 1446 ubi->used = ubi->free = ubi->scrub = RB_ROOT;
1395 ubi->prot.pnum = ubi->prot.aec = RB_ROOT; 1447 ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
1396 spin_lock_init(&ubi->wl_lock); 1448 spin_lock_init(&ubi->wl_lock);
1449 mutex_init(&ubi->move_mutex);
1450 init_rwsem(&ubi->work_sem);
1397 ubi->max_ec = si->max_ec; 1451 ubi->max_ec = si->max_ec;
1398 INIT_LIST_HEAD(&ubi->works); 1452 INIT_LIST_HEAD(&ubi->works);
1399 1453
1400 sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num); 1454 sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1401 1455
1402 ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
1403 if (IS_ERR(ubi->bgt_thread)) {
1404 err = PTR_ERR(ubi->bgt_thread);
1405 ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
1406 err);
1407 return err;
1408 }
1409
1410 if (ubi_devices_cnt == 0) {
1411 wl_entries_slab = kmem_cache_create("ubi_wl_entry_slab",
1412 sizeof(struct ubi_wl_entry),
1413 0, 0, NULL);
1414 if (!wl_entries_slab)
1415 return -ENOMEM;
1416 }
1417
1418 err = -ENOMEM; 1456 err = -ENOMEM;
1419 ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL); 1457 ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
1420 if (!ubi->lookuptbl) 1458 if (!ubi->lookuptbl)
1421 goto out_free; 1459 return err;
1422 1460
1423 list_for_each_entry_safe(seb, tmp, &si->erase, u.list) { 1461 list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
1424 cond_resched(); 1462 cond_resched();
1425 1463
1426 e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL); 1464 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1427 if (!e) 1465 if (!e)
1428 goto out_free; 1466 goto out_free;
1429 1467
@@ -1431,7 +1469,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1431 e->ec = seb->ec; 1469 e->ec = seb->ec;
1432 ubi->lookuptbl[e->pnum] = e; 1470 ubi->lookuptbl[e->pnum] = e;
1433 if (schedule_erase(ubi, e, 0)) { 1471 if (schedule_erase(ubi, e, 0)) {
1434 kmem_cache_free(wl_entries_slab, e); 1472 kmem_cache_free(ubi_wl_entry_slab, e);
1435 goto out_free; 1473 goto out_free;
1436 } 1474 }
1437 } 1475 }
@@ -1439,7 +1477,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1439 list_for_each_entry(seb, &si->free, u.list) { 1477 list_for_each_entry(seb, &si->free, u.list) {
1440 cond_resched(); 1478 cond_resched();
1441 1479
1442 e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL); 1480 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1443 if (!e) 1481 if (!e)
1444 goto out_free; 1482 goto out_free;
1445 1483
@@ -1453,7 +1491,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1453 list_for_each_entry(seb, &si->corr, u.list) { 1491 list_for_each_entry(seb, &si->corr, u.list) {
1454 cond_resched(); 1492 cond_resched();
1455 1493
1456 e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL); 1494 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1457 if (!e) 1495 if (!e)
1458 goto out_free; 1496 goto out_free;
1459 1497
@@ -1461,7 +1499,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1461 e->ec = seb->ec; 1499 e->ec = seb->ec;
1462 ubi->lookuptbl[e->pnum] = e; 1500 ubi->lookuptbl[e->pnum] = e;
1463 if (schedule_erase(ubi, e, 0)) { 1501 if (schedule_erase(ubi, e, 0)) {
1464 kmem_cache_free(wl_entries_slab, e); 1502 kmem_cache_free(ubi_wl_entry_slab, e);
1465 goto out_free; 1503 goto out_free;
1466 } 1504 }
1467 } 1505 }
@@ -1470,7 +1508,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1470 ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) { 1508 ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
1471 cond_resched(); 1509 cond_resched();
1472 1510
1473 e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL); 1511 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1474 if (!e) 1512 if (!e)
1475 goto out_free; 1513 goto out_free;
1476 1514
@@ -1510,8 +1548,6 @@ out_free:
1510 tree_destroy(&ubi->free); 1548 tree_destroy(&ubi->free);
1511 tree_destroy(&ubi->scrub); 1549 tree_destroy(&ubi->scrub);
1512 kfree(ubi->lookuptbl); 1550 kfree(ubi->lookuptbl);
1513 if (ubi_devices_cnt == 0)
1514 kmem_cache_destroy(wl_entries_slab);
1515 return err; 1551 return err;
1516} 1552}
1517 1553
@@ -1541,7 +1577,7 @@ static void protection_trees_destroy(struct ubi_device *ubi)
1541 rb->rb_right = NULL; 1577 rb->rb_right = NULL;
1542 } 1578 }
1543 1579
1544 kmem_cache_free(wl_entries_slab, pe->e); 1580 kmem_cache_free(ubi_wl_entry_slab, pe->e);
1545 kfree(pe); 1581 kfree(pe);
1546 } 1582 }
1547 } 1583 }
@@ -1553,10 +1589,6 @@ static void protection_trees_destroy(struct ubi_device *ubi)
1553 */ 1589 */
1554void ubi_wl_close(struct ubi_device *ubi) 1590void ubi_wl_close(struct ubi_device *ubi)
1555{ 1591{
1556 dbg_wl("disable \"%s\"", ubi->bgt_name);
1557 if (ubi->bgt_thread)
1558 kthread_stop(ubi->bgt_thread);
1559
1560 dbg_wl("close the UBI wear-leveling unit"); 1592 dbg_wl("close the UBI wear-leveling unit");
1561 1593
1562 cancel_pending(ubi); 1594 cancel_pending(ubi);
@@ -1565,8 +1597,6 @@ void ubi_wl_close(struct ubi_device *ubi)
1565 tree_destroy(&ubi->free); 1597 tree_destroy(&ubi->free);
1566 tree_destroy(&ubi->scrub); 1598 tree_destroy(&ubi->scrub);
1567 kfree(ubi->lookuptbl); 1599 kfree(ubi->lookuptbl);
1568 if (ubi_devices_cnt == 1)
1569 kmem_cache_destroy(wl_entries_slab);
1570} 1600}
1571 1601
1572#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID 1602#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID