aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--MAINTAINERS9
-rw-r--r--drivers/mtd/ubi/block.c247
-rw-r--r--drivers/mtd/ubi/build.c6
-rw-r--r--drivers/mtd/ubi/cdev.c19
-rw-r--r--drivers/mtd/ubi/eba.c56
-rw-r--r--drivers/mtd/ubi/fastmap.c13
-rw-r--r--drivers/mtd/ubi/io.c3
-rw-r--r--drivers/mtd/ubi/kapi.c110
-rw-r--r--drivers/mtd/ubi/misc.c2
-rw-r--r--drivers/mtd/ubi/ubi.h19
-rw-r--r--drivers/mtd/ubi/vtbl.c7
-rw-r--r--drivers/mtd/ubi/wl.c10
-rw-r--r--fs/ubifs/debug.c4
-rw-r--r--fs/ubifs/dir.c16
-rw-r--r--fs/ubifs/file.c4
-rw-r--r--fs/ubifs/replay.c19
-rw-r--r--fs/ubifs/super.c1
-rw-r--r--fs/ubifs/ubifs.h4
-rw-r--r--fs/ubifs/xattr.c112
-rw-r--r--include/linux/mtd/ubi.h53
20 files changed, 521 insertions, 193 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 3ac697a64280..1c7e321f77b8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9990,20 +9990,15 @@ F: drivers/scsi/ufs/
9990 9990
9991UNSORTED BLOCK IMAGES (UBI) 9991UNSORTED BLOCK IMAGES (UBI)
9992M: Artem Bityutskiy <dedekind1@gmail.com> 9992M: Artem Bityutskiy <dedekind1@gmail.com>
9993M: Richard Weinberger <richard@nod.at>
9993W: http://www.linux-mtd.infradead.org/ 9994W: http://www.linux-mtd.infradead.org/
9994L: linux-mtd@lists.infradead.org 9995L: linux-mtd@lists.infradead.org
9995T: git git://git.infradead.org/ubifs-2.6.git 9996T: git git://git.infradead.org/ubifs-2.6.git
9996S: Maintained 9997S: Supported
9997F: drivers/mtd/ubi/ 9998F: drivers/mtd/ubi/
9998F: include/linux/mtd/ubi.h 9999F: include/linux/mtd/ubi.h
9999F: include/uapi/mtd/ubi-user.h 10000F: include/uapi/mtd/ubi-user.h
10000 10001
10001UNSORTED BLOCK IMAGES (UBI) Fastmap
10002M: Richard Weinberger <richard@nod.at>
10003L: linux-mtd@lists.infradead.org
10004S: Maintained
10005F: drivers/mtd/ubi/fastmap.c
10006
10007USB ACM DRIVER 10002USB ACM DRIVER
10008M: Oliver Neukum <oliver@neukum.org> 10003M: Oliver Neukum <oliver@neukum.org>
10009L: linux-usb@vger.kernel.org 10004L: linux-usb@vger.kernel.org
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 6b6bce28bd63..db2c05b6fe7f 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -42,11 +42,12 @@
42#include <linux/list.h> 42#include <linux/list.h>
43#include <linux/mutex.h> 43#include <linux/mutex.h>
44#include <linux/slab.h> 44#include <linux/slab.h>
45#include <linux/vmalloc.h>
46#include <linux/mtd/ubi.h> 45#include <linux/mtd/ubi.h>
47#include <linux/workqueue.h> 46#include <linux/workqueue.h>
48#include <linux/blkdev.h> 47#include <linux/blkdev.h>
48#include <linux/blk-mq.h>
49#include <linux/hdreg.h> 49#include <linux/hdreg.h>
50#include <linux/scatterlist.h>
50#include <asm/div64.h> 51#include <asm/div64.h>
51 52
52#include "ubi-media.h" 53#include "ubi-media.h"
@@ -67,6 +68,11 @@ struct ubiblock_param {
67 char name[UBIBLOCK_PARAM_LEN+1]; 68 char name[UBIBLOCK_PARAM_LEN+1];
68}; 69};
69 70
71struct ubiblock_pdu {
72 struct work_struct work;
73 struct ubi_sgl usgl;
74};
75
70/* Numbers of elements set in the @ubiblock_param array */ 76/* Numbers of elements set in the @ubiblock_param array */
71static int ubiblock_devs __initdata; 77static int ubiblock_devs __initdata;
72 78
@@ -84,11 +90,10 @@ struct ubiblock {
84 struct request_queue *rq; 90 struct request_queue *rq;
85 91
86 struct workqueue_struct *wq; 92 struct workqueue_struct *wq;
87 struct work_struct work;
88 93
89 struct mutex dev_mutex; 94 struct mutex dev_mutex;
90 spinlock_t queue_lock;
91 struct list_head list; 95 struct list_head list;
96 struct blk_mq_tag_set tag_set;
92}; 97};
93 98
94/* Linked list of all ubiblock instances */ 99/* Linked list of all ubiblock instances */
@@ -181,31 +186,20 @@ static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
181 return NULL; 186 return NULL;
182} 187}
183 188
184static int ubiblock_read_to_buf(struct ubiblock *dev, char *buffer, 189static int ubiblock_read(struct ubiblock_pdu *pdu)
185 int leb, int offset, int len)
186{ 190{
187 int ret; 191 int ret, leb, offset, bytes_left, to_read;
188 192 u64 pos;
189 ret = ubi_read(dev->desc, leb, buffer, offset, len); 193 struct request *req = blk_mq_rq_from_pdu(pdu);
190 if (ret) { 194 struct ubiblock *dev = req->q->queuedata;
191 dev_err(disk_to_dev(dev->gd), "%d while reading from LEB %d (offset %d, length %d)",
192 ret, leb, offset, len);
193 return ret;
194 }
195 return 0;
196}
197 195
198static int ubiblock_read(struct ubiblock *dev, char *buffer, 196 to_read = blk_rq_bytes(req);
199 sector_t sec, int len) 197 pos = blk_rq_pos(req) << 9;
200{
201 int ret, leb, offset;
202 int bytes_left = len;
203 int to_read = len;
204 u64 pos = sec << 9;
205 198
206 /* Get LEB:offset address to read from */ 199 /* Get LEB:offset address to read from */
207 offset = do_div(pos, dev->leb_size); 200 offset = do_div(pos, dev->leb_size);
208 leb = pos; 201 leb = pos;
202 bytes_left = to_read;
209 203
210 while (bytes_left) { 204 while (bytes_left) {
211 /* 205 /*
@@ -215,11 +209,10 @@ static int ubiblock_read(struct ubiblock *dev, char *buffer,
215 if (offset + to_read > dev->leb_size) 209 if (offset + to_read > dev->leb_size)
216 to_read = dev->leb_size - offset; 210 to_read = dev->leb_size - offset;
217 211
218 ret = ubiblock_read_to_buf(dev, buffer, leb, offset, to_read); 212 ret = ubi_read_sg(dev->desc, leb, &pdu->usgl, offset, to_read);
219 if (ret) 213 if (ret < 0)
220 return ret; 214 return ret;
221 215
222 buffer += to_read;
223 bytes_left -= to_read; 216 bytes_left -= to_read;
224 to_read = bytes_left; 217 to_read = bytes_left;
225 leb += 1; 218 leb += 1;
@@ -228,79 +221,6 @@ static int ubiblock_read(struct ubiblock *dev, char *buffer,
228 return 0; 221 return 0;
229} 222}
230 223
231static int do_ubiblock_request(struct ubiblock *dev, struct request *req)
232{
233 int len, ret;
234 sector_t sec;
235
236 if (req->cmd_type != REQ_TYPE_FS)
237 return -EIO;
238
239 if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
240 get_capacity(req->rq_disk))
241 return -EIO;
242
243 if (rq_data_dir(req) != READ)
244 return -ENOSYS; /* Write not implemented */
245
246 sec = blk_rq_pos(req);
247 len = blk_rq_cur_bytes(req);
248
249 /*
250 * Let's prevent the device from being removed while we're doing I/O
251 * work. Notice that this means we serialize all the I/O operations,
252 * but it's probably of no impact given the NAND core serializes
253 * flash access anyway.
254 */
255 mutex_lock(&dev->dev_mutex);
256 ret = ubiblock_read(dev, bio_data(req->bio), sec, len);
257 mutex_unlock(&dev->dev_mutex);
258
259 return ret;
260}
261
262static void ubiblock_do_work(struct work_struct *work)
263{
264 struct ubiblock *dev =
265 container_of(work, struct ubiblock, work);
266 struct request_queue *rq = dev->rq;
267 struct request *req;
268 int res;
269
270 spin_lock_irq(rq->queue_lock);
271
272 req = blk_fetch_request(rq);
273 while (req) {
274
275 spin_unlock_irq(rq->queue_lock);
276 res = do_ubiblock_request(dev, req);
277 spin_lock_irq(rq->queue_lock);
278
279 /*
280 * If we're done with this request,
281 * we need to fetch a new one
282 */
283 if (!__blk_end_request_cur(req, res))
284 req = blk_fetch_request(rq);
285 }
286
287 spin_unlock_irq(rq->queue_lock);
288}
289
290static void ubiblock_request(struct request_queue *rq)
291{
292 struct ubiblock *dev;
293 struct request *req;
294
295 dev = rq->queuedata;
296
297 if (!dev)
298 while ((req = blk_fetch_request(rq)) != NULL)
299 __blk_end_request_all(req, -ENODEV);
300 else
301 queue_work(dev->wq, &dev->work);
302}
303
304static int ubiblock_open(struct block_device *bdev, fmode_t mode) 224static int ubiblock_open(struct block_device *bdev, fmode_t mode)
305{ 225{
306 struct ubiblock *dev = bdev->bd_disk->private_data; 226 struct ubiblock *dev = bdev->bd_disk->private_data;
@@ -374,6 +294,63 @@ static const struct block_device_operations ubiblock_ops = {
374 .getgeo = ubiblock_getgeo, 294 .getgeo = ubiblock_getgeo,
375}; 295};
376 296
297static void ubiblock_do_work(struct work_struct *work)
298{
299 int ret;
300 struct ubiblock_pdu *pdu = container_of(work, struct ubiblock_pdu, work);
301 struct request *req = blk_mq_rq_from_pdu(pdu);
302
303 blk_mq_start_request(req);
304
305 /*
306 * It is safe to ignore the return value of blk_rq_map_sg() because
307 * the number of sg entries is limited to UBI_MAX_SG_COUNT
308 * and ubi_read_sg() will check that limit.
309 */
310 blk_rq_map_sg(req->q, req, pdu->usgl.sg);
311
312 ret = ubiblock_read(pdu);
313 blk_mq_end_request(req, ret);
314}
315
316static int ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
317 const struct blk_mq_queue_data *bd)
318{
319 struct request *req = bd->rq;
320 struct ubiblock *dev = hctx->queue->queuedata;
321 struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
322
323 if (req->cmd_type != REQ_TYPE_FS)
324 return BLK_MQ_RQ_QUEUE_ERROR;
325
326 if (rq_data_dir(req) != READ)
327 return BLK_MQ_RQ_QUEUE_ERROR; /* Write not implemented */
328
329 ubi_sgl_init(&pdu->usgl);
330 queue_work(dev->wq, &pdu->work);
331
332 return BLK_MQ_RQ_QUEUE_OK;
333}
334
335static int ubiblock_init_request(void *data, struct request *req,
336 unsigned int hctx_idx,
337 unsigned int request_idx,
338 unsigned int numa_node)
339{
340 struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
341
342 sg_init_table(pdu->usgl.sg, UBI_MAX_SG_COUNT);
343 INIT_WORK(&pdu->work, ubiblock_do_work);
344
345 return 0;
346}
347
348static struct blk_mq_ops ubiblock_mq_ops = {
349 .queue_rq = ubiblock_queue_rq,
350 .init_request = ubiblock_init_request,
351 .map_queue = blk_mq_map_queue,
352};
353
377int ubiblock_create(struct ubi_volume_info *vi) 354int ubiblock_create(struct ubi_volume_info *vi)
378{ 355{
379 struct ubiblock *dev; 356 struct ubiblock *dev;
@@ -417,14 +394,28 @@ int ubiblock_create(struct ubi_volume_info *vi)
417 set_capacity(gd, disk_capacity); 394 set_capacity(gd, disk_capacity);
418 dev->gd = gd; 395 dev->gd = gd;
419 396
420 spin_lock_init(&dev->queue_lock); 397 dev->tag_set.ops = &ubiblock_mq_ops;
421 dev->rq = blk_init_queue(ubiblock_request, &dev->queue_lock); 398 dev->tag_set.queue_depth = 64;
422 if (!dev->rq) { 399 dev->tag_set.numa_node = NUMA_NO_NODE;
423 dev_err(disk_to_dev(gd), "blk_init_queue failed"); 400 dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
424 ret = -ENODEV; 401 dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
402 dev->tag_set.driver_data = dev;
403 dev->tag_set.nr_hw_queues = 1;
404
405 ret = blk_mq_alloc_tag_set(&dev->tag_set);
406 if (ret) {
407 dev_err(disk_to_dev(dev->gd), "blk_mq_alloc_tag_set failed");
425 goto out_put_disk; 408 goto out_put_disk;
426 } 409 }
427 410
411 dev->rq = blk_mq_init_queue(&dev->tag_set);
412 if (IS_ERR(dev->rq)) {
413 dev_err(disk_to_dev(gd), "blk_mq_init_queue failed");
414 ret = PTR_ERR(dev->rq);
415 goto out_free_tags;
416 }
417 blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT);
418
428 dev->rq->queuedata = dev; 419 dev->rq->queuedata = dev;
429 dev->gd->queue = dev->rq; 420 dev->gd->queue = dev->rq;
430 421
@@ -437,7 +428,6 @@ int ubiblock_create(struct ubi_volume_info *vi)
437 ret = -ENOMEM; 428 ret = -ENOMEM;
438 goto out_free_queue; 429 goto out_free_queue;
439 } 430 }
440 INIT_WORK(&dev->work, ubiblock_do_work);
441 431
442 mutex_lock(&devices_mutex); 432 mutex_lock(&devices_mutex);
443 list_add_tail(&dev->list, &ubiblock_devices); 433 list_add_tail(&dev->list, &ubiblock_devices);
@@ -451,6 +441,8 @@ int ubiblock_create(struct ubi_volume_info *vi)
451 441
452out_free_queue: 442out_free_queue:
453 blk_cleanup_queue(dev->rq); 443 blk_cleanup_queue(dev->rq);
444out_free_tags:
445 blk_mq_free_tag_set(&dev->tag_set);
454out_put_disk: 446out_put_disk:
455 put_disk(dev->gd); 447 put_disk(dev->gd);
456out_free_dev: 448out_free_dev:
@@ -461,8 +453,13 @@ out_free_dev:
461 453
462static void ubiblock_cleanup(struct ubiblock *dev) 454static void ubiblock_cleanup(struct ubiblock *dev)
463{ 455{
456 /* Stop new requests to arrive */
464 del_gendisk(dev->gd); 457 del_gendisk(dev->gd);
458 /* Flush pending work */
459 destroy_workqueue(dev->wq);
460 /* Finally destroy the blk queue */
465 blk_cleanup_queue(dev->rq); 461 blk_cleanup_queue(dev->rq);
462 blk_mq_free_tag_set(&dev->tag_set);
466 dev_info(disk_to_dev(dev->gd), "released"); 463 dev_info(disk_to_dev(dev->gd), "released");
467 put_disk(dev->gd); 464 put_disk(dev->gd);
468} 465}
@@ -490,9 +487,6 @@ int ubiblock_remove(struct ubi_volume_info *vi)
490 list_del(&dev->list); 487 list_del(&dev->list);
491 mutex_unlock(&devices_mutex); 488 mutex_unlock(&devices_mutex);
492 489
493 /* Flush pending work and stop this workqueue */
494 destroy_workqueue(dev->wq);
495
496 ubiblock_cleanup(dev); 490 ubiblock_cleanup(dev);
497 mutex_unlock(&dev->dev_mutex); 491 mutex_unlock(&dev->dev_mutex);
498 kfree(dev); 492 kfree(dev);
@@ -583,22 +577,28 @@ open_volume_desc(const char *name, int ubi_num, int vol_id)
583 return ubi_open_volume(ubi_num, vol_id, UBI_READONLY); 577 return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
584} 578}
585 579
586static int __init ubiblock_create_from_param(void) 580static void __init ubiblock_create_from_param(void)
587{ 581{
588 int i, ret; 582 int i, ret = 0;
589 struct ubiblock_param *p; 583 struct ubiblock_param *p;
590 struct ubi_volume_desc *desc; 584 struct ubi_volume_desc *desc;
591 struct ubi_volume_info vi; 585 struct ubi_volume_info vi;
592 586
587 /*
588 * If there is an error creating one of the ubiblocks, continue on to
589 * create the following ubiblocks. This helps in a circumstance where
590 * the kernel command-line specifies multiple block devices and some
591 * may be broken, but we still want the working ones to come up.
592 */
593 for (i = 0; i < ubiblock_devs; i++) { 593 for (i = 0; i < ubiblock_devs; i++) {
594 p = &ubiblock_param[i]; 594 p = &ubiblock_param[i];
595 595
596 desc = open_volume_desc(p->name, p->ubi_num, p->vol_id); 596 desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
597 if (IS_ERR(desc)) { 597 if (IS_ERR(desc)) {
598 pr_err("UBI: block: can't open volume, err=%ld\n", 598 pr_err(
599 PTR_ERR(desc)); 599 "UBI: block: can't open volume on ubi%d_%d, err=%ld",
600 ret = PTR_ERR(desc); 600 p->ubi_num, p->vol_id, PTR_ERR(desc));
601 break; 601 continue;
602 } 602 }
603 603
604 ubi_get_volume_info(desc, &vi); 604 ubi_get_volume_info(desc, &vi);
@@ -606,12 +606,12 @@ static int __init ubiblock_create_from_param(void)
606 606
607 ret = ubiblock_create(&vi); 607 ret = ubiblock_create(&vi);
608 if (ret) { 608 if (ret) {
609 pr_err("UBI: block: can't add '%s' volume, err=%d\n", 609 pr_err(
610 vi.name, ret); 610 "UBI: block: can't add '%s' volume on ubi%d_%d, err=%d",
611 break; 611 vi.name, p->ubi_num, p->vol_id, ret);
612 continue;
612 } 613 }
613 } 614 }
614 return ret;
615} 615}
616 616
617static void ubiblock_remove_all(void) 617static void ubiblock_remove_all(void)
@@ -620,8 +620,6 @@ static void ubiblock_remove_all(void)
620 struct ubiblock *dev; 620 struct ubiblock *dev;
621 621
622 list_for_each_entry_safe(dev, next, &ubiblock_devices, list) { 622 list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
623 /* Flush pending work and stop workqueue */
624 destroy_workqueue(dev->wq);
625 /* The module is being forcefully removed */ 623 /* The module is being forcefully removed */
626 WARN_ON(dev->desc); 624 WARN_ON(dev->desc);
627 /* Remove from device list */ 625 /* Remove from device list */
@@ -639,10 +637,12 @@ int __init ubiblock_init(void)
639 if (ubiblock_major < 0) 637 if (ubiblock_major < 0)
640 return ubiblock_major; 638 return ubiblock_major;
641 639
642 /* Attach block devices from 'block=' module param */ 640 /*
643 ret = ubiblock_create_from_param(); 641 * Attach block devices from 'block=' module param.
644 if (ret) 642 * Even if one block device in the param list fails to come up,
645 goto err_remove; 643 * still allow the module to load and leave any others up.
644 */
645 ubiblock_create_from_param();
646 646
647 /* 647 /*
648 * Block devices are only created upon user requests, so we ignore 648 * Block devices are only created upon user requests, so we ignore
@@ -655,7 +655,6 @@ int __init ubiblock_init(void)
655 655
656err_unreg: 656err_unreg:
657 unregister_blkdev(ubiblock_major, "ubiblock"); 657 unregister_blkdev(ubiblock_major, "ubiblock");
658err_remove:
659 ubiblock_remove_all(); 658 ubiblock_remove_all();
660 return ret; 659 return ret;
661} 660}
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 3405be46ebe9..ba01a8d22d28 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -923,7 +923,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
923 923
924 /* Make sure ubi_num is not busy */ 924 /* Make sure ubi_num is not busy */
925 if (ubi_devices[ubi_num]) { 925 if (ubi_devices[ubi_num]) {
926 ubi_err(ubi, "ubi%d already exists", ubi_num); 926 ubi_err(ubi, "already exists");
927 return -EEXIST; 927 return -EEXIST;
928 } 928 }
929 } 929 }
@@ -973,7 +973,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
973 mutex_init(&ubi->fm_mutex); 973 mutex_init(&ubi->fm_mutex);
974 init_rwsem(&ubi->fm_sem); 974 init_rwsem(&ubi->fm_sem);
975 975
976 ubi_msg(ubi, "attaching mtd%d to ubi%d", mtd->index, ubi_num); 976 ubi_msg(ubi, "attaching mtd%d", mtd->index);
977 977
978 err = io_init(ubi, max_beb_per1024); 978 err = io_init(ubi, max_beb_per1024);
979 if (err) 979 if (err)
@@ -1428,7 +1428,7 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
1428 } 1428 }
1429 1429
1430 if (len == 0) { 1430 if (len == 0) {
1431 pr_err("UBI warning: empty 'mtd=' parameter - ignored\n"); 1431 pr_warn("UBI warning: empty 'mtd=' parameter - ignored\n");
1432 return 0; 1432 return 0;
1433 } 1433 }
1434 1434
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 3410ea8109f8..d647e504f9b1 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -48,26 +48,25 @@
48 48
49/** 49/**
50 * get_exclusive - get exclusive access to an UBI volume. 50 * get_exclusive - get exclusive access to an UBI volume.
51 * @ubi: UBI device description object
52 * @desc: volume descriptor 51 * @desc: volume descriptor
53 * 52 *
54 * This function changes UBI volume open mode to "exclusive". Returns previous 53 * This function changes UBI volume open mode to "exclusive". Returns previous
55 * mode value (positive integer) in case of success and a negative error code 54 * mode value (positive integer) in case of success and a negative error code
56 * in case of failure. 55 * in case of failure.
57 */ 56 */
58static int get_exclusive(struct ubi_device *ubi, struct ubi_volume_desc *desc) 57static int get_exclusive(struct ubi_volume_desc *desc)
59{ 58{
60 int users, err; 59 int users, err;
61 struct ubi_volume *vol = desc->vol; 60 struct ubi_volume *vol = desc->vol;
62 61
63 spin_lock(&vol->ubi->volumes_lock); 62 spin_lock(&vol->ubi->volumes_lock);
64 users = vol->readers + vol->writers + vol->exclusive; 63 users = vol->readers + vol->writers + vol->exclusive + vol->metaonly;
65 ubi_assert(users > 0); 64 ubi_assert(users > 0);
66 if (users > 1) { 65 if (users > 1) {
67 ubi_err(ubi, "%d users for volume %d", users, vol->vol_id); 66 ubi_err(vol->ubi, "%d users for volume %d", users, vol->vol_id);
68 err = -EBUSY; 67 err = -EBUSY;
69 } else { 68 } else {
70 vol->readers = vol->writers = 0; 69 vol->readers = vol->writers = vol->metaonly = 0;
71 vol->exclusive = 1; 70 vol->exclusive = 1;
72 err = desc->mode; 71 err = desc->mode;
73 desc->mode = UBI_EXCLUSIVE; 72 desc->mode = UBI_EXCLUSIVE;
@@ -87,13 +86,15 @@ static void revoke_exclusive(struct ubi_volume_desc *desc, int mode)
87 struct ubi_volume *vol = desc->vol; 86 struct ubi_volume *vol = desc->vol;
88 87
89 spin_lock(&vol->ubi->volumes_lock); 88 spin_lock(&vol->ubi->volumes_lock);
90 ubi_assert(vol->readers == 0 && vol->writers == 0); 89 ubi_assert(vol->readers == 0 && vol->writers == 0 && vol->metaonly == 0);
91 ubi_assert(vol->exclusive == 1 && desc->mode == UBI_EXCLUSIVE); 90 ubi_assert(vol->exclusive == 1 && desc->mode == UBI_EXCLUSIVE);
92 vol->exclusive = 0; 91 vol->exclusive = 0;
93 if (mode == UBI_READONLY) 92 if (mode == UBI_READONLY)
94 vol->readers = 1; 93 vol->readers = 1;
95 else if (mode == UBI_READWRITE) 94 else if (mode == UBI_READWRITE)
96 vol->writers = 1; 95 vol->writers = 1;
96 else if (mode == UBI_METAONLY)
97 vol->metaonly = 1;
97 else 98 else
98 vol->exclusive = 1; 99 vol->exclusive = 1;
99 spin_unlock(&vol->ubi->volumes_lock); 100 spin_unlock(&vol->ubi->volumes_lock);
@@ -421,7 +422,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
421 break; 422 break;
422 } 423 }
423 424
424 err = get_exclusive(ubi, desc); 425 err = get_exclusive(desc);
425 if (err < 0) 426 if (err < 0)
426 break; 427 break;
427 428
@@ -457,7 +458,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
457 req.bytes < 0 || req.lnum >= vol->usable_leb_size) 458 req.bytes < 0 || req.lnum >= vol->usable_leb_size)
458 break; 459 break;
459 460
460 err = get_exclusive(ubi, desc); 461 err = get_exclusive(desc);
461 if (err < 0) 462 if (err < 0)
462 break; 463 break;
463 464
@@ -734,7 +735,7 @@ static int rename_volumes(struct ubi_device *ubi,
734 goto out_free; 735 goto out_free;
735 } 736 }
736 737
737 re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_READWRITE); 738 re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_METAONLY);
738 if (IS_ERR(re->desc)) { 739 if (IS_ERR(re->desc)) {
739 err = PTR_ERR(re->desc); 740 err = PTR_ERR(re->desc);
740 ubi_err(ubi, "cannot open volume %d, error %d", 741 ubi_err(ubi, "cannot open volume %d, error %d",
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index a40020cf0923..da4c79259f67 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -426,6 +426,7 @@ retry:
426 pnum, vol_id, lnum); 426 pnum, vol_id, lnum);
427 err = -EBADMSG; 427 err = -EBADMSG;
428 } else 428 } else
429 err = -EINVAL;
429 ubi_ro_mode(ubi); 430 ubi_ro_mode(ubi);
430 } 431 }
431 goto out_free; 432 goto out_free;
@@ -480,6 +481,61 @@ out_unlock:
480} 481}
481 482
482/** 483/**
484 * ubi_eba_read_leb_sg - read data into a scatter gather list.
485 * @ubi: UBI device description object
486 * @vol: volume description object
487 * @lnum: logical eraseblock number
488 * @sgl: UBI scatter gather list to store the read data
489 * @offset: offset from where to read
490 * @len: how many bytes to read
491 * @check: data CRC check flag
492 *
493 * This function works exactly like ubi_eba_read_leb(). But instead of
494 * storing the read data into a buffer it writes to an UBI scatter gather
495 * list.
496 */
497int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol,
498 struct ubi_sgl *sgl, int lnum, int offset, int len,
499 int check)
500{
501 int to_read;
502 int ret;
503 struct scatterlist *sg;
504
505 for (;;) {
506 ubi_assert(sgl->list_pos < UBI_MAX_SG_COUNT);
507 sg = &sgl->sg[sgl->list_pos];
508 if (len < sg->length - sgl->page_pos)
509 to_read = len;
510 else
511 to_read = sg->length - sgl->page_pos;
512
513 ret = ubi_eba_read_leb(ubi, vol, lnum,
514 sg_virt(sg) + sgl->page_pos, offset,
515 to_read, check);
516 if (ret < 0)
517 return ret;
518
519 offset += to_read;
520 len -= to_read;
521 if (!len) {
522 sgl->page_pos += to_read;
523 if (sgl->page_pos == sg->length) {
524 sgl->list_pos++;
525 sgl->page_pos = 0;
526 }
527
528 break;
529 }
530
531 sgl->list_pos++;
532 sgl->page_pos = 0;
533 }
534
535 return ret;
536}
537
538/**
483 * recover_peb - recover from write failure. 539 * recover_peb - recover from write failure.
484 * @ubi: UBI device description object 540 * @ubi: UBI device description object
485 * @pnum: the physical eraseblock to recover 541 * @pnum: the physical eraseblock to recover
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index b56672bf3294..db3defdfc3c0 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -1196,6 +1196,19 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
1196 fm_pos += sizeof(*fec); 1196 fm_pos += sizeof(*fec);
1197 ubi_assert(fm_pos <= ubi->fm_size); 1197 ubi_assert(fm_pos <= ubi->fm_size);
1198 } 1198 }
1199
1200 for (i = 0; i < UBI_PROT_QUEUE_LEN; i++) {
1201 list_for_each_entry(wl_e, &ubi->pq[i], u.list) {
1202 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1203
1204 fec->pnum = cpu_to_be32(wl_e->pnum);
1205 fec->ec = cpu_to_be32(wl_e->ec);
1206
1207 used_peb_count++;
1208 fm_pos += sizeof(*fec);
1209 ubi_assert(fm_pos <= ubi->fm_size);
1210 }
1211 }
1199 fmh->used_peb_count = cpu_to_be32(used_peb_count); 1212 fmh->used_peb_count = cpu_to_be32(used_peb_count);
1200 1213
1201 for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) { 1214 for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 396aaa543362..ed0bcb35472f 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -1419,8 +1419,7 @@ int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
1419 1419
1420fail: 1420fail:
1421 ubi_err(ubi, "self-check failed for PEB %d", pnum); 1421 ubi_err(ubi, "self-check failed for PEB %d", pnum);
1422 ubi_msg(ubi, "hex dump of the %d-%d region", 1422 ubi_msg(ubi, "hex dump of the %d-%d region", offset, offset + len);
1423 offset, offset + len);
1424 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1); 1423 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
1425 err = -EINVAL; 1424 err = -EINVAL;
1426error: 1425error:
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index f3bab669f6bb..478e00cf2d9e 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -137,7 +137,7 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
137 return ERR_PTR(-EINVAL); 137 return ERR_PTR(-EINVAL);
138 138
139 if (mode != UBI_READONLY && mode != UBI_READWRITE && 139 if (mode != UBI_READONLY && mode != UBI_READWRITE &&
140 mode != UBI_EXCLUSIVE) 140 mode != UBI_EXCLUSIVE && mode != UBI_METAONLY)
141 return ERR_PTR(-EINVAL); 141 return ERR_PTR(-EINVAL);
142 142
143 /* 143 /*
@@ -182,10 +182,17 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
182 break; 182 break;
183 183
184 case UBI_EXCLUSIVE: 184 case UBI_EXCLUSIVE:
185 if (vol->exclusive || vol->writers || vol->readers) 185 if (vol->exclusive || vol->writers || vol->readers ||
186 vol->metaonly)
186 goto out_unlock; 187 goto out_unlock;
187 vol->exclusive = 1; 188 vol->exclusive = 1;
188 break; 189 break;
190
191 case UBI_METAONLY:
192 if (vol->metaonly || vol->exclusive)
193 goto out_unlock;
194 vol->metaonly = 1;
195 break;
189 } 196 }
190 get_device(&vol->dev); 197 get_device(&vol->dev);
191 vol->ref_count += 1; 198 vol->ref_count += 1;
@@ -343,6 +350,10 @@ void ubi_close_volume(struct ubi_volume_desc *desc)
343 break; 350 break;
344 case UBI_EXCLUSIVE: 351 case UBI_EXCLUSIVE:
345 vol->exclusive = 0; 352 vol->exclusive = 0;
353 break;
354 case UBI_METAONLY:
355 vol->metaonly = 0;
356 break;
346 } 357 }
347 vol->ref_count -= 1; 358 vol->ref_count -= 1;
348 spin_unlock(&ubi->volumes_lock); 359 spin_unlock(&ubi->volumes_lock);
@@ -355,6 +366,43 @@ void ubi_close_volume(struct ubi_volume_desc *desc)
355EXPORT_SYMBOL_GPL(ubi_close_volume); 366EXPORT_SYMBOL_GPL(ubi_close_volume);
356 367
357/** 368/**
369 * leb_read_sanity_check - does sanity checks on read requests.
370 * @desc: volume descriptor
371 * @lnum: logical eraseblock number to read from
372 * @offset: offset within the logical eraseblock to read from
373 * @len: how many bytes to read
374 *
375 * This function is used by ubi_leb_read() and ubi_leb_read_sg()
376 * to perform sanity checks.
377 */
378static int leb_read_sanity_check(struct ubi_volume_desc *desc, int lnum,
379 int offset, int len)
380{
381 struct ubi_volume *vol = desc->vol;
382 struct ubi_device *ubi = vol->ubi;
383 int vol_id = vol->vol_id;
384
385 if (vol_id < 0 || vol_id >= ubi->vtbl_slots || lnum < 0 ||
386 lnum >= vol->used_ebs || offset < 0 || len < 0 ||
387 offset + len > vol->usable_leb_size)
388 return -EINVAL;
389
390 if (vol->vol_type == UBI_STATIC_VOLUME) {
391 if (vol->used_ebs == 0)
392 /* Empty static UBI volume */
393 return 0;
394 if (lnum == vol->used_ebs - 1 &&
395 offset + len > vol->last_eb_bytes)
396 return -EINVAL;
397 }
398
399 if (vol->upd_marker)
400 return -EBADF;
401
402 return 0;
403}
404
405/**
358 * ubi_leb_read - read data. 406 * ubi_leb_read - read data.
359 * @desc: volume descriptor 407 * @desc: volume descriptor
360 * @lnum: logical eraseblock number to read from 408 * @lnum: logical eraseblock number to read from
@@ -390,22 +438,10 @@ int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
390 438
391 dbg_gen("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset); 439 dbg_gen("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset);
392 440
393 if (vol_id < 0 || vol_id >= ubi->vtbl_slots || lnum < 0 || 441 err = leb_read_sanity_check(desc, lnum, offset, len);
394 lnum >= vol->used_ebs || offset < 0 || len < 0 || 442 if (err < 0)
395 offset + len > vol->usable_leb_size) 443 return err;
396 return -EINVAL;
397
398 if (vol->vol_type == UBI_STATIC_VOLUME) {
399 if (vol->used_ebs == 0)
400 /* Empty static UBI volume */
401 return 0;
402 if (lnum == vol->used_ebs - 1 &&
403 offset + len > vol->last_eb_bytes)
404 return -EINVAL;
405 }
406 444
407 if (vol->upd_marker)
408 return -EBADF;
409 if (len == 0) 445 if (len == 0)
410 return 0; 446 return 0;
411 447
@@ -419,6 +455,46 @@ int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
419} 455}
420EXPORT_SYMBOL_GPL(ubi_leb_read); 456EXPORT_SYMBOL_GPL(ubi_leb_read);
421 457
458
459/**
460 * ubi_leb_read_sg - read data into a scatter gather list.
461 * @desc: volume descriptor
462 * @lnum: logical eraseblock number to read from
463 * @buf: buffer where to store the read data
464 * @offset: offset within the logical eraseblock to read from
465 * @len: how many bytes to read
466 * @check: whether UBI has to check the read data's CRC or not.
467 *
468 * This function works exactly like ubi_leb_read_sg(). But instead of
469 * storing the read data into a buffer it writes to an UBI scatter gather
470 * list.
471 */
472int ubi_leb_read_sg(struct ubi_volume_desc *desc, int lnum, struct ubi_sgl *sgl,
473 int offset, int len, int check)
474{
475 struct ubi_volume *vol = desc->vol;
476 struct ubi_device *ubi = vol->ubi;
477 int err, vol_id = vol->vol_id;
478
479 dbg_gen("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset);
480
481 err = leb_read_sanity_check(desc, lnum, offset, len);
482 if (err < 0)
483 return err;
484
485 if (len == 0)
486 return 0;
487
488 err = ubi_eba_read_leb_sg(ubi, vol, sgl, lnum, offset, len, check);
489 if (err && mtd_is_eccerr(err) && vol->vol_type == UBI_STATIC_VOLUME) {
490 ubi_warn(ubi, "mark volume %d as corrupted", vol_id);
491 vol->corrupted = 1;
492 }
493
494 return err;
495}
496EXPORT_SYMBOL_GPL(ubi_leb_read_sg);
497
422/** 498/**
423 * ubi_leb_write - write data. 499 * ubi_leb_write - write data.
424 * @desc: volume descriptor 500 * @desc: volume descriptor
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
index dbda77e556cb..2a45ac210b16 100644
--- a/drivers/mtd/ubi/misc.c
+++ b/drivers/mtd/ubi/misc.c
@@ -74,6 +74,8 @@ int ubi_check_volume(struct ubi_device *ubi, int vol_id)
74 for (i = 0; i < vol->used_ebs; i++) { 74 for (i = 0; i < vol->used_ebs; i++) {
75 int size; 75 int size;
76 76
77 cond_resched();
78
77 if (i == vol->used_ebs - 1) 79 if (i == vol->used_ebs - 1)
78 size = vol->last_eb_bytes; 80 size = vol->last_eb_bytes;
79 else 81 else
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index f80ffaba9058..c5be82d9d345 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -50,13 +50,13 @@
50#define UBI_NAME_STR "ubi" 50#define UBI_NAME_STR "ubi"
51 51
52/* Normal UBI messages */ 52/* Normal UBI messages */
53#define ubi_msg(ubi, fmt, ...) pr_notice("UBI-%d: %s:" fmt "\n", \ 53#define ubi_msg(ubi, fmt, ...) pr_notice(UBI_NAME_STR "%d: " fmt "\n", \
54 ubi->ubi_num, __func__, ##__VA_ARGS__) 54 ubi->ubi_num, ##__VA_ARGS__)
55/* UBI warning messages */ 55/* UBI warning messages */
56#define ubi_warn(ubi, fmt, ...) pr_warn("UBI-%d warning: %s: " fmt "\n", \ 56#define ubi_warn(ubi, fmt, ...) pr_warn(UBI_NAME_STR "%d warning: %s: " fmt "\n", \
57 ubi->ubi_num, __func__, ##__VA_ARGS__) 57 ubi->ubi_num, __func__, ##__VA_ARGS__)
58/* UBI error messages */ 58/* UBI error messages */
59#define ubi_err(ubi, fmt, ...) pr_err("UBI-%d error: %s: " fmt "\n", \ 59#define ubi_err(ubi, fmt, ...) pr_err(UBI_NAME_STR "%d error: %s: " fmt "\n", \
60 ubi->ubi_num, __func__, ##__VA_ARGS__) 60 ubi->ubi_num, __func__, ##__VA_ARGS__)
61 61
62/* Background thread name pattern */ 62/* Background thread name pattern */
@@ -261,6 +261,7 @@ struct ubi_fm_pool {
261 * @readers: number of users holding this volume in read-only mode 261 * @readers: number of users holding this volume in read-only mode
262 * @writers: number of users holding this volume in read-write mode 262 * @writers: number of users holding this volume in read-write mode
263 * @exclusive: whether somebody holds this volume in exclusive mode 263 * @exclusive: whether somebody holds this volume in exclusive mode
264 * @metaonly: whether somebody is altering only meta data of this volume
264 * 265 *
265 * @reserved_pebs: how many physical eraseblocks are reserved for this volume 266 * @reserved_pebs: how many physical eraseblocks are reserved for this volume
266 * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME) 267 * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME)
@@ -309,6 +310,7 @@ struct ubi_volume {
309 int readers; 310 int readers;
310 int writers; 311 int writers;
311 int exclusive; 312 int exclusive;
313 int metaonly;
312 314
313 int reserved_pebs; 315 int reserved_pebs;
314 int vol_type; 316 int vol_type;
@@ -339,7 +341,8 @@ struct ubi_volume {
339/** 341/**
340 * struct ubi_volume_desc - UBI volume descriptor returned when it is opened. 342 * struct ubi_volume_desc - UBI volume descriptor returned when it is opened.
341 * @vol: reference to the corresponding volume description object 343 * @vol: reference to the corresponding volume description object
342 * @mode: open mode (%UBI_READONLY, %UBI_READWRITE, or %UBI_EXCLUSIVE) 344 * @mode: open mode (%UBI_READONLY, %UBI_READWRITE, %UBI_EXCLUSIVE
345 * or %UBI_METAONLY)
343 */ 346 */
344struct ubi_volume_desc { 347struct ubi_volume_desc {
345 struct ubi_volume *vol; 348 struct ubi_volume *vol;
@@ -390,7 +393,8 @@ struct ubi_debug_info {
390 * @volumes_lock: protects @volumes, @rsvd_pebs, @avail_pebs, beb_rsvd_pebs, 393 * @volumes_lock: protects @volumes, @rsvd_pebs, @avail_pebs, beb_rsvd_pebs,
391 * @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count, 394 * @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count,
392 * @vol->readers, @vol->writers, @vol->exclusive, 395 * @vol->readers, @vol->writers, @vol->exclusive,
393 * @vol->ref_count, @vol->mapping and @vol->eba_tbl. 396 * @vol->metaonly, @vol->ref_count, @vol->mapping and
397 * @vol->eba_tbl.
394 * @ref_count: count of references on the UBI device 398 * @ref_count: count of references on the UBI device
395 * @image_seq: image sequence number recorded on EC headers 399 * @image_seq: image sequence number recorded on EC headers
396 * 400 *
@@ -791,6 +795,9 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
791 int lnum); 795 int lnum);
792int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, 796int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
793 void *buf, int offset, int len, int check); 797 void *buf, int offset, int len, int check);
798int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol,
799 struct ubi_sgl *sgl, int lnum, int offset, int len,
800 int check);
794int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, 801int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
795 const void *buf, int offset, int len); 802 const void *buf, int offset, int len);
796int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol, 803int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index f8fc3081bbb4..68c9c5ea676f 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -655,14 +655,13 @@ static int init_volumes(struct ubi_device *ubi,
655 655
656/** 656/**
657 * check_av - check volume attaching information. 657 * check_av - check volume attaching information.
658 * @ubi: UBI device description object
659 * @vol: UBI volume description object 658 * @vol: UBI volume description object
660 * @av: volume attaching information 659 * @av: volume attaching information
661 * 660 *
662 * This function returns zero if the volume attaching information is consistent 661 * This function returns zero if the volume attaching information is consistent
 663 * to the data read from the volume table, and %-EINVAL if not. 662 * to the data read from the volume table, and %-EINVAL if not.
664 */ 663 */
665static int check_av(const struct ubi_device *ubi, const struct ubi_volume *vol, 664static int check_av(const struct ubi_volume *vol,
666 const struct ubi_ainf_volume *av) 665 const struct ubi_ainf_volume *av)
667{ 666{
668 int err; 667 int err;
@@ -690,7 +689,7 @@ static int check_av(const struct ubi_device *ubi, const struct ubi_volume *vol,
690 return 0; 689 return 0;
691 690
692bad: 691bad:
693 ubi_err(ubi, "bad attaching information, error %d", err); 692 ubi_err(vol->ubi, "bad attaching information, error %d", err);
694 ubi_dump_av(av); 693 ubi_dump_av(av);
695 ubi_dump_vol_info(vol); 694 ubi_dump_vol_info(vol);
696 return -EINVAL; 695 return -EINVAL;
@@ -753,7 +752,7 @@ static int check_attaching_info(const struct ubi_device *ubi,
753 ubi_msg(ubi, "finish volume %d removal", av->vol_id); 752 ubi_msg(ubi, "finish volume %d removal", av->vol_id);
754 ubi_remove_av(ai, av); 753 ubi_remove_av(ai, av);
755 } else if (av) { 754 } else if (av) {
756 err = check_av(ubi, vol, av); 755 err = check_av(vol, av);
757 if (err) 756 if (err)
758 return err; 757 return err;
759 } 758 }
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 834f6fe1f5fa..8f7bde6a85d6 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -470,11 +470,8 @@ struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
470{ 470{
471 struct ubi_wl_entry *e = NULL; 471 struct ubi_wl_entry *e = NULL;
472 472
473 if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1)) { 473 if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
474 ubi_warn(ubi, "Can't get peb for fastmap:anchor=%d, free_cnt=%d, reserved=%d",
475 anchor, ubi->free_count, ubi->beb_rsvd_pebs);
476 goto out; 474 goto out;
477 }
478 475
479 if (anchor) 476 if (anchor)
480 e = find_anchor_wl_entry(&ubi->free); 477 e = find_anchor_wl_entry(&ubi->free);
@@ -1806,11 +1803,8 @@ int ubi_thread(void *u)
1806 for (;;) { 1803 for (;;) {
1807 int err; 1804 int err;
1808 1805
1809 if (kthread_should_stop()) { 1806 if (kthread_should_stop())
1810 ubi_msg(ubi, "background thread \"%s\" should stop, PID %d",
1811 ubi->bgt_name, task_pid_nr(current));
1812 break; 1807 break;
1813 }
1814 1808
1815 if (try_to_freeze()) 1809 if (try_to_freeze())
1816 continue; 1810 continue;
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 7ed13e1e216a..4cfb3e82c56f 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -2032,6 +2032,8 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
2032 long long blk_offs; 2032 long long blk_offs;
2033 struct ubifs_data_node *dn = node; 2033 struct ubifs_data_node *dn = node;
2034 2034
2035 ubifs_assert(zbr->len >= UBIFS_DATA_NODE_SZ);
2036
2035 /* 2037 /*
2036 * Search the inode node this data node belongs to and insert 2038 * Search the inode node this data node belongs to and insert
2037 * it to the RB-tree of inodes. 2039 * it to the RB-tree of inodes.
@@ -2060,6 +2062,8 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
2060 struct ubifs_dent_node *dent = node; 2062 struct ubifs_dent_node *dent = node;
2061 struct fsck_inode *fscki1; 2063 struct fsck_inode *fscki1;
2062 2064
2065 ubifs_assert(zbr->len >= UBIFS_DENT_NODE_SZ);
2066
2063 err = ubifs_validate_entry(c, dent); 2067 err = ubifs_validate_entry(c, dent);
2064 if (err) 2068 if (err)
2065 goto out_dump; 2069 goto out_dump;
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index c49b1981ac95..0fa6c803992e 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -270,6 +270,10 @@ static int ubifs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
270 goto out_budg; 270 goto out_budg;
271 } 271 }
272 272
273 err = ubifs_init_security(dir, inode, &dentry->d_name);
274 if (err)
275 goto out_cancel;
276
273 mutex_lock(&dir_ui->ui_mutex); 277 mutex_lock(&dir_ui->ui_mutex);
274 dir->i_size += sz_change; 278 dir->i_size += sz_change;
275 dir_ui->ui_size = dir->i_size; 279 dir_ui->ui_size = dir->i_size;
@@ -726,6 +730,10 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
726 goto out_budg; 730 goto out_budg;
727 } 731 }
728 732
733 err = ubifs_init_security(dir, inode, &dentry->d_name);
734 if (err)
735 goto out_cancel;
736
729 mutex_lock(&dir_ui->ui_mutex); 737 mutex_lock(&dir_ui->ui_mutex);
730 insert_inode_hash(inode); 738 insert_inode_hash(inode);
731 inc_nlink(inode); 739 inc_nlink(inode);
@@ -806,6 +814,10 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
806 ui->data = dev; 814 ui->data = dev;
807 ui->data_len = devlen; 815 ui->data_len = devlen;
808 816
817 err = ubifs_init_security(dir, inode, &dentry->d_name);
818 if (err)
819 goto out_cancel;
820
809 mutex_lock(&dir_ui->ui_mutex); 821 mutex_lock(&dir_ui->ui_mutex);
810 dir->i_size += sz_change; 822 dir->i_size += sz_change;
811 dir_ui->ui_size = dir->i_size; 823 dir_ui->ui_size = dir->i_size;
@@ -882,6 +894,10 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
882 ui->data_len = len; 894 ui->data_len = len;
883 inode->i_size = ubifs_inode(inode)->ui_size = len; 895 inode->i_size = ubifs_inode(inode)->ui_size = len;
884 896
897 err = ubifs_init_security(dir, inode, &dentry->d_name);
898 if (err)
899 goto out_cancel;
900
885 mutex_lock(&dir_ui->ui_mutex); 901 mutex_lock(&dir_ui->ui_mutex);
886 dir->i_size += sz_change; 902 dir->i_size += sz_change;
887 dir_ui->ui_size = dir->i_size; 903 dir_ui->ui_size = dir->i_size;
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 035e51011444..e627c0acf626 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1573,6 +1573,10 @@ const struct inode_operations ubifs_symlink_inode_operations = {
1573 .follow_link = ubifs_follow_link, 1573 .follow_link = ubifs_follow_link,
1574 .setattr = ubifs_setattr, 1574 .setattr = ubifs_setattr,
1575 .getattr = ubifs_getattr, 1575 .getattr = ubifs_getattr,
1576 .setxattr = ubifs_setxattr,
1577 .getxattr = ubifs_getxattr,
1578 .listxattr = ubifs_listxattr,
1579 .removexattr = ubifs_removexattr,
1576}; 1580};
1577 1581
1578const struct file_operations ubifs_file_operations = { 1582const struct file_operations ubifs_file_operations = {
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index 3187925e9879..9b40a1c5e160 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -1028,9 +1028,22 @@ int ubifs_replay_journal(struct ubifs_info *c)
1028 1028
1029 do { 1029 do {
1030 err = replay_log_leb(c, lnum, 0, c->sbuf); 1030 err = replay_log_leb(c, lnum, 0, c->sbuf);
1031 if (err == 1) 1031 if (err == 1) {
1032 /* We hit the end of the log */ 1032 if (lnum != c->lhead_lnum)
1033 break; 1033 /* We hit the end of the log */
1034 break;
1035
1036 /*
1037 * The head of the log must always start with the
1038 * "commit start" node on a properly formatted UBIFS.
1039 * But we found no nodes at all, which means that
 1040 * something went wrong and we cannot proceed mounting
1041 * the file-system.
1042 */
1043 ubifs_err("no UBIFS nodes found at the log head LEB %d:%d, possibly corrupted",
1044 lnum, 0);
1045 err = -EINVAL;
1046 }
1034 if (err) 1047 if (err)
1035 goto out; 1048 goto out;
1036 lnum = ubifs_next_log_lnum(c, lnum); 1049 lnum = ubifs_next_log_lnum(c, lnum);
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 6197154f36ca..93e946561c5c 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -2036,6 +2036,7 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
2036 if (c->max_inode_sz > MAX_LFS_FILESIZE) 2036 if (c->max_inode_sz > MAX_LFS_FILESIZE)
2037 sb->s_maxbytes = c->max_inode_sz = MAX_LFS_FILESIZE; 2037 sb->s_maxbytes = c->max_inode_sz = MAX_LFS_FILESIZE;
2038 sb->s_op = &ubifs_super_operations; 2038 sb->s_op = &ubifs_super_operations;
2039 sb->s_xattr = ubifs_xattr_handlers;
2039 2040
2040 mutex_lock(&c->umount_mutex); 2041 mutex_lock(&c->umount_mutex);
2041 err = mount_ubifs(c); 2042 err = mount_ubifs(c);
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index c4fe900c67ab..bc04b9c69891 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -36,6 +36,7 @@
36#include <linux/mtd/ubi.h> 36#include <linux/mtd/ubi.h>
37#include <linux/pagemap.h> 37#include <linux/pagemap.h>
38#include <linux/backing-dev.h> 38#include <linux/backing-dev.h>
39#include <linux/security.h>
39#include "ubifs-media.h" 40#include "ubifs-media.h"
40 41
41/* Version of this UBIFS implementation */ 42/* Version of this UBIFS implementation */
@@ -1465,6 +1466,7 @@ extern spinlock_t ubifs_infos_lock;
1465extern atomic_long_t ubifs_clean_zn_cnt; 1466extern atomic_long_t ubifs_clean_zn_cnt;
1466extern struct kmem_cache *ubifs_inode_slab; 1467extern struct kmem_cache *ubifs_inode_slab;
1467extern const struct super_operations ubifs_super_operations; 1468extern const struct super_operations ubifs_super_operations;
1469extern const struct xattr_handler *ubifs_xattr_handlers[];
1468extern const struct address_space_operations ubifs_file_address_operations; 1470extern const struct address_space_operations ubifs_file_address_operations;
1469extern const struct file_operations ubifs_file_operations; 1471extern const struct file_operations ubifs_file_operations;
1470extern const struct inode_operations ubifs_file_inode_operations; 1472extern const struct inode_operations ubifs_file_inode_operations;
@@ -1754,6 +1756,8 @@ ssize_t ubifs_getxattr(struct dentry *dentry, const char *name, void *buf,
1754 size_t size); 1756 size_t size);
1755ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size); 1757ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size);
1756int ubifs_removexattr(struct dentry *dentry, const char *name); 1758int ubifs_removexattr(struct dentry *dentry, const char *name);
1759int ubifs_init_security(struct inode *dentry, struct inode *inode,
1760 const struct qstr *qstr);
1757 1761
1758/* super.c */ 1762/* super.c */
1759struct inode *ubifs_iget(struct super_block *sb, unsigned long inum); 1763struct inode *ubifs_iget(struct super_block *sb, unsigned long inum);
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 5e0a63b1b0d5..a92be244a6fb 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -100,24 +100,30 @@ static const struct file_operations empty_fops;
100static int create_xattr(struct ubifs_info *c, struct inode *host, 100static int create_xattr(struct ubifs_info *c, struct inode *host,
101 const struct qstr *nm, const void *value, int size) 101 const struct qstr *nm, const void *value, int size)
102{ 102{
103 int err; 103 int err, names_len;
104 struct inode *inode; 104 struct inode *inode;
105 struct ubifs_inode *ui, *host_ui = ubifs_inode(host); 105 struct ubifs_inode *ui, *host_ui = ubifs_inode(host);
106 struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1, 106 struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1,
107 .new_ino_d = ALIGN(size, 8), .dirtied_ino = 1, 107 .new_ino_d = ALIGN(size, 8), .dirtied_ino = 1,
108 .dirtied_ino_d = ALIGN(host_ui->data_len, 8) }; 108 .dirtied_ino_d = ALIGN(host_ui->data_len, 8) };
109 109
110 if (host_ui->xattr_cnt >= MAX_XATTRS_PER_INODE) 110 if (host_ui->xattr_cnt >= MAX_XATTRS_PER_INODE) {
111 ubifs_err("inode %lu already has too many xattrs (%d), cannot create more",
112 host->i_ino, host_ui->xattr_cnt);
111 return -ENOSPC; 113 return -ENOSPC;
114 }
112 /* 115 /*
113 * Linux limits the maximum size of the extended attribute names list 116 * Linux limits the maximum size of the extended attribute names list
114 * to %XATTR_LIST_MAX. This means we should not allow creating more 117 * to %XATTR_LIST_MAX. This means we should not allow creating more
115 * extended attributes if the name list becomes larger. This limitation 118 * extended attributes if the name list becomes larger. This limitation
116 * is artificial for UBIFS, though. 119 * is artificial for UBIFS, though.
117 */ 120 */
118 if (host_ui->xattr_names + host_ui->xattr_cnt + 121 names_len = host_ui->xattr_names + host_ui->xattr_cnt + nm->len + 1;
119 nm->len + 1 > XATTR_LIST_MAX) 122 if (names_len > XATTR_LIST_MAX) {
123 ubifs_err("cannot add one more xattr name to inode %lu, total names length would become %d, max. is %d",
124 host->i_ino, names_len, XATTR_LIST_MAX);
120 return -ENOSPC; 125 return -ENOSPC;
126 }
121 127
122 err = ubifs_budget_space(c, &req); 128 err = ubifs_budget_space(c, &req);
123 if (err) 129 if (err)
@@ -293,18 +299,16 @@ static struct inode *iget_xattr(struct ubifs_info *c, ino_t inum)
293 return ERR_PTR(-EINVAL); 299 return ERR_PTR(-EINVAL);
294} 300}
295 301
296int ubifs_setxattr(struct dentry *dentry, const char *name, 302static int setxattr(struct inode *host, const char *name, const void *value,
297 const void *value, size_t size, int flags) 303 size_t size, int flags)
298{ 304{
299 struct inode *inode, *host = dentry->d_inode; 305 struct inode *inode;
300 struct ubifs_info *c = host->i_sb->s_fs_info; 306 struct ubifs_info *c = host->i_sb->s_fs_info;
301 struct qstr nm = QSTR_INIT(name, strlen(name)); 307 struct qstr nm = QSTR_INIT(name, strlen(name));
302 struct ubifs_dent_node *xent; 308 struct ubifs_dent_node *xent;
303 union ubifs_key key; 309 union ubifs_key key;
304 int err, type; 310 int err, type;
305 311
306 dbg_gen("xattr '%s', host ino %lu ('%pd'), size %zd", name,
307 host->i_ino, dentry, size);
308 ubifs_assert(mutex_is_locked(&host->i_mutex)); 312 ubifs_assert(mutex_is_locked(&host->i_mutex));
309 313
310 if (size > UBIFS_MAX_INO_DATA) 314 if (size > UBIFS_MAX_INO_DATA)
@@ -356,6 +360,15 @@ out_free:
356 return err; 360 return err;
357} 361}
358 362
363int ubifs_setxattr(struct dentry *dentry, const char *name,
364 const void *value, size_t size, int flags)
365{
366 dbg_gen("xattr '%s', host ino %lu ('%pd'), size %zd",
367 name, dentry->d_inode->i_ino, dentry, size);
368
369 return setxattr(dentry->d_inode, name, value, size, flags);
370}
371
359ssize_t ubifs_getxattr(struct dentry *dentry, const char *name, void *buf, 372ssize_t ubifs_getxattr(struct dentry *dentry, const char *name, void *buf,
360 size_t size) 373 size_t size)
361{ 374{
@@ -568,3 +581,84 @@ out_free:
568 kfree(xent); 581 kfree(xent);
569 return err; 582 return err;
570} 583}
584
585static size_t security_listxattr(struct dentry *d, char *list, size_t list_size,
586 const char *name, size_t name_len, int flags)
587{
588 const int prefix_len = XATTR_SECURITY_PREFIX_LEN;
589 const size_t total_len = prefix_len + name_len + 1;
590
591 if (list && total_len <= list_size) {
592 memcpy(list, XATTR_SECURITY_PREFIX, prefix_len);
593 memcpy(list + prefix_len, name, name_len);
594 list[prefix_len + name_len] = '\0';
595 }
596
597 return total_len;
598}
599
/* ->get handler for "security." xattrs; the handler @flags are unused. */
static int security_getxattr(struct dentry *d, const char *name, void *buffer,
			     size_t size, int flags)
{
	/* Plain delegation - UBIFS stores security xattrs like any other. */
	return ubifs_getxattr(d, name, buffer, size);
}
605
/* ->set handler for "security." xattrs; @handler_flags is unused. */
static int security_setxattr(struct dentry *d, const char *name,
			     const void *value, size_t size, int flags,
			     int handler_flags)
{
	/* Plain delegation - UBIFS stores security xattrs like any other. */
	return ubifs_setxattr(d, name, value, size, flags);
}
612
613static const struct xattr_handler ubifs_xattr_security_handler = {
614 .prefix = XATTR_SECURITY_PREFIX,
615 .list = security_listxattr,
616 .get = security_getxattr,
617 .set = security_setxattr,
618};
619
620const struct xattr_handler *ubifs_xattr_handlers[] = {
621 &ubifs_xattr_security_handler,
622 NULL,
623};
624
625static int init_xattrs(struct inode *inode, const struct xattr *xattr_array,
626 void *fs_info)
627{
628 const struct xattr *xattr;
629 char *name;
630 int err = 0;
631
632 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
633 name = kmalloc(XATTR_SECURITY_PREFIX_LEN +
634 strlen(xattr->name) + 1, GFP_NOFS);
635 if (!name) {
636 err = -ENOMEM;
637 break;
638 }
639 strcpy(name, XATTR_SECURITY_PREFIX);
640 strcpy(name + XATTR_SECURITY_PREFIX_LEN, xattr->name);
641 err = setxattr(inode, name, xattr->value, xattr->value_len, 0);
642 kfree(name);
643 if (err < 0)
644 break;
645 }
646
647 return err;
648}
649
650int ubifs_init_security(struct inode *dentry, struct inode *inode,
651 const struct qstr *qstr)
652{
653 int err;
654
655 mutex_lock(&inode->i_mutex);
656 err = security_inode_init_security(inode, dentry, qstr,
657 &init_xattrs, 0);
658 mutex_unlock(&inode->i_mutex);
659
660 if (err)
661 ubifs_err("cannot initialize security for inode %lu, error %d",
662 inode->i_ino, err);
663 return err;
664}
diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h
index c3918a0684fe..1e271cb559cd 100644
--- a/include/linux/mtd/ubi.h
+++ b/include/linux/mtd/ubi.h
@@ -23,22 +23,32 @@
23 23
24#include <linux/ioctl.h> 24#include <linux/ioctl.h>
25#include <linux/types.h> 25#include <linux/types.h>
26#include <linux/scatterlist.h>
26#include <mtd/ubi-user.h> 27#include <mtd/ubi-user.h>
27 28
 28/* All volumes/LEBs */ 29/* All volumes/LEBs */
29#define UBI_ALL -1 30#define UBI_ALL -1
30 31
31/* 32/*
33 * Maximum number of scatter gather list entries,
 34 * we use only 64 to have a lower memory footprint.
35 */
36#define UBI_MAX_SG_COUNT 64
37
38/*
32 * enum ubi_open_mode - UBI volume open mode constants. 39 * enum ubi_open_mode - UBI volume open mode constants.
33 * 40 *
34 * UBI_READONLY: read-only mode 41 * UBI_READONLY: read-only mode
35 * UBI_READWRITE: read-write mode 42 * UBI_READWRITE: read-write mode
36 * UBI_EXCLUSIVE: exclusive mode 43 * UBI_EXCLUSIVE: exclusive mode
44 * UBI_METAONLY: modify only the volume meta-data,
45 * i.e. the data stored in the volume table, but not in any of volume LEBs.
37 */ 46 */
38enum { 47enum {
39 UBI_READONLY = 1, 48 UBI_READONLY = 1,
40 UBI_READWRITE, 49 UBI_READWRITE,
41 UBI_EXCLUSIVE 50 UBI_EXCLUSIVE,
51 UBI_METAONLY
42}; 52};
43 53
44/** 54/**
@@ -116,6 +126,35 @@ struct ubi_volume_info {
116}; 126};
117 127
118/** 128/**
129 * struct ubi_sgl - UBI scatter gather list data structure.
130 * @list_pos: current position in @sg[]
131 * @page_pos: current position in @sg[@list_pos]
132 * @sg: the scatter gather list itself
133 *
134 * ubi_sgl is a wrapper around a scatter list which keeps track of the
135 * current position in the list and the current list item such that
136 * it can be used across multiple ubi_leb_read_sg() calls.
137 */
138struct ubi_sgl {
139 int list_pos;
140 int page_pos;
141 struct scatterlist sg[UBI_MAX_SG_COUNT];
142};
143
144/**
145 * ubi_sgl_init - initialize an UBI scatter gather list data structure.
146 * @usgl: the UBI scatter gather struct itself
147 *
148 * Please note that you still have to use sg_init_table() or any adequate
149 * function to initialize the unterlaying struct scatterlist.
150 */
151static inline void ubi_sgl_init(struct ubi_sgl *usgl)
152{
153 usgl->list_pos = 0;
154 usgl->page_pos = 0;
155}
156
157/**
119 * struct ubi_device_info - UBI device description data structure. 158 * struct ubi_device_info - UBI device description data structure.
120 * @ubi_num: ubi device number 159 * @ubi_num: ubi device number
121 * @leb_size: logical eraseblock size on this UBI device 160 * @leb_size: logical eraseblock size on this UBI device
@@ -210,6 +249,8 @@ int ubi_unregister_volume_notifier(struct notifier_block *nb);
210void ubi_close_volume(struct ubi_volume_desc *desc); 249void ubi_close_volume(struct ubi_volume_desc *desc);
211int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset, 250int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
212 int len, int check); 251 int len, int check);
252int ubi_leb_read_sg(struct ubi_volume_desc *desc, int lnum, struct ubi_sgl *sgl,
253 int offset, int len, int check);
213int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf, 254int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
214 int offset, int len); 255 int offset, int len);
215int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf, 256int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
@@ -230,4 +271,14 @@ static inline int ubi_read(struct ubi_volume_desc *desc, int lnum, char *buf,
230{ 271{
231 return ubi_leb_read(desc, lnum, buf, offset, len, 0); 272 return ubi_leb_read(desc, lnum, buf, offset, len, 0);
232} 273}
274
/*
 * Convenience wrapper around 'ubi_leb_read_sg()' which never asks UBI to
 * verify the data CRC.
 */
static inline int ubi_read_sg(struct ubi_volume_desc *desc, int lnum,
			      struct ubi_sgl *sgl, int offset, int len)
{
	const int check = 0;	/* CRC checking disabled */

	return ubi_leb_read_sg(desc, lnum, sgl, offset, len, check);
}
233#endif /* !__LINUX_UBI_H__ */ 284#endif /* !__LINUX_UBI_H__ */