path: root/block
Commit message (Author, Date)
* Merge branch 'llseek' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/bkl (Linus Torvalds, 2010-10-22)

  * 'llseek' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/bkl:
      vfs: make no_llseek the default
      vfs: don't use BKL in default_llseek
      llseek: automatically add .llseek fop
      libfs: use generic_file_llseek for simple_attr
      mac80211: disallow seeks in minstrel debug code
      lirc: make chardev nonseekable
      viotape: use noop_llseek
      raw: use explicit llseek file operations
      ibmasmfs: use generic_file_llseek
      spufs: use llseek in all file operations
      arm/omap: use generic_file_llseek in iommu_debug
      lkdtm: use generic_file_llseek in debugfs
      net/wireless: use generic_file_llseek in debugfs
      drm: use noop_llseek
| * llseek: automatically add .llseek fop (Arnd Bergmann, 2010-10-15)

All file_operations should get a .llseek operation so we can make nonseekable_open the default for future file operations without a .llseek pointer. The three cases that we can automatically detect are no_llseek, seq_lseek and default_llseek. For cases where we can automatically prove that the file offset is always ignored, we use noop_llseek, which maintains the current behavior of not returning an error from a seek.

New drivers should normally not use noop_llseek but instead use no_llseek and call nonseekable_open at open time. Existing drivers can be converted to do the same when the maintainer knows for certain that no user code relies on calling seek on the device file.

The generated code is often incorrectly indented and right now contains comments that clarify for each added line why a specific variant was chosen. In the version that gets submitted upstream, the comments will be gone and I will manually fix the indentation, because there does not seem to be a way to do that using coccinelle.

Some amount of new code is currently sitting in linux-next that should get the same modifications, which I will do at the end of the merge window.

Many thanks to Julia Lawall for helping me learn to write a semantic patch that does all this.

===== begin semantic patch =====
// This adds an llseek= method to all file operations,
// as a preparation for making no_llseek the default.
//
// The rules are
// - use no_llseek explicitly if we do nonseekable_open
// - use seq_lseek for sequential files
// - use default_llseek if we know we access f_pos
// - use noop_llseek if we know we don't access f_pos,
//   but we still want to allow users to call lseek

@ open1 exists @
identifier nested_open;
@@
nested_open(...)
{
<+... nonseekable_open(...) ...+>
}

@ open exists @
identifier open_f;
identifier i, f;
identifier open1.nested_open;
@@
int open_f(struct inode *i, struct file *f)
{
<+...
(
nonseekable_open(...)
|
nested_open(...)
)
...+>
}

@ read disable optional_qualifier exists @
identifier read_f;
identifier f, p, s, off;
type ssize_t, size_t, loff_t;
expression E;
identifier func;
@@
ssize_t read_f(struct file *f, char *p, size_t s, loff_t *off)
{
<+...
(
*off = E
|
*off += E
|
func(..., off, ...)
|
E = *off
)
...+>
}

@ read_no_fpos disable optional_qualifier exists @
identifier read_f;
identifier f, p, s, off;
type ssize_t, size_t, loff_t;
@@
ssize_t read_f(struct file *f, char *p, size_t s, loff_t *off)
{
... when != off
}

@ write @
identifier write_f;
identifier f, p, s, off;
type ssize_t, size_t, loff_t;
expression E;
identifier func;
@@
ssize_t write_f(struct file *f, const char *p, size_t s, loff_t *off)
{
<+...
(
*off = E
|
*off += E
|
func(..., off, ...)
|
E = *off
)
...+>
}

@ write_no_fpos @
identifier write_f;
identifier f, p, s, off;
type ssize_t, size_t, loff_t;
@@
ssize_t write_f(struct file *f, const char *p, size_t s, loff_t *off)
{
... when != off
}

@ fops0 @
identifier fops;
@@
struct file_operations fops = {
...
};

@ has_llseek depends on fops0 @
identifier fops0.fops;
identifier llseek_f;
@@
struct file_operations fops = {
... .llseek = llseek_f, ...
};

@ has_read depends on fops0 @
identifier fops0.fops;
identifier read_f;
@@
struct file_operations fops = {
... .read = read_f, ...
};

@ has_write depends on fops0 @
identifier fops0.fops;
identifier write_f;
@@
struct file_operations fops = {
... .write = write_f, ...
};

@ has_open depends on fops0 @
identifier fops0.fops;
identifier open_f;
@@
struct file_operations fops = {
... .open = open_f, ...
};

// use no_llseek if we call nonseekable_open
////////////////////////////////////////////
@ nonseekable1 depends on !has_llseek && has_open @
identifier fops0.fops;
identifier nso ~= "nonseekable_open";
@@
struct file_operations fops = {
... .open = nso, ...
+.llseek = no_llseek, /* nonseekable */
};

@ nonseekable2 depends on !has_llseek @
identifier fops0.fops;
identifier open.open_f;
@@
struct file_operations fops = {
... .open = open_f, ...
+.llseek = no_llseek, /* open uses nonseekable */
};

// use seq_lseek for sequential files
/////////////////////////////////////
@ seq depends on !has_llseek @
identifier fops0.fops;
identifier sr ~= "seq_read";
@@
struct file_operations fops = {
... .read = sr, ...
+.llseek = seq_lseek, /* we have seq_read */
};

// use default_llseek if there is a readdir
///////////////////////////////////////////
@ fops1 depends on !has_llseek && !nonseekable1 && !nonseekable2 && !seq @
identifier fops0.fops;
identifier readdir_e;
@@
// any other fop is used that changes pos
struct file_operations fops = {
... .readdir = readdir_e, ...
+.llseek = default_llseek, /* readdir is present */
};

// use default_llseek if at least one of read/write touches f_pos
/////////////////////////////////////////////////////////////////
@ fops2 depends on !fops1 && !has_llseek && !nonseekable1 && !nonseekable2 && !seq @
identifier fops0.fops;
identifier read.read_f;
@@
// read fops use offset
struct file_operations fops = {
... .read = read_f, ...
+.llseek = default_llseek, /* read accesses f_pos */
};

@ fops3 depends on !fops1 && !fops2 && !has_llseek && !nonseekable1 && !nonseekable2 && !seq @
identifier fops0.fops;
identifier write.write_f;
@@
// write fops use offset
struct file_operations fops = {
... .write = write_f, ...
+ .llseek = default_llseek, /* write accesses f_pos */
};

// Use noop_llseek if neither read nor write accesses f_pos
///////////////////////////////////////////////////////////
@ fops4 depends on !fops1 && !fops2 && !fops3 && !has_llseek && !nonseekable1 && !nonseekable2 && !seq @
identifier fops0.fops;
identifier read_no_fpos.read_f;
identifier write_no_fpos.write_f;
@@
// neither read nor write accesses f_pos
struct file_operations fops = {
... .write = write_f, ... .read = read_f, ...
+.llseek = noop_llseek, /* read and write both use no f_pos */
};

@ depends on has_write && !has_read && !fops1 && !fops2 && !has_llseek && !nonseekable1 && !nonseekable2 && !seq @
identifier fops0.fops;
identifier write_no_fpos.write_f;
@@
struct file_operations fops = {
... .write = write_f, ...
+.llseek = noop_llseek, /* write uses no f_pos */
};

@ depends on has_read && !has_write && !fops1 && !fops2 && !has_llseek && !nonseekable1 && !nonseekable2 && !seq @
identifier fops0.fops;
identifier read_no_fpos.read_f;
@@
struct file_operations fops = {
... .read = read_f, ...
+.llseek = noop_llseek, /* read uses no f_pos */
};

@ depends on !has_read && !has_write && !fops1 && !fops2 && !has_llseek && !nonseekable1 && !nonseekable2 && !seq @
identifier fops0.fops;
@@
struct file_operations fops = {
...
+.llseek = noop_llseek, /* no read or write fn */
};
===== End semantic patch =====

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Cc: Julia Lawall <julia@diku.dk>
Cc: Christoph Hellwig <hch@infradead.org>
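[Editor's illustration] To make the "seq" rule above concrete, here is a hedged sketch of what a converted seq_file-based driver ends up looking like; foo_show/foo_open/foo_fops are invented names, not from the patch:

    #include <linux/module.h>
    #include <linux/fs.h>
    #include <linux/seq_file.h>

    static int foo_show(struct seq_file *m, void *v)
    {
            seq_puts(m, "hello\n");
            return 0;
    }

    static int foo_open(struct inode *inode, struct file *file)
    {
            return single_open(file, foo_show, NULL);
    }

    static const struct file_operations foo_fops = {
            .owner   = THIS_MODULE,
            .open    = foo_open,
            .read    = seq_read,
            .release = single_release,
            .llseek  = seq_lseek,   /* the line the semantic patch adds */
    };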
* | Merge branch 'trivial' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/bkl (Linus Torvalds, 2010-10-22)

  * 'trivial' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/bkl:
      block: autoconvert trivial BKL users to private mutex
      drivers: autoconvert trivial BKL users to private mutex
      ipmi: autoconvert trivial BKL users to private mutex
      mac: autoconvert trivial BKL users to private mutex
      mtd: autoconvert trivial BKL users to private mutex
      scsi: autoconvert trivial BKL users to private mutex

  Fix up trivial conflicts (due to addition of private mutex right next to deletion of a version string) in drivers/char/pcmcia/cm40[04]0_cs.c
| * | block: autoconvert trivial BKL users to private mutex (Arnd Bergmann, 2010-10-05)

The block device drivers have all gained new lock_kernel calls from a recent pushdown, and some of the drivers were already using the BKL before. This turns the BKL into a set of per-driver mutexes. Still need to check whether this is safe to do.

file=$1
name=$2

if grep -q lock_kernel ${file} ; then
    if grep -q 'include.*linux.mutex.h' ${file} ; then
        sed -i '/include.*<linux\/smp_lock.h>/d' ${file}
    else
        sed -i 's/include.*<linux\/smp_lock.h>.*$/include <linux\/mutex.h>/g' ${file}
    fi
    sed -i ${file} \
        -e "/^#include.*linux.mutex.h/,$ {
                1,/^\(static\|int\|long\)/ {
                        /^\(static\|int\|long\)/istatic DEFINE_MUTEX(${name}_mutex);
        } }" \
        -e "s/\(un\)*lock_kernel\>[ ]*()/mutex_\1lock(\&${name}_mutex)/g" \
        -e '/[ ]*cycle_kernel_lock();/d'
else
    sed -i -e '/include.*\<smp_lock.h\>/d' ${file} \
        -e '/cycle_kernel_lock()/d'
fi

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
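[Editor's illustration] A hedged sketch of the before/after shape the script produces in a driver; all foo_* names are invented for the example, and the "was:" comments show what the sed rules replace:

    #include <linux/blkdev.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(foo_mutex);     /* inserted by the script */

    static int foo_do_ioctl(struct block_device *bdev, unsigned int cmd,
                            unsigned long arg)
    {
            return -ENOTTY;             /* stub for the example */
    }

    static int foo_ioctl(struct block_device *bdev, fmode_t mode,
                         unsigned int cmd, unsigned long arg)
    {
            int ret;

            mutex_lock(&foo_mutex);     /* was: lock_kernel() */
            ret = foo_do_ioctl(bdev, cmd, arg);
            mutex_unlock(&foo_mutex);   /* was: unlock_kernel() */

            return ret;
    }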
* | Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6 (Linus Torvalds, 2010-10-20)

  * git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6:
      [SCSI] bsg: fix incorrect device_status value
      [SCSI] Fix VPD inquiry page wrapper
| * | [SCSI] bsg: fix incorrect device_status value (FUJITA Tomonori, 2010-10-15)

bsg incorrectly returns sg's masked_status value for device_status.

[jejb: fix up expression logic]

Reported-by: Douglas Gilbert <dgilbert@interlog.com>
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Stable Tree <stable@kernel.org>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
* | elevator: fix oops on early call to elevator_change() (Jens Axboe, 2010-10-07)

2.6.36 introduces an API for drivers to switch the IO scheduler instead of manually calling the elevator exit and init functions. This API was added since q->elevator must be cleared in between those two calls. And since we already have this functionality directly from use by the sysfs interface to switch schedulers online, it was prudent to reuse it internally too.

But this API needs the queue to be in a fully initialized state before it is called, or it will attempt to unregister elevator kobjects before they have been added. This results in an oops like this:

BUG: unable to handle kernel NULL pointer dereference at 0000000000000051
IP: [<ffffffff8116f15e>] sysfs_create_dir+0x2e/0xc0
PGD 47ddfc067 PUD 47c6a1067 PMD 0
Oops: 0000 [#1] PREEMPT SMP
last sysfs file: /sys/devices/pci0000:00/0000:00:02.0/0000:04:00.1/irq
CPU 2
Modules linked in: t(+) loop hid_apple usbhid ahci ehci_hcd uhci_hcd libahci usbcore nls_base igb
Pid: 7319, comm: modprobe Not tainted 2.6.36-rc6+ #132 QSSC-S4R/QSSC-S4R
RIP: 0010:[<ffffffff8116f15e>] [<ffffffff8116f15e>] sysfs_create_dir+0x2e/0xc0
RSP: 0018:ffff88027da25d08 EFLAGS: 00010246
RAX: ffff88047c68c528 RBX: 00000000fffffffe RCX: 0000000000000000
RDX: 000000000000002f RSI: 000000000000002f RDI: ffff88047e196c88
RBP: ffff88027da25d38 R08: 0000000000000000 R09: d84156c5635688c0
R10: d84156c5635688c0 R11: 0000000000000000 R12: ffff88047e196c88
R13: 0000000000000000 R14: 0000000000000000 R15: ffff88047c68c528
FS: 00007fcb0b26f6e0(0000) GS:ffff880287400000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
CR2: 0000000000000051 CR3: 000000047e76e000 CR4: 00000000000006e0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
Process modprobe (pid: 7319, threadinfo ffff88027da24000, task ffff88027d377090)
Stack:
 ffff88027da25d58 ffff88047c68c528 00000000fffffffe ffff88047e196c88
 ffff88047c68c528 ffff88047e05bd90 ffff88027da25d78 ffffffff8123fb77
 ffff88047e05bd90 0000000000000000 ffff88047e196c88 ffff88047c68c528
Call Trace:
 [<ffffffff8123fb77>] kobject_add_internal+0xe7/0x1f0
 [<ffffffff8123fd98>] kobject_add_varg+0x38/0x60
 [<ffffffff8123feb9>] kobject_add+0x69/0x90
 [<ffffffff8116efe0>] ? sysfs_remove_dir+0x20/0xa0
 [<ffffffff8103d48d>] ? sub_preempt_count+0x9d/0xe0
 [<ffffffff8143de20>] ? _raw_spin_unlock+0x30/0x50
 [<ffffffff8116efe0>] ? sysfs_remove_dir+0x20/0xa0
 [<ffffffff8116eff4>] ? sysfs_remove_dir+0x34/0xa0
 [<ffffffff81224204>] elv_register_queue+0x34/0xa0
 [<ffffffff81224aad>] elevator_change+0xfd/0x250
 [<ffffffffa007e000>] ? t_init+0x0/0x361 [t]
 [<ffffffffa007e000>] ? t_init+0x0/0x361 [t]
 [<ffffffffa007e0a8>] t_init+0xa8/0x361 [t]
 [<ffffffff810001de>] do_one_initcall+0x3e/0x170
 [<ffffffff8108c3fd>] sys_init_module+0xbd/0x220
 [<ffffffff81002f2b>] system_call_fastpath+0x16/0x1b
Code: e5 41 56 41 55 41 54 49 89 fc 53 48 83 ec 10 48 85 ff 74 52 48 8b 47 18 49 c7 c5 00 46 61 81 48 85 c0 74 04 4c 8b 68 30 45 31 f6 <41> 80 7d 51 00 74 0e 49 8b 44 24 28 4c 89 e7 ff 50 20 49 89 c6
RIP [<ffffffff8116f15e>] sysfs_create_dir+0x2e/0xc0
 RSP <ffff88027da25d08>
CR2: 0000000000000051
---[ end trace a6541d3bf07945df ]---

Fix this by adding a registered bit to the elevator queue, which is set when the sysfs kobjects have been registered.

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
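[Editor's illustration] A simplified, hedged sketch of the shape of the "registered bit" fix; field placement and helper bodies are abbreviated from the real elevator code, not the literal patch:

    /* track whether the elevator's sysfs kobject was actually added,
     * and only unregister it on a scheduler switch if it was
     */
    struct elevator_queue {
            struct kobject kobj;
            /* ... */
            unsigned int registered:1;  /* set once sysfs kobjects exist */
    };

    int elv_register_queue(struct request_queue *q)
    {
            struct elevator_queue *e = q->elevator;
            int error;

            error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
            if (!error)
                    e->registered = 1;
            return error;
    }

    /* in the scheduler-switch path: */
    if (old_elevator->registered)
            elv_unregister_queue(q);    /* kobject really exists */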
* | block: prevent merges of discard and write requests (Adrian Hunter, 2010-09-25)

Add logic to prevent two I/O requests being merged if only one of them is a discard. Ditto secure discard.

Without this fix, it is possible for write requests to transform into discard requests. For example:

    Submit bio 1 to discard 8 sectors from sector n
    Submit bio 2 to write 8 sectors from sector n + 16
    Submit bio 3 to write 8 sectors from sector n + 8

Bio 1 becomes request 1. Bio 2 becomes request 2. Bio 3 is merged with request 2, and then subsequently request 2 is merged with request 1 resulting in just one I/O request which discards all 24 sectors.

Signed-off-by: Adrian Hunter <adrian.hunter@nokia.com>
(Moved the checks above the position checks /Jens)
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
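[Editor's illustration] A hedged sketch of the kind of guard this adds (discard_flags_match is an invented name, not the literal patch): both sides must agree on the discard and secure-discard bits before a bio may join a request.

    #include <linux/blkdev.h>

    static bool discard_flags_match(struct request *rq, struct bio *bio)
    {
            if ((bio->bi_rw & REQ_DISCARD) != (rq->cmd_flags & REQ_DISCARD))
                    return false;   /* exactly one of the two is a discard */
            if ((bio->bi_rw & REQ_SECURE) != (rq->cmd_flags & REQ_SECURE))
                    return false;   /* ditto secure discard */
            return true;
    }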
* | cfq-iosched: fix a kernel OOPs when usb key is inserted (Vivek Goyal, 2010-09-21)

Mike reported a kernel crash when a usb key hotplug is performed while all kernel threads are not in a root cgroup and are running in one of the child cgroups of the blkio controller.

BUG: unable to handle kernel NULL pointer dereference at 0000002c
IP: [<c11c7b08>] cfq_get_queue+0x232/0x412
*pde = 00000000
Oops: 0000 [#1] PREEMPT
last sysfs file: /sys/devices/pci0000:00/0000:00:1d.7/usb2/2-1/2-1:1.0/host3/scsi_host/host3/uevent
[..]
Pid: 30039, comm: scsi_scan_3 Not tainted 2.6.35.2-fg.roam #1 Volvi2 /Aspire 4315
EIP: 0060:[<c11c7b08>] EFLAGS: 00010086 CPU: 0
EIP is at cfq_get_queue+0x232/0x412
EAX: f705f9c0 EBX: e977abac ECX: 00000000 EDX: 00000000
ESI: f00da400 EDI: f00da4ec EBP: e977a800 ESP: dff8fd00
DS: 007b ES: 007b FS: 0000 GS: 0000 SS: 0068
Process scsi_scan_3 (pid: 30039, ti=dff8e000 task=f6b6c9a0 task.ti=dff8e000)
Stack:
 00000000 00000000 00000001 01ff0000 f00da508 00000000 f00da524 f00da540
 e7994940 dd631750 f705f9c0 e977a820 e977ac44 f00da4d0 00000001 f6b6c9a0
 00000010 00008010 0000000b 00000000 00000001 e977a800 dd76fac0 00000246
Call Trace:
 [<c11c7f10>] ? cfq_set_request+0x228/0x34c
 [<c11c7ce8>] ? cfq_set_request+0x0/0x34c
 [<c11bb3b9>] ? elv_set_request+0xf/0x1c
 [<c11bdd51>] ? get_request+0x1ad/0x22f
 [<c11bddf2>] ? get_request_wait+0x1f/0x11a
 [<c11d013b>] ? kvasprintf+0x33/0x3b
 [<c127b537>] ? scsi_execute+0x1d/0x103
 [<c127b675>] ? scsi_execute_req+0x58/0x83
 [<c127c391>] ? scsi_probe_and_add_lun+0x188/0x7c2
 [<c12718c6>] ? attribute_container_add_device+0x15/0xfa
 [<c11c95d1>] ? kobject_get+0xf/0x13
 [<c126d1db>] ? get_device+0x10/0x14
 [<c127be93>] ? scsi_alloc_target+0x217/0x24d
 [<c127cbd8>] ? __scsi_scan_target+0x95/0x480
 [<c10204eb>] ? dequeue_entity+0x14/0x1fe
 [<c1020491>] ? update_curr+0x165/0x1ab
 [<c1020491>] ? update_curr+0x165/0x1ab
 [<c127d00d>] ? scsi_scan_channel+0x4a/0x76
 [<c127d0b0>] ? scsi_scan_host_selected+0x77/0xad
 [<c127d13c>] ? do_scan_async+0x0/0x11a
 [<c127d137>] ? do_scsi_scan_host+0x51/0x56
 [<c127d13c>] ? do_scan_async+0x0/0x11a
 [<c127d14a>] ? do_scan_async+0xe/0x11a
 [<c127d13c>] ? do_scan_async+0x0/0x11a
 [<c10354c5>] ? kthread+0x5e/0x63
 [<c1035467>] ? kthread+0x0/0x63
 [<c1002af6>] ? kernel_thread_helper+0x6/0x10
Code: 44 24 1c 54 83 44 24 18 54 83 fa 03 75 94 8b 06 c7 86 64 02 00 00 01 00 00 00 83 e0 03 09 f0 89 06 8b 44 24 28 8b 90 58 01 00 00 <8b> 42 2c 85 c0 75 03 8b 42 08 8d 54 24 48 52 8d 4c 24 50 51 68
EIP: [<c11c7b08>] cfq_get_queue+0x232/0x412 SS:ESP 0068:dff8fd00
CR2: 000000000000002c
---[ end trace 9a88306573f69b12 ]---

The problem here is that we don't have bdi->dev information available when the thread does some IO. Hence when dev_name() tries to access bdi->dev, it crashes. This problem does not happen if kernel threads are in the root group, as the root group is statically allocated at device initialization time and we don't hit this piece of code.

Fix it by delaying the filling of major and minor number information of the device in blk_group. Initially a blk_group is created with 0 as device information and this information is filled later once some more IO comes in from the same group.

Reported-by: Mike Kazantsev <mk.fraggod@gmail.com>
Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
* | block: fix blk_rq_map_kern bio direction flag (Benny Halevy, 2010-09-21)

This bug was introduced in 7b6d91daee5cac6402186ff224c3af39d79f4a0e "block: unify flags for struct bio and struct request"

Cc: Boaz Harrosh <bharrosh@panasas.com>
Signed-off-by: Benny Halevy <bhalevy@panasas.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
* block: Range check cpu in blk_cpu_to_group (Brian King, 2010-09-10)

While testing CPU DLPAR, the following problem was discovered. We were DLPAR removing the first CPU, which in this case was logical CPUs 0-3. CPUs 0-2 were already marked offline and we were in the process of offlining CPU 3. After marking the CPU inactive and offline in cpu_disable, but before the cpu was completely idle (cpu_die), we ended up in __make_request on CPU 3. There we looked at the topology map to see which CPU to complete the I/O on and found no CPUs in the cpu_sibling_map. This resulted in the block layer setting the completion cpu to be NR_CPUS, which then caused an oops when we tried to complete the I/O.

Fix this by sanity checking the value we return from blk_cpu_to_group to be a valid cpu value.

Signed-off-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
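[Editor's illustration] A hedged sketch of the range check; the real helper in block/blk.h varies with the scheduler config options, so this collapses it to one case:

    #include <linux/cpumask.h>
    #include <linux/topology.h>

    /* If the sibling mask is empty (cpu going offline), cpumask_first()
     * returns nr_cpu_ids; fall back to the submitting cpu rather than
     * letting an out-of-range group id reach the completion path.
     */
    static inline int blk_cpu_to_group(int cpu)
    {
            int group = cpumask_first(topology_thread_cpumask(cpu));

            if (group < nr_cpu_ids)
                    return group;
            return cpu;
    }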
* block: add function call to switch the IO scheduler from a driver (Jens Axboe, 2010-08-23)

Currently drivers must do an elevator_exit() + elevator_init() to switch IO schedulers. There are a few problems with this:

- Since commit 1abec4fdbb142e3ccb6ce99832fae42129134a96, elevator_init() requires a zeroed out q->elevator pointer. The two existing in-kernel users don't do that.

- It will only work at initialization time, since using the above two-staged construct does not properly quiesce the queue.

So add elevator_change() which takes care of this, and convert the elv_iosched_store() sysfs interface to use this helper as well.

Reported-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
Reported-by: Kevin Vigor <kevin@vigor.nu>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
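[Editor's illustration] Hedged usage sketch for a driver adopting the new helper; foo_set_scheduler is an invented name and "noop" is just an example scheduler:

    #include <linux/blkdev.h>
    #include <linux/elevator.h>

    /* switch a fully initialized queue in one call instead of an
     * open-coded elevator_exit()/elevator_init() pair
     */
    static int foo_set_scheduler(struct request_queue *q)
    {
            int ret = elevator_change(q, "noop");

            if (ret)
                    printk(KERN_WARNING "foo: elevator switch failed (%d)\n", ret);
            return ret;
    }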
* BLOCK: fix bio.bi_rw handling (Jiri Slaby, 2010-08-23)

Return of the bi_rw tests is no longer bool after commit 74450be1. But results of such tests are stored in bools. This doesn't fit in there for some compilers (gcc 4.5 here), so either use !! magic to get real bools or use ulong where the result is assigned somewhere.

Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Cc: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
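[Editor's illustration] A minimal sketch of the two idioms the commit describes (flag names assumed; REQ_SYNC sits in a high bit, so the raw mask result is a large power of two, not 0/1):

    #include <linux/bio.h>

    static void example(struct bio *bio)
    {
            /* normalize to a real 0/1 before a bool-like destination */
            const bool sync = !!(bio->bi_rw & REQ_SYNC);

            /* ...or keep the full width where the raw bits are stored */
            const unsigned long sync_bits = bio->bi_rw & REQ_SYNC;

            (void)sync;
            (void)sync_bits;
    }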
* block: put dev->kobj in blk_register_queue fail path (Xiaotian Feng, 2010-08-23)

The kernel needs to kobject_put on dev->kobj if elv_register_queue fails.

Signed-off-by: Xiaotian Feng <dfeng@redhat.com>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Cc: Stephen Hemminger <shemminger@vyatta.com>
Cc: Nikanth Karthikesan <knikanth@suse.de>
Cc: David Teigland <teigland@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
* cfq-iosched: blktrace print per slice sector stats (Vivek Goyal, 2010-08-23)

o Divyesh had gotten rid of this code in the past. I want to re-introduce it, as it helps me a lot during debugging.

Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
Reviewed-by: Divyesh Shah <dpshah@google.com>
Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
* cfq-iosched: Implement tunable group_idle (Vivek Goyal, 2010-08-23)

o Implement a new tunable group_idle, which allows idling on the group instead of on a cfq queue. Hence one can set slice_idle = 0 and not idle on the individual queues but idle on the group. This way, on fast storage, we can get fairness between groups while overall throughput improves.

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Acked-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
* cfq-iosched: Do group share accounting in IOPS when slice_idle=0 (Vivek Goyal, 2010-08-23)

o Implement another CFQ mode where we charge a group in terms of number of requests dispatched instead of measuring the time. Measuring in terms of time is not possible when we are driving deeper queue depths and there are requests from multiple cfq queues in the request queue.

o This mode currently gets activated if one sets slice_idle=0 and the associated disk supports NCQ. Again the idea is that on an NCQ disk with idling disabled, most of the queues will dispatch 1 or more requests and then cfq queue expiry happens and we don't have a way to measure time. So start providing fairness in terms of IOPS.

o Currently IOPS mode works only with cfq group scheduling. CFQ is following different scheduling algorithms for queue and group scheduling. These IOPS stats are used only for group scheduling, hence in non-cgroup mode nothing should change.

o For CFQ group scheduling one can disable slice idling so that we don't idle on the queue and drive deeper request queue depths (achieving better throughput); at the same time group idle is enabled so one should get service differentiation among groups.

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Acked-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
* cfq-iosched: Do not idle if slice_idle=0 (Vivek Goyal, 2010-08-23)

Do not idle on either a cfq queue or a service tree if slice_idle=0; the user does not want any queue or service tree idling. Currently, even with slice_idle=0 we were waiting for the request to finish before expiring the queue, and that can lead to lower queue depths.

Acked-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
* blkio: Fix return code for mkdir calls (Ciju Rajan K, 2010-08-23)

If the cgroup hierarchy for blkio control groups is deeper than two levels, the kernel should not allow the creation of further levels. The mkdir system call does not expect EINVAL as a return value; this patch replaces EINVAL with the more appropriate EPERM.

Signed-off-by: Ciju Rajan K <ciju@linux.vnet.ibm.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
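[Editor's illustration] Roughly, the change amounts to swapping the error code in the blkio cgroup create callback; a loose, hedged sketch rather than the literal diff:

    /* in the blkio cgroup create callback: only one level of child
     * groups below the root is supported
     */
    if (cgroup->parent && cgroup->parent->parent)
            return ERR_PTR(-EPERM);     /* was: ERR_PTR(-EINVAL) */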
* block: add secure discard (Adrian Hunter, 2010-08-12)

Secure discard is the same as discard except that all copies of the discarded sectors (perhaps created by garbage collection) must also be erased.

Signed-off-by: Adrian Hunter <adrian.hunter@nokia.com>
Acked-by: Jens Axboe <axboe@kernel.dk>
Cc: Kyungmin Park <kmpark@infradead.org>
Cc: Madhusudhan Chikkature <madhu.cr@ti.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Ben Gardiner <bengardiner@nanometrics.ca>
Cc: <linux-mmc@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
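[Editor's illustration] From userspace a secure discard is requested much like a regular discard; a hedged sketch using the BLKSECDISCARD ioctl introduced alongside this work (the device path and range are examples):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>

    int main(void)
    {
            int fd = open("/dev/mmcblk0", O_WRONLY);
            /* byte offset and byte length of the range to securely erase */
            uint64_t range[2] = { 0, 1024 * 1024 };

            if (fd < 0 || ioctl(fd, BLKSECDISCARD, &range) < 0)
                    perror("BLKSECDISCARD");
            return 0;
    }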
* blkdev: fix blkdev_issue_zeroout return value (Dmitry Monakhov, 2010-08-08)

If the function is called without the barrier option, the return value is incorrect.

Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
* block: update request stacking methods to support discards (Mike Snitzer, 2010-08-08)

Propagate REQ_DISCARD in cmd_flags when cloning a discard request. Skip blk_rq_check_limits's existing checks for discard requests because discard limits will have already been checked in blkdev_issue_discard.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
* block: set up rq->rq_disk properly for flush requests (FUJITA Tomonori, 2010-08-07)

q->bar_rq.rq_disk is NULL. Use the rq_disk of the original request instead.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
* block: set REQ_TYPE_FS on flush requests (FUJITA Tomonori, 2010-08-07)

The block layer doesn't set rq->cmd_type on flush requests. By definition, it should be REQ_TYPE_FS (the lower layers build a command and interpret the result of it; that is, the block layer doesn't know the details).

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
* block: fix problem with sending down discard that isn't of correct granularity (Jens Axboe, 2010-08-07)

If the queue doesn't have a limit set, or it just set UINT_MAX like we default to, we could be sending down a discard request that isn't of the correct granularity if the block size is > 512b. Fix this by adjusting max_discard_sectors down to the proper alignment.

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
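[Editor's illustration] A hedged sketch of the adjustment (variable and helper names approximate; foo_discard_cap is invented): with 4KiB logical blocks, disc_sects is 8 and the cap is rounded down to a multiple of 8 sectors.

    #include <linux/blkdev.h>

    static unsigned int foo_discard_cap(struct request_queue *q)
    {
            unsigned int max_discard_sectors =
                    min(q->limits.max_discard_sectors, UINT_MAX >> 9);
            unsigned int disc_sects = queue_logical_block_size(q) >> 9;

            /* never let a split discard end on a partial logical block */
            if (max_discard_sectors)
                    max_discard_sectors &= ~(disc_sects - 1);
            return max_discard_sectors;
    }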
* blkdev: check for valid request queue before issuing flush (Dave Chinner, 2010-08-07)

Issuing a blkdev_issue_flush() on an unconfigured loop device causes a panic as q->make_request_fn is not configured. This can occur when trying to mount the unconfigured loop device as an XFS filesystem. There are no guards that catch the bio before the request function is called because we don't add a payload to the bio. Instead, manually check this case as soon as we have a pointer to the queue to flush.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
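[Editor's illustration] The guard amounts to something like this at the top of the flush path (hedged sketch; foo_issue_flush is an invented wrapper name):

    #include <linux/blkdev.h>

    static int foo_issue_flush(struct block_device *bdev)
    {
            struct request_queue *q = bdev_get_queue(bdev);

            if (!q)
                    return -ENXIO;
            if (!q->make_request_fn)
                    return -ENXIO;  /* e.g. an unconfigured loop device */

            /* ... build and submit the empty flush bio as before ... */
            return 0;
    }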
* block: remove BKL from partition ioctls (Arnd Bergmann, 2010-08-07)

The blkpg_ioctl and blkdev_reread_part access fields of the bdev and gendisk structures, yet they always do so under the protection of bdev->bd_mutex, which seems sufficient.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
* block: remove BKL from BLKROSET and BLKFLSBUF (Arnd Bergmann, 2010-08-07)

We only call the functions set_device_ro(), invalidate_bdev(), sync_filesystem() and sync_blockdev() while holding the BKL in these commands. All of these are also done in other code paths without the BKL, which leads me to the conclusion that the BKL is not needed here either. The reason we hold it here is that it was originally pushed down into the ioctl function from vfs_ioctl.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
* block: push BKL into blktrace ioctls (Arnd Bergmann, 2010-08-07)

The blktrace driver currently needs the BKL, but we should not need to take that in the block layer, so just push it down into the driver itself. It is quite likely that the BKL is not actually required in blktrace code and could be removed in a follow-on patch.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
* block: push down BKL into .locked_ioctl (Arnd Bergmann, 2010-08-07)

As a preparation for the removal of the big kernel lock in the block layer, this removes the BKL from the common ioctl handling code, moving it into every single driver still using it.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
* block: remove q->prepare_flush_fn completely (FUJITA Tomonori, 2010-08-07)

This removes q->prepare_flush_fn completely (changes the blk_queue_ordered API).

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
* block: permit PREFLUSH and POSTFLUSH without prepare_flush_fn (FUJITA Tomonori, 2010-08-07)

This is preparation for removing q->prepare_flush_fn. Temporarily, blk_queue_ordered() permits QUEUE_ORDERED_DO_PREFLUSH and QUEUE_ORDERED_DO_POSTFLUSH without prepare_flush_fn.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
* block: introduce REQ_FLUSH flag (FUJITA Tomonori, 2010-08-07)

SCSI-ml needs a way to mark a request as a flush request in q->prepare_flush_fn because it needs to identify them later (e.g. in q->request_fn or prep_rq_fn).

queue_flush sets REQ_HARDBARRIER in rq->cmd_flags; however, the block layer also sends normal REQ_TYPE_FS requests with REQ_HARDBARRIER. So SCSI-ml can't use REQ_HARDBARRIER to identify flush requests.

We could change the block layer to clear the REQ_HARDBARRIER bit before sending non-flush requests to the lower layers. However, introducing the new flag looks cleaner (surely easier).

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: James Bottomley <James.Bottomley@suse.de>
Cc: David S. Miller <davem@davemloft.net>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Alasdair G Kergon <agk@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
* block: implement an unprep function corresponding directly to prep (James Bottomley, 2010-08-07)

Reviewed-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
* block: fixup missing conversion from BIO_RW_DISCARD to REQ_DISCARD (Jens Axboe, 2010-08-07)

Didn't cause a merge conflict, so fixed this one up manually post merge.

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
* gcc-4.6: block: fix unused but set variables in blk-merge (Andi Kleen, 2010-08-07)

Just some dead code.

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
* block: don't allocate a payload for discard request (Christoph Hellwig, 2010-08-07)

Allocating a fixed payload for discard requests always was a horrible hack, and it's now coming to bite us when adding support for discard in DM/MD.

So change the code to leave the allocation of a payload to the low-level driver. Unfortunately that means we'll need another hack, which allows us to update the various block layer length fields indicating that we have a payload. Instead of hiding this in sd.c, which we already partially do for UNMAP support, add a documented helper in the core block layer for it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
* block: unify flags for struct bio and struct request (Christoph Hellwig, 2010-08-07)

Remove the current bio flags and reuse the request flags for the bio, too. This allows us to more easily trace the type of I/O from the filesystem down to the block driver. There were two flags in the bio that were missing in the requests: BIO_RW_UNPLUG and BIO_RW_AHEAD. Also I've renamed two request flags that had a superfluous RW in them.

Note that the flags are in bio.h despite having the REQ_ name - as blkdev.h includes bio.h that is the only way to go for now.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
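[Editor's illustration] With one REQ_* namespace, the same flag test works at both levels of the stack; a hedged sketch with invented helper names:

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    /* same flag, whether we look at the bio or at the request it
     * was turned into
     */
    static inline bool bio_is_discard(const struct bio *bio)
    {
            return (bio->bi_rw & REQ_DISCARD) != 0;
    }

    static inline bool rq_is_discard(const struct request *rq)
    {
            return (rq->cmd_flags & REQ_DISCARD) != 0;
    }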
* block: remove wrappers for request type/flags (Christoph Hellwig, 2010-08-07)

Remove all the trivial wrappers for the cmd_type and cmd_flags fields in struct request. This allows much easier grepping for different request types instead of unwinding through macros.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
* block: add helpers for the trivial queue flag sysfs show/store entries (Jens Axboe, 2010-08-07)

The code for nonrot, random, and io stats is completely identical.

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
* block: add sysfs knob for turning off disk entropy contributions (Jens Axboe, 2010-08-07)

There are two reasons for doing this:

- On SSD disks, the completion times aren't as random as they are for rotational drives. So it's questionable whether they should contribute to the random pool in the first place.

- Calling add_disk_randomness() has a lot of overhead.

This adds /sys/block/<dev>/queue/add_random that will allow you to switch off on a per-device basis. The default setting is on, so there should be no functional changes from this patch.

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
* block: Don't count_vm_events for discard bio in submit_bio. (Tao Ma, 2010-06-24)

In submit_bio, we count vm events by checking READ/WRITE. But actually DISCARD_NOBARRIER also has the WRITE flag set. It looks as if in blkdev_issue_discard we also add a page as the payload, so the bio_has_data check isn't enough. So add another check for discard bios.

Signed-off-by: Tao Ma <tao.ma@oracle.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
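[Editor's illustration] A hedged sketch of the resulting check (spelled with the later unified REQ_DISCARD flag for readability; at the time of this commit it was 1 << BIO_RW_DISCARD, and foo_account_io is an invented name):

    #include <linux/bio.h>
    #include <linux/vmstat.h>

    static void foo_account_io(struct bio *bio, int rw)
    {
            /* a discard bio can carry a payload page, so bio_has_data()
             * alone is not enough to decide whether to account paging
             */
            if (bio_has_data(bio) && !(bio->bi_rw & REQ_DISCARD)) {
                    unsigned int count = bio_sectors(bio);

                    if (rw & WRITE)
                            count_vm_events(PGPGOUT, count);
                    else
                            count_vm_events(PGPGIN, count);
            }
    }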
* cfq: fix recursive call in cfq_blkiocg_update_completion_stats() (Jens Axboe, 2010-06-21)

e98ef89b has a typo, causing cfq_blkiocg_update_completion_stats() to call itself instead of blkiocg_update_completion_stats().

Reported-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
* cfq-iosched: Fixed boot warning with BLK_CGROUP=y and CFQ_GROUP_IOSCHED=n (Vivek Goyal, 2010-06-18)

Hi Jens,

A few days back Ingo noticed a CFQ boot time warning. This patch fixes it. The issue here is that with CFQ_GROUP_IOSCHED=n, CFQ should not really be making blkio stat related calls.

> Hm, it's still not entirely fixed, as of 2.6.35-rc2-00131-g7908a9e. With some
> configs i get bad spinlock warnings during bootup:
>
> [ 28.968013] initcall net_olddevs_init+0x0/0x82 returned 0 after 93750 usecs
> [ 28.972003] calling b44_init+0x0/0x55 @ 1
> [ 28.976009] bus: 'pci': add driver b44
> [ 28.976374] sda:
> [ 28.978157] BUG: spinlock bad magic on CPU#1, async/0/117
> [ 28.980000] lock: 7e1c5bbc, .magic: 00000000, .owner: <none>/-1, .owner_cpu: 0
> [ 28.980000] Pid: 117, comm: async/0 Not tainted 2.6.35-rc2-tip-01092-g010e7ef-dirty #8183
> [ 28.980000] Call Trace:
> [ 28.980000]  [<41ba6d55>] ? printk+0x20/0x24
> [ 28.980000]  [<4134b7b7>] spin_bug+0x7c/0x87
> [ 28.980000]  [<4134b853>] do_raw_spin_lock+0x1e/0x123
> [ 28.980000]  [<41ba92ca>] ? _raw_spin_lock_irqsave+0x12/0x20
> [ 28.980000]  [<41ba92d2>] _raw_spin_lock_irqsave+0x1a/0x20
> [ 28.980000]  [<4133476f>] blkiocg_update_io_add_stats+0x25/0xfb
> [ 28.980000]  [<41335dae>] ? cfq_prio_tree_add+0xb1/0xc1
> [ 28.980000]  [<41337bc7>] cfq_insert_request+0x8c/0x425

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
* cfq: Don't allow queue merges for queues that have no process references (Jeff Moyer, 2010-06-17)

Hi,

A user reported a kernel bug when running a particular program that did the following:

created 32 threads
- each thread took a mutex, grabbed a global offset, added a buffer size to that offset, released the lock
- read from the given offset in the file
- created a new thread to do the same
- exited

The result is that cfq's close cooperator logic would trigger, as the threads were issuing I/O within the mean seek distance of one another. This workload managed to routinely trigger a use after free bug when walking the list of merge candidates for a particular cfqq (cfqq->new_cfqq). The logic used for merging queues looks like this:

static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
{
	int process_refs, new_process_refs;
	struct cfq_queue *__cfqq;

	/* Avoid a circular list and skip interim queue merges */
	while ((__cfqq = new_cfqq->new_cfqq)) {
		if (__cfqq == cfqq)
			return;
		new_cfqq = __cfqq;
	}

	process_refs = cfqq_process_refs(cfqq);
	/*
	 * If the process for the cfqq has gone away, there is no
	 * sense in merging the queues.
	 */
	if (process_refs == 0)
		return;

	/*
	 * Merge in the direction of the lesser amount of work.
	 */
	new_process_refs = cfqq_process_refs(new_cfqq);
	if (new_process_refs >= process_refs) {
		cfqq->new_cfqq = new_cfqq;
		atomic_add(process_refs, &new_cfqq->ref);
	} else {
		new_cfqq->new_cfqq = cfqq;
		atomic_add(new_process_refs, &cfqq->ref);
	}
}

When a merge candidate is found, we add the process references for the queue with less references to the queue with more. The actual merging of queues happens when a new request is issued for a given cfqq. In the case of the test program, it only does a single pread call to read in 1MB, so the actual merge never happens. Normally, this is fine, as when the queue exits, we simply drop the references we took on the other cfqqs in the merge chain:

	/*
	 * If this queue was scheduled to merge with another queue, be
	 * sure to drop the reference taken on that queue (and others in
	 * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
	 */
	__cfqq = cfqq->new_cfqq;
	while (__cfqq) {
		if (__cfqq == cfqq) {
			WARN(1, "cfqq->new_cfqq loop detected\n");
			break;
		}
		next = __cfqq->new_cfqq;
		cfq_put_queue(__cfqq);
		__cfqq = next;
	}

However, there is a hole in this logic. Consider the following (and keep in mind that each I/O keeps a reference to the cfqq):

q1->new_cfqq = q2   // q2 now has 2 process references
q3->new_cfqq = q2   // q2 now has 3 process references

// the process associated with q2 exits
// q2 now has 2 process references

// queue 1 exits, drops its reference on q2
// q2 now has 1 process reference

// q3 exits, so has 0 process references, and hence drops its references
// to q2, which leaves q2 also with 0 process references

q4 comes along and wants to merge with q3

q3->new_cfqq still points at q2! We follow that link and end up at an already freed cfqq.

So, the fix is to not follow a merge chain if the top-most queue does not have a process reference, otherwise any queue in the chain could be already freed. I also changed the logic to disallow merging with a queue that does not have any process references. Previously, we did this check for one of the merge candidates, but not the other. That doesn't really make sense.

Without the attached patch, my system would BUG within a couple of seconds of running the reproducer program. With the patch applied, my system ran the program for over an hour without issues.

This addresses the following bugzilla:
https://bugzilla.kernel.org/show_bug.cgi?id=16217

Thanks a ton to Phil Carns for providing the bug report and an excellent reproducer.

[ Note for stable: this applies to 2.6.32/33/34 ].

Signed-off-by: Jeff Moyer <jmoyer@redhat.com>
Reported-by: Phil Carns <carns@mcs.anl.gov>
Cc: stable@kernel.org
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
* block: fix DISCARD_BARRIER requests (Christoph Hellwig, 2010-06-17)

Filesystems assume that DISCARD_BARRIER are full barriers, so that they don't have to track in-progress discard operations when submitting new I/O. But currently we only treat them as elevator barriers, which don't actually do the necessary queue drains.

Also remove the unlikely around both the DISCARD and BARRIER requests - they happen far too often for a static mispredict.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
* block: make blk_init_free_list and elevator_init idempotent (Mike Snitzer, 2010-06-04)

blk_init_allocated_queue_node may fail and the caller _could_ retry. Accommodate the unlikely event that blk_init_allocated_queue_node is called on an already initialized (possibly partially) request_queue.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
* block: avoid unconditionally freeing previously allocated request_queue (Mike Snitzer, 2010-06-04)

On blk_init_allocated_queue_node failure, only free the request_queue if it wasn't previously allocated outside the block layer (e.g. blk_init_queue_node was the blk_init_allocated_queue_node caller).

This addresses an interface bug introduced by the following commit: 01effb0 "block: allow initialization of previously allocated request_queue".

Otherwise the request_queue may be freed out from underneath a caller that is managing the request_queue directly (e.g. the caller uses blk_alloc_queue + blk_init_allocated_queue_node).

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
* cfq-iosched: fix an oops caused by slab leak (Shaohua Li, 2010-05-25)

I got the below oops when unloading cfq-iosched. Consider the scenario: queue A merges to B, C merges to D, and B will be merged to D. Before B is merged to D, we split B. We should put B's reference for D.

[ 807.768536] =============================================================================
[ 807.768539] BUG cfq_queue: Objects remaining on kmem_cache_close()
[ 807.768541] -----------------------------------------------------------------------------
[ 807.768543]
[ 807.768546] INFO: Slab 0xffffea0003e6b4e0 objects=26 used=1 fp=0xffff88011d584fd8 flags=0x200000000004082
[ 807.768550] Pid: 5946, comm: rmmod Tainted: G W 2.6.34-07097-gf4b87de-dirty #724
[ 807.768552] Call Trace:
[ 807.768560]  [<ffffffff81104e8d>] slab_err+0x8f/0x9d
[ 807.768564]  [<ffffffff811059e1>] ? flush_cpu_slab+0x0/0x93
[ 807.768569]  [<ffffffff8164be52>] ? add_preempt_count+0xe/0xca
[ 807.768572]  [<ffffffff8164bd9c>] ? sub_preempt_count+0xe/0xb6
[ 807.768577]  [<ffffffff81648871>] ? _raw_spin_unlock+0x15/0x30
[ 807.768580]  [<ffffffff8164bd9c>] ? sub_preempt_count+0xe/0xb6
[ 807.768584]  [<ffffffff811061bc>] list_slab_objects+0x9b/0x19f
[ 807.768588]  [<ffffffff8164bf0a>] ? add_preempt_count+0xc6/0xca
[ 807.768591]  [<ffffffff81109e27>] kmem_cache_destroy+0x13f/0x21d
[ 807.768597]  [<ffffffffa000ff13>] cfq_slab_kill+0x1a/0x43 [cfq_iosched]
[ 807.768601]  [<ffffffffa000ffcf>] cfq_exit+0x93/0x9e [cfq_iosched]
[ 807.768606]  [<ffffffff810973a2>] sys_delete_module+0x1b1/0x219
[ 807.768612]  [<ffffffff8102fb5b>] system_call_fastpath+0x16/0x1b
[ 807.768618] INFO: Object 0xffff88011d584618 @offset=1560
[ 807.768622] INFO: Allocated in cfq_get_queue+0x11e/0x274 [cfq_iosched] age=7173 cpu=1 pid=5496
[ 807.768626] =============================================================================

Cc: stable@kernel.org
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
* block: Adjust elv_iosched_show to return "none" for bio-based DM (Mike Snitzer, 2010-05-24)

Bio-based DM doesn't use an elevator (the queue is !blk_queue_stackable()). Longer-term, DM will not allocate an elevator for bio-based DM. But even then there will be a small potential for an elevator to be allocated for a request-based DM table, only to have a bio-based table be loaded in the end. Displaying "none" for bio-based DM will help avoid user confusion.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>