author     Linus Torvalds <torvalds@linux-foundation.org>  2013-07-03 20:12:13 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-07-03 20:12:13 -0400
commit     7f0ef0267e20d62d45d527911a993b1e998f4968 (patch)
tree       de51abc7da5903f59d83e23937f22420164c9477 /drivers/block
parent     862f0012549110d6f2586bf54b52ed4540cbff3a (diff)
parent     9307c29524502c21f0e8a6d96d850b2f5bc0bd9a (diff)
Merge branch 'akpm' (updates from Andrew Morton)
Merge first patch-bomb from Andrew Morton:
- various misc bits
- I've been patchmonkeying ocfs2 for a while, as Joel and Mark have been
  distracted. There has been quite a bit of activity.
- About half the MM queue
- Some backlight bits
- Various lib/ updates
- checkpatch updates
- zillions more little rtc patches
- ptrace
- signals
- exec
- procfs
- rapidio
- nbd
- aoe
- pps
- memstick
- tools/testing/selftests updates
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (445 commits)
tools/testing/selftests: don't assume the x bit is set on scripts
selftests: add .gitignore for kcmp
selftests: fix clean target in kcmp Makefile
selftests: add .gitignore for vm
selftests: add hugetlbfstest
self-test: fix make clean
selftests: exit 1 on failure
kernel/resource.c: remove the unneeded assignment in function __find_resource
aio: fix wrong comment in aio_complete()
drivers/w1/slaves/w1_ds2408.c: add magic sequence to disable P0 test mode
drivers/memstick/host/r592.c: convert to module_pci_driver
drivers/memstick/host/jmb38x_ms: convert to module_pci_driver
pps-gpio: add device-tree binding and support
drivers/pps/clients/pps-gpio.c: convert to module_platform_driver
drivers/pps/clients/pps-gpio.c: convert to devm_* helpers
drivers/parport/share.c: use kzalloc
Documentation/accounting/getdelays.c: avoid strncpy in accounting tool
aoe: update internal version number to v83
aoe: update copyright date
aoe: perform I/O completions in parallel
...
Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/aoe/aoe.h             |  11
-rw-r--r--  drivers/block/aoe/aoecmd.c          | 156
-rw-r--r--  drivers/block/aoe/aoedev.c          |   3
-rw-r--r--  drivers/block/aoe/aoenet.c          |   7
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c   |   3
-rw-r--r--  drivers/block/nbd.c                 |  11
-rw-r--r--  drivers/block/xen-blkback/xenbus.c  |   2
7 files changed, 151 insertions(+), 42 deletions(-)
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 175649468c95..025c41d3cb33 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -1,5 +1,5 @@
-/* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */
-#define VERSION "81"
+/* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */
+#define VERSION "83"
 #define AOE_MAJOR 152
 #define DEVICE_NAME "aoe"
 
@@ -196,9 +196,11 @@ struct ktstate {
         struct completion rendez;
         struct task_struct *task;
         wait_queue_head_t *waitq;
-        int (*fn) (void);
-        char *name;
+        int (*fn) (int);
+        char name[12];
         spinlock_t *lock;
+        int id;
+        int active;
 };
 
 int aoeblk_init(void);
@@ -222,6 +224,7 @@ int aoecmd_init(void);
 struct sk_buff *aoecmd_ata_id(struct aoedev *);
 void aoe_freetframe(struct frame *);
 void aoe_flush_iocq(void);
+void aoe_flush_iocq_by_index(int);
 void aoe_end_request(struct aoedev *, struct request *, int);
 int aoe_ktstart(struct ktstate *k);
 void aoe_ktstop(struct ktstate *k);
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index fc803ecbbce4..99cb944a002d 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */
+/* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */
 /*
  * aoecmd.c
  * Filesystem request handling methods
@@ -35,14 +35,27 @@ module_param(aoe_maxout, int, 0644);
 MODULE_PARM_DESC(aoe_maxout,
         "Only aoe_maxout outstanding packets for every MAC on eX.Y.");
 
-static wait_queue_head_t ktiowq;
-static struct ktstate kts;
+/* The number of online cpus during module initialization gives us a
+ * convenient heuristic cap on the parallelism used for ktio threads
+ * doing I/O completion. It is not important that the cap equal the
+ * actual number of running CPUs at any given time, but because of CPU
+ * hotplug, we take care to use ncpus instead of using
+ * num_online_cpus() after module initialization.
+ */
+static int ncpus;
+
+/* mutex lock used for synchronization while thread spawning */
+static DEFINE_MUTEX(ktio_spawn_lock);
+
+static wait_queue_head_t *ktiowq;
+static struct ktstate *kts;
 
 /* io completion queue */
-static struct {
+struct iocq_ktio {
         struct list_head head;
         spinlock_t lock;
-} iocq;
+};
+static struct iocq_ktio *iocq;
 
 static struct page *empty_page;
 
@@ -1278,23 +1291,36 @@ out:
  * Returns true iff responses needing processing remain.
  */
 static int
-ktio(void)
+ktio(int id)
 {
         struct frame *f;
         struct list_head *pos;
         int i;
+        int actual_id;
 
         for (i = 0; ; ++i) {
                 if (i == MAXIOC)
                         return 1;
-                if (list_empty(&iocq.head))
+                if (list_empty(&iocq[id].head))
                         return 0;
-                pos = iocq.head.next;
+                pos = iocq[id].head.next;
                 list_del(pos);
-                spin_unlock_irq(&iocq.lock);
                 f = list_entry(pos, struct frame, head);
+                spin_unlock_irq(&iocq[id].lock);
                 ktiocomplete(f);
-                spin_lock_irq(&iocq.lock);
+
+                /* Figure out if extra threads are required. */
+                actual_id = f->t->d->aoeminor % ncpus;
+
+                if (!kts[actual_id].active) {
+                        BUG_ON(id != 0);
+                        mutex_lock(&ktio_spawn_lock);
+                        if (!kts[actual_id].active
+                                && aoe_ktstart(&kts[actual_id]) == 0)
+                                kts[actual_id].active = 1;
+                        mutex_unlock(&ktio_spawn_lock);
+                }
+                spin_lock_irq(&iocq[id].lock);
         }
 }
 
@@ -1311,7 +1337,7 @@ kthread(void *vp)
         complete(&k->rendez);   /* tell spawner we're running */
         do {
                 spin_lock_irq(k->lock);
-                more = k->fn();
+                more = k->fn(k->id);
                 if (!more) {
                         add_wait_queue(k->waitq, &wait);
                         __set_current_state(TASK_INTERRUPTIBLE);
@@ -1340,7 +1366,7 @@ aoe_ktstart(struct ktstate *k)
         struct task_struct *task;
 
         init_completion(&k->rendez);
-        task = kthread_run(kthread, k, k->name);
+        task = kthread_run(kthread, k, "%s", k->name);
         if (task == NULL || IS_ERR(task))
                 return -ENOMEM;
         k->task = task;
@@ -1353,13 +1379,24 @@ aoe_ktstart(struct ktstate *k)
 static void
 ktcomplete(struct frame *f, struct sk_buff *skb)
 {
+        int id;
         ulong flags;
 
         f->r_skb = skb;
-        spin_lock_irqsave(&iocq.lock, flags);
-        list_add_tail(&f->head, &iocq.head);
-        spin_unlock_irqrestore(&iocq.lock, flags);
-        wake_up(&ktiowq);
+        id = f->t->d->aoeminor % ncpus;
+        spin_lock_irqsave(&iocq[id].lock, flags);
+        if (!kts[id].active) {
+                spin_unlock_irqrestore(&iocq[id].lock, flags);
+                /* The thread with id has not been spawned yet,
+                 * so delegate the work to the main thread and
+                 * try spawning a new thread.
+                 */
+                id = 0;
+                spin_lock_irqsave(&iocq[id].lock, flags);
+        }
+        list_add_tail(&f->head, &iocq[id].head);
+        spin_unlock_irqrestore(&iocq[id].lock, flags);
+        wake_up(&ktiowq[id]);
 }
 
 struct sk_buff *
@@ -1706,6 +1743,17 @@ aoe_failbuf(struct aoedev *d, struct buf *buf)
 void
 aoe_flush_iocq(void)
 {
+        int i;
+
+        for (i = 0; i < ncpus; i++) {
+                if (kts[i].active)
+                        aoe_flush_iocq_by_index(i);
+        }
+}
+
+void
+aoe_flush_iocq_by_index(int id)
+{
         struct frame *f;
         struct aoedev *d;
         LIST_HEAD(flist);
@@ -1713,9 +1761,9 @@ aoe_flush_iocq(void)
         struct sk_buff *skb;
         ulong flags;
 
-        spin_lock_irqsave(&iocq.lock, flags);
-        list_splice_init(&iocq.head, &flist);
-        spin_unlock_irqrestore(&iocq.lock, flags);
+        spin_lock_irqsave(&iocq[id].lock, flags);
+        list_splice_init(&iocq[id].head, &flist);
+        spin_unlock_irqrestore(&iocq[id].lock, flags);
         while (!list_empty(&flist)) {
                 pos = flist.next;
                 list_del(pos);
@@ -1738,6 +1786,8 @@ int __init
 aoecmd_init(void)
 {
         void *p;
+        int i;
+        int ret;
 
         /* get_zeroed_page returns page with ref count 1 */
         p = (void *) get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
@@ -1745,22 +1795,72 @@ aoecmd_init(void)
                 return -ENOMEM;
         empty_page = virt_to_page(p);
 
-        INIT_LIST_HEAD(&iocq.head);
-        spin_lock_init(&iocq.lock);
-        init_waitqueue_head(&ktiowq);
-        kts.name = "aoe_ktio";
-        kts.fn = ktio;
-        kts.waitq = &ktiowq;
-        kts.lock = &iocq.lock;
-        return aoe_ktstart(&kts);
+        ncpus = num_online_cpus();
+
+        iocq = kcalloc(ncpus, sizeof(struct iocq_ktio), GFP_KERNEL);
+        if (!iocq)
+                return -ENOMEM;
+
+        kts = kcalloc(ncpus, sizeof(struct ktstate), GFP_KERNEL);
+        if (!kts) {
+                ret = -ENOMEM;
+                goto kts_fail;
+        }
+
+        ktiowq = kcalloc(ncpus, sizeof(wait_queue_head_t), GFP_KERNEL);
+        if (!ktiowq) {
+                ret = -ENOMEM;
+                goto ktiowq_fail;
+        }
+
+        mutex_init(&ktio_spawn_lock);
+
+        for (i = 0; i < ncpus; i++) {
+                INIT_LIST_HEAD(&iocq[i].head);
+                spin_lock_init(&iocq[i].lock);
+                init_waitqueue_head(&ktiowq[i]);
+                snprintf(kts[i].name, sizeof(kts[i].name), "aoe_ktio%d", i);
+                kts[i].fn = ktio;
+                kts[i].waitq = &ktiowq[i];
+                kts[i].lock = &iocq[i].lock;
+                kts[i].id = i;
+                kts[i].active = 0;
+        }
+        kts[0].active = 1;
+        if (aoe_ktstart(&kts[0])) {
+                ret = -ENOMEM;
+                goto ktstart_fail;
+        }
+        return 0;
+
+ktstart_fail:
+        kfree(ktiowq);
+ktiowq_fail:
+        kfree(kts);
+kts_fail:
+        kfree(iocq);
+
+        return ret;
 }
 
 void
 aoecmd_exit(void)
 {
-        aoe_ktstop(&kts);
+        int i;
+
+        for (i = 0; i < ncpus; i++)
+                if (kts[i].active)
+                        aoe_ktstop(&kts[i]);
+
         aoe_flush_iocq();
 
+        /* Free up the iocq and thread speicific configuration
+         * allocated during startup.
+         */
+        kfree(iocq);
+        kfree(kts);
+        kfree(ktiowq);
+
         free_page((unsigned long) page_address(empty_page));
         empty_page = NULL;
 }
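
The aoecmd.c hunks above replace the driver's single global completion queue and
"aoe_ktio" thread with one queue/thread pair per CPU, counted once at module load.
As a rough userspace sketch of just the dispatch rule (not kernel code; NCPUS,
pick_queue and worker_active are made-up stand-ins for ncpus, the id computation
in ktcomplete(), and kts[i].active), completions for AoE minor M go to queue
M % NCPUS and fall back to queue 0, whose thread always exists, when the target
worker has not been spawned yet:

#include <stdbool.h>
#include <stdio.h>

#define NCPUS 4                                 /* stands in for num_online_cpus() at init */

static bool worker_active[NCPUS] = { true };    /* only worker 0 starts out running */

static int pick_queue(int aoeminor)
{
        int id = aoeminor % NCPUS;

        /* If the per-CPU worker has not been spawned yet, hand the frame to
         * the always-present worker 0, which then spawns the missing thread. */
        if (!worker_active[id])
                id = 0;
        return id;
}

int main(void)
{
        int minor;

        for (minor = 0; minor < 8; minor++)
                printf("aoe minor %d -> completion queue %d\n",
                       minor, pick_queue(minor));
        return 0;
}
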
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 98f2965778b9..784c92e038d1 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */
+/* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */
 /*
  * aoedev.c
  * AoE device utility functions; maintains device list.
@@ -518,7 +518,6 @@ void
 aoedev_exit(void)
 {
         flush_scheduled_work();
-        aoe_flush_iocq();
         flush(NULL, 0, EXITING);
 }
 
diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
index 71d3ea8d3006..63773a90581d 100644
--- a/drivers/block/aoe/aoenet.c
+++ b/drivers/block/aoe/aoenet.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */
+/* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */
 /*
  * aoenet.c
  * Ethernet portion of AoE driver
@@ -52,7 +52,7 @@ static struct sk_buff_head skbtxq;
 
 /* enters with txlock held */
 static int
-tx(void) __must_hold(&txlock)
+tx(int id) __must_hold(&txlock)
 {
         struct sk_buff *skb;
         struct net_device *ifp;
@@ -205,7 +205,8 @@ aoenet_init(void)
         kts.lock = &txlock;
         kts.fn = tx;
         kts.waitq = &txwq;
-        kts.name = "aoe_tx";
+        kts.id = 0;
+        snprintf(kts.name, sizeof(kts.name), "aoe_tx%d", kts.id);
         if (aoe_ktstart(&kts))
                 return -EAGAIN;
         dev_add_pack(&aoe_pt);
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 20dd52a2f92f..952dbfe22126 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -4087,7 +4087,8 @@ skip_create_disk:
 start_service_thread:
         sprintf(thd_name, "mtip_svc_thd_%02d", index);
         dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread,
-                                                dd, dd->numa_node, thd_name);
+                                                dd, dd->numa_node, "%s",
+                                                thd_name);
 
         if (IS_ERR(dd->mtip_svc_handler)) {
                 dev_err(&dd->pdev->dev, "service thread failed to start\n");
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 037288e7874d..2dc3b5153f0d 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -623,8 +623,10 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
                 if (!nbd->sock)
                         return -EINVAL;
 
+                nbd->disconnect = 1;
+
                 nbd_send_req(nbd, &sreq);
                 return 0;
         }
 
         case NBD_CLEAR_SOCK: {
@@ -654,6 +656,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
                         nbd->sock = SOCKET_I(inode);
                         if (max_part > 0)
                                 bdev->bd_invalidated = 1;
+                        nbd->disconnect = 0; /* we're connected now */
                         return 0;
                 } else {
                         fput(file);
@@ -714,7 +717,8 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
                 else
                         blk_queue_flush(nbd->disk->queue, 0);
 
-                thread = kthread_create(nbd_thread, nbd, nbd->disk->disk_name);
+                thread = kthread_create(nbd_thread, nbd, "%s",
+                                        nbd->disk->disk_name);
                 if (IS_ERR(thread)) {
                         mutex_lock(&nbd->tx_lock);
                         return PTR_ERR(thread);
@@ -742,6 +746,8 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
                 set_capacity(nbd->disk, 0);
                 if (max_part > 0)
                         ioctl_by_bdev(bdev, BLKRRPART, 0);
+                if (nbd->disconnect) /* user requested, ignore socket errors */
+                        return 0;
                 return nbd->harderror;
         }
 
@@ -750,7 +756,6 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
                  * This is for compatibility only. The queue is always cleared
                  * by NBD_DO_IT or NBD_CLEAR_SOCK.
                  */
-                BUG_ON(!nbd->sock && !list_empty(&nbd->queue_head));
                 return 0;
 
         case NBD_PRINT_DEBUG:
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 8bfd1bcf95ec..04608a6502d7 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -93,7 +93,7 @@ static void xen_update_blkif_status(struct xen_blkif *blkif)
         }
         invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping);
 
-        blkif->xenblkd = kthread_run(xen_blkif_schedule, blkif, name);
+        blkif->xenblkd = kthread_run(xen_blkif_schedule, blkif, "%s", name);
         if (IS_ERR(blkif->xenblkd)) {
                 err = PTR_ERR(blkif->xenblkd);
                 blkif->xenblkd = NULL;
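
A second pattern shared by the aoecmd.c, mtip32xx.c, nbd.c and xen-blkback/xenbus.c
hunks above: kthread_run()/kthread_create()/kthread_create_on_node() take a
printf-style name format, so a thread name that might contain '%' must be passed
as an argument to a literal "%s" rather than as the format itself. A minimal
userspace illustration of the same idea (plain printf stands in for the kernel
helpers; the name string is hypothetical):

#include <stdio.h>

int main(void)
{
        /* Hypothetical derived name that happens to contain a conversion. */
        const char *name = "disk%d_thread";

        /* Risky: printf(name); would treat "%d" as a conversion and read a
         * nonexistent argument.  Safe: pass a fixed format and the name as
         * data, mirroring kthread_run(fn, arg, "%s", name) in the hunks above. */
        printf("%s\n", name);
        return 0;
}
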