author	Tejun Heo <tj@kernel.org>	2012-04-01 15:30:01 -0400
committer	Tejun Heo <tj@kernel.org>	2012-04-01 15:55:00 -0400
commit	959d851caa48829eb85cb85aa949fd6b4c5d5bc6 (patch)
tree	3ba9c94ec346275fb44c4f0d1cd2537cdff8d811 /block
parent	a5567932fc926739e29e98487128080f40c61710 (diff)
parent	48ddbe194623ae089cc0576e60363f2d2e85662a (diff)
Merge branch 'for-3.5' of ../cgroup into block/for-3.5/core-merged
cgroup/for-3.5 contains the following changes which blk-cgroup needs
to proceed with the on-going cleanup.

* Dynamic addition and removal of cftypes to make config/stat file
  handling modular for policies.

* cgroup removal update to not wait for css references to drain to
  fix blkcg removal hang caused by cfq caching cfqgs.

Pull in cgroup/for-3.5 into block/for-3.5/core.  This causes the
following conflicts in block/blk-cgroup.c.

* 761b3ef50e "cgroup: remove cgroup_subsys argument from callbacks"
  conflicts with blkiocg_pre_destroy() addition and blkiocg_attach()
  removal.  Resolved by removing @subsys from all subsys methods.

* 676f7c8f84 "cgroup: relocate cftype and cgroup_subsys definitions in
  controllers" conflicts with ->pre_destroy() and ->attach() updates
  and removal of modular config.  Resolved by dropping forward
  declarations of the methods and applying updates to the relocated
  blkio_subsys.

* 4baf6e3325 "cgroup: convert all non-memcg controllers to the new
  cftype interface" builds upon the previous item.  Resolved by adding
  ->base_cftypes to the relocated blkio_subsys.

Signed-off-by: Tejun Heo <tj@kernel.org>
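In outline, the controller shape these resolutions converge on looks like
the following minimal sketch. It is illustrative only: the "foo" controller,
foo_subsys_id, and the file name are hypothetical placeholders, not code
from this merge; the actual result is in the blk-cgroup.c diff below.

/*
 * Sketch of the post-merge cgroup controller pattern (hypothetical
 * "foo" controller; foo_subsys_id is assumed to be provided by the
 * usual cgroup subsys-id enumeration).
 */

/* Subsys methods no longer take a struct cgroup_subsys * argument. */
static struct cgroup_subsys_state *foo_create(struct cgroup *cgroup);
static int foo_pre_destroy(struct cgroup *cgroup);
static void foo_destroy(struct cgroup *cgroup);

/*
 * Control files are a sentinel-terminated cftype array wired up via
 * ->base_cftypes, replacing the old ->populate() + cgroup_add_files()
 * scheme.
 */
static struct cftype foo_files[] = {
	{
		.name = "stat",		/* read/write handlers elided */
	},
	{ }	/* terminate */
};

struct cgroup_subsys foo_subsys = {
	.name		= "foo",
	.create		= foo_create,
	.pre_destroy	= foo_pre_destroy,
	.destroy	= foo_destroy,
	.subsys_id	= foo_subsys_id,
	.base_cftypes	= foo_files,
};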
Diffstat (limited to 'block')
-rw-r--r--	block/blk-cgroup.c	51
-rw-r--r--	block/blk-ioc.c		6
-rw-r--r--	block/blk-softirq.c	16
-rw-r--r--	block/blk.h		16
-rw-r--r--	block/partitions/ldm.c	11
5 files changed, 35 insertions(+), 65 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index aa54c4110f54..4fdeb46b4436 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -43,32 +43,12 @@ EXPORT_SYMBOL_GPL(blkio_root_cgroup);
 
 static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];
 
-static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
-						  struct cgroup *);
-static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
-			      struct cgroup_taskset *);
-static int blkiocg_pre_destroy(struct cgroup_subsys *, struct cgroup *);
-static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
-static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
-
 /* for encoding cft->private value on file */
 #define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
 /* What policy owns the file, proportional or throttle */
 #define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
 #define BLKIOFILE_ATTR(val)		((val) & 0xffff)
 
-struct cgroup_subsys blkio_subsys = {
-	.name = "blkio",
-	.create = blkiocg_create,
-	.can_attach = blkiocg_can_attach,
-	.pre_destroy = blkiocg_pre_destroy,
-	.destroy = blkiocg_destroy,
-	.populate = blkiocg_populate,
-	.subsys_id = blkio_subsys_id,
-	.module = THIS_MODULE,
-};
-EXPORT_SYMBOL_GPL(blkio_subsys);
-
 struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
 {
 	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
@@ -1563,17 +1543,11 @@ struct cftype blkio_files[] = {
 		.read_map = blkiocg_file_read_map,
 	},
 #endif
+	{ }	/* terminate */
 };
 
-static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
-{
-	return cgroup_add_files(cgroup, subsys, blkio_files,
-				ARRAY_SIZE(blkio_files));
-}
-
 /**
  * blkiocg_pre_destroy - cgroup pre_destroy callback
- * @subsys: cgroup subsys
  * @cgroup: cgroup of interest
  *
  * This function is called when @cgroup is about to go away and responsible
@@ -1583,8 +1557,7 @@ static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
  *
  * This is the blkcg counterpart of ioc_release_fn().
  */
-static int blkiocg_pre_destroy(struct cgroup_subsys *subsys,
-			       struct cgroup *cgroup)
+static int blkiocg_pre_destroy(struct cgroup *cgroup)
 {
 	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
 
@@ -1609,7 +1582,7 @@ static int blkiocg_pre_destroy(struct cgroup_subsys *subsys,
 	return 0;
 }
 
-static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
+static void blkiocg_destroy(struct cgroup *cgroup)
 {
 	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
 
@@ -1617,8 +1590,7 @@ static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
 	kfree(blkcg);
 }
 
-static struct cgroup_subsys_state *
-blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
+static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
 {
 	static atomic64_t id_seq = ATOMIC64_INIT(0);
 	struct blkio_cgroup *blkcg;
@@ -1706,8 +1678,7 @@ void blkcg_exit_queue(struct request_queue *q)
  * of the main cic data structures. For now we allow a task to change
  * its cgroup only if it's the only owner of its ioc.
  */
-static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-			      struct cgroup_taskset *tset)
+static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 {
 	struct task_struct *task;
 	struct io_context *ioc;
@@ -1750,6 +1721,18 @@ static void blkcg_bypass_end(void)
 	mutex_unlock(&all_q_mutex);
 }
 
+struct cgroup_subsys blkio_subsys = {
+	.name = "blkio",
+	.create = blkiocg_create,
+	.can_attach = blkiocg_can_attach,
+	.pre_destroy = blkiocg_pre_destroy,
+	.destroy = blkiocg_destroy,
+	.subsys_id = blkio_subsys_id,
+	.base_cftypes = blkio_files,
+	.module = THIS_MODULE,
+};
+EXPORT_SYMBOL_GPL(blkio_subsys);
+
 void blkio_policy_register(struct blkio_policy_type *blkiop)
 {
 	struct request_queue *q;
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 3f3dd51a1280..1e2d53b04858 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -130,6 +130,7 @@ static void ioc_release_fn(struct work_struct *work)
 void put_io_context(struct io_context *ioc)
 {
 	unsigned long flags;
+	bool free_ioc = false;
 
 	if (ioc == NULL)
 		return;
@@ -144,8 +145,13 @@ void put_io_context(struct io_context *ioc)
 		spin_lock_irqsave(&ioc->lock, flags);
 		if (!hlist_empty(&ioc->icq_list))
 			schedule_work(&ioc->release_work);
+		else
+			free_ioc = true;
 		spin_unlock_irqrestore(&ioc->lock, flags);
 	}
+
+	if (free_ioc)
+		kmem_cache_free(iocontext_cachep, ioc);
 }
 EXPORT_SYMBOL(put_io_context);
 
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 1366a89d8e66..467c8de88642 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -8,6 +8,7 @@
 #include <linux/blkdev.h>
 #include <linux/interrupt.h>
 #include <linux/cpu.h>
+#include <linux/sched.h>
 
 #include "blk.h"
 
@@ -103,9 +104,10 @@ static struct notifier_block __cpuinitdata blk_cpu_notifier = {
 
 void __blk_complete_request(struct request *req)
 {
-	int ccpu, cpu, group_cpu = NR_CPUS;
+	int ccpu, cpu;
 	struct request_queue *q = req->q;
 	unsigned long flags;
+	bool shared = false;
 
 	BUG_ON(!q->softirq_done_fn);
 
@@ -117,22 +119,20 @@ void __blk_complete_request(struct request *req)
 	 */
 	if (req->cpu != -1) {
 		ccpu = req->cpu;
-		if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags)) {
-			ccpu = blk_cpu_to_group(ccpu);
-			group_cpu = blk_cpu_to_group(cpu);
-		}
+		if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
+			shared = cpus_share_cache(cpu, ccpu);
 	} else
 		ccpu = cpu;
 
 	/*
-	 * If current CPU and requested CPU are in the same group, running
-	 * softirq in current CPU. One might concern this is just like
+	 * If current CPU and requested CPU share a cache, run the softirq on
+	 * the current CPU. One might concern this is just like
 	 * QUEUE_FLAG_SAME_FORCE, but actually not. blk_complete_request() is
 	 * running in interrupt handler, and currently I/O controller doesn't
 	 * support multiple interrupts, so current CPU is unique actually. This
 	 * avoids IPI sending from current CPU to the first CPU of a group.
 	 */
-	if (ccpu == cpu || ccpu == group_cpu) {
+	if (ccpu == cpu || shared) {
 		struct list_head *list;
 do_local:
 		list = &__get_cpu_var(blk_cpu_done);
diff --git a/block/blk.h b/block/blk.h
index aa81afde8220..85f6ae42f7d3 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -164,22 +164,6 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
 	return q->nr_congestion_off;
 }
 
-static inline int blk_cpu_to_group(int cpu)
-{
-	int group = NR_CPUS;
-#ifdef CONFIG_SCHED_MC
-	const struct cpumask *mask = cpu_coregroup_mask(cpu);
-	group = cpumask_first(mask);
-#elif defined(CONFIG_SCHED_SMT)
-	group = cpumask_first(topology_thread_cpumask(cpu));
-#else
-	return cpu;
-#endif
-	if (likely(group < NR_CPUS))
-		return group;
-	return cpu;
-}
-
 /*
  * Contribute to IO statistics IFF:
  *
diff --git a/block/partitions/ldm.c b/block/partitions/ldm.c
index bd8ae788f689..e507cfbd044e 100644
--- a/block/partitions/ldm.c
+++ b/block/partitions/ldm.c
@@ -2,7 +2,7 @@
  * ldm - Support for Windows Logical Disk Manager (Dynamic Disks)
  *
  * Copyright (C) 2001,2002 Richard Russon <ldm@flatcap.org>
- * Copyright (c) 2001-2007 Anton Altaparmakov
+ * Copyright (c) 2001-2012 Anton Altaparmakov
  * Copyright (C) 2001,2002 Jakob Kemi <jakob.kemi@telia.com>
  *
  * Documentation is available at http://www.linux-ntfs.org/doku.php?id=downloads
@@ -1341,20 +1341,17 @@ found:
 		ldm_error("REC value (%d) exceeds NUM value (%d)", rec, f->num);
 		return false;
 	}
-
 	if (f->map & (1 << rec)) {
 		ldm_error ("Duplicate VBLK, part %d.", rec);
 		f->map &= 0x7F;		/* Mark the group as broken */
 		return false;
 	}
-
 	f->map |= (1 << rec);
-
+	if (!rec)
+		memcpy(f->data, data, VBLK_SIZE_HEAD);
 	data += VBLK_SIZE_HEAD;
 	size -= VBLK_SIZE_HEAD;
-
-	memcpy (f->data+rec*(size-VBLK_SIZE_HEAD)+VBLK_SIZE_HEAD, data, size);
-
+	memcpy(f->data + VBLK_SIZE_HEAD + rec * size, data, size);
 	return true;
 }
 