author    Linus Torvalds <torvalds@linux-foundation.org>    2014-10-10 07:26:02 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2014-10-10 07:26:02 -0400
commit c798360cd1438090d51eeaa8e67985da11362eba (patch)
tree   0107d3b9ee7476264c3357287787d393545bd2d9 /lib/percpu_counter.c
parent b211e9d7c861bdb37b86d6384da9edfb80949ceb (diff)
parent 6ae833c7fe0c6ef1f0ab13cc775da230d6f4c256 (diff)
Merge branch 'for-3.18' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
Pull percpu updates from Tejun Heo:
"A lot of activity on the percpu front. Notable changes are...

- The percpu allocator can now take @gfp. If @gfp doesn't contain GFP_KERNEL, it tries to allocate from what's already available to the allocator, and a work item keeps the reserve around a certain level so that these atomic allocations usually succeed. This will replace the ad-hoc percpu memory pool used by blk-throttle and will also be used by the planned blkcg support for writeback IOs. Please note that I noticed a bug in how @gfp is interpreted while preparing this pull request and applied the fix 6ae833c7fe0c ("percpu: fix how @gfp is interpreted by the percpu allocator") just now.

- percpu_ref now uses longs for the percpu and global counters instead of ints. This leads to sparser packing of the percpu counters on 64-bit machines, but the overhead should be negligible, and it allows using percpu_ref for refcounting pages and in-memory objects directly.

- Switching a percpu_ref between percpu and single-counter modes is now independent of putting the base ref, and a percpu_ref can optionally be initialized in single or killed mode. This avoids percpu shutdown latency in cases where refcounted objects may be synchronously created and destroyed in rapid succession with only a fraction of them reaching fully operational status (SCSI probing does this when combined with blk-mq support). It is also planned to be used to implement a forced single mode so that underflows can be detected more promptly for debugging.

There's a separate branch, percpu/for-3.18-consistent-ops, which cleans up the duplicate percpu accessors. That branch causes a number of conflicts with s390 and other trees. I'll send a separate pull request with resolutions once the other branches are merged."

* 'for-3.18' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (33 commits)
  percpu: fix how @gfp is interpreted by the percpu allocator
  blk-mq, percpu_ref: start q->mq_usage_counter in atomic mode
  percpu_ref: make INIT_ATOMIC and switch_to_atomic() sticky
  percpu_ref: add PERCPU_REF_INIT_* flags
  percpu_ref: decouple switching to percpu mode and reinit
  percpu_ref: decouple switching to atomic mode and killing
  percpu_ref: add PCPU_REF_DEAD
  percpu_ref: rename things to prepare for decoupling percpu/atomic mode switch
  percpu_ref: replace pcpu_ prefix with percpu_
  percpu_ref: minor code and comment updates
  percpu_ref: relocate percpu_ref_reinit()
  Revert "blk-mq, percpu_ref: implement a kludge for SCSI blk-mq stall during probe"
  Revert "percpu: free percpu allocation info for uniprocessor system"
  percpu-refcount: make percpu_ref based on longs instead of ints
  percpu-refcount: improve WARN messages
  percpu: fix locking regression in the failure path of pcpu_alloc()
  percpu-refcount: add @gfp to percpu_ref_init()
  proportions: add @gfp to init functions
  percpu_counter: add @gfp to percpu_counter_init()
  percpu_counter: make percpu_counters_lock irq-safe
  ...
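As a minimal sketch of the percpu_ref changes described above (the callback and function names here are hypothetical illustrations, not part of this series), initializing a ref in atomic mode with a non-blocking @gfp and later flipping it to percpu mode might look like this:

    #include <linux/percpu-refcount.h>
    #include <linux/gfp.h>

    /* Hypothetical release callback; free the object embedding @ref here. */
    static void example_release(struct percpu_ref *ref)
    {
    }

    static int example_init(struct percpu_ref *ref)
    {
            /*
             * Start in atomic (single-counter) mode via the new
             * PERCPU_REF_INIT_ATOMIC flag. GFP_NOWAIT means the percpu
             * allocation is served from the allocator's reserve rather
             * than blocking, per the @gfp behaviour described above.
             */
            int err = percpu_ref_init(ref, example_release,
                                      PERCPU_REF_INIT_ATOMIC, GFP_NOWAIT);
            if (err)
                    return err;

            /* Once the object is fully operational, switch to percpu mode. */
            percpu_ref_switch_to_percpu(ref);
            return 0;
    }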
Diffstat (limited to 'lib/percpu_counter.c')
-rw-r--r--  lib/percpu_counter.c | 20 ++++++++++++--------
1 file changed, 12 insertions(+), 8 deletions(-)
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 7dd33577b905..48144cdae819 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -112,13 +112,15 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
 }
 EXPORT_SYMBOL(__percpu_counter_sum);
 
-int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
+int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
 			  struct lock_class_key *key)
 {
+	unsigned long flags __maybe_unused;
+
 	raw_spin_lock_init(&fbc->lock);
 	lockdep_set_class(&fbc->lock, key);
 	fbc->count = amount;
-	fbc->counters = alloc_percpu(s32);
+	fbc->counters = alloc_percpu_gfp(s32, gfp);
 	if (!fbc->counters)
 		return -ENOMEM;
 
@@ -126,9 +128,9 @@ int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
 
 #ifdef CONFIG_HOTPLUG_CPU
 	INIT_LIST_HEAD(&fbc->list);
-	spin_lock(&percpu_counters_lock);
+	spin_lock_irqsave(&percpu_counters_lock, flags);
 	list_add(&fbc->list, &percpu_counters);
-	spin_unlock(&percpu_counters_lock);
+	spin_unlock_irqrestore(&percpu_counters_lock, flags);
 #endif
 	return 0;
 }
@@ -136,15 +138,17 @@ EXPORT_SYMBOL(__percpu_counter_init);
 
 void percpu_counter_destroy(struct percpu_counter *fbc)
 {
+	unsigned long flags __maybe_unused;
+
 	if (!fbc->counters)
 		return;
 
 	debug_percpu_counter_deactivate(fbc);
 
 #ifdef CONFIG_HOTPLUG_CPU
-	spin_lock(&percpu_counters_lock);
+	spin_lock_irqsave(&percpu_counters_lock, flags);
 	list_del(&fbc->list);
-	spin_unlock(&percpu_counters_lock);
+	spin_unlock_irqrestore(&percpu_counters_lock, flags);
 #endif
 	free_percpu(fbc->counters);
 	fbc->counters = NULL;
@@ -173,7 +177,7 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
 		return NOTIFY_OK;
 
 	cpu = (unsigned long)hcpu;
-	spin_lock(&percpu_counters_lock);
+	spin_lock_irq(&percpu_counters_lock);
 	list_for_each_entry(fbc, &percpu_counters, list) {
 		s32 *pcount;
 		unsigned long flags;
@@ -184,7 +188,7 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
 		*pcount = 0;
 		raw_spin_unlock_irqrestore(&fbc->lock, flags);
 	}
-	spin_unlock(&percpu_counters_lock);
+	spin_unlock_irq(&percpu_counters_lock);
 #endif
 	return NOTIFY_OK;
 }
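For reference, a caller-side sketch of the updated percpu_counter API (the counter and init function names are hypothetical):

    #include <linux/percpu_counter.h>
    #include <linux/gfp.h>

    static struct percpu_counter nr_widgets;	/* hypothetical counter */

    static int widgets_init(void)
    {
            /*
             * The @gfp argument added by this series is forwarded to
             * alloc_percpu_gfp(); passing GFP_KERNEL preserves the old
             * blocking behaviour.
             */
            int err = percpu_counter_init(&nr_widgets, 0, GFP_KERNEL);

            if (err)
                    return err;

            percpu_counter_inc(&nr_widgets);
            return 0;
    }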