about summary refs log tree commit diff stats
path: root/drivers/misc
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-12-19 15:57:45 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2014-12-19 15:57:45 -0500
commit34b85e3574424beb30e4cd163e6da2e2282d2683 (patch)
tree8962201bcfb406db85796f2690f92bcc051373f4 /drivers/misc
parentd5e80b4b1857d5175bc6815aeefbb0e19b1a2c9b (diff)
parentd70a54e2d08510a99b1f10eceeae6f2f7086e226 (diff)
Merge tag 'powerpc-3.19-2' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux
Pull second batch of powerpc updates from Michael Ellerman: "The highlight is the series that reworks the idle management on powernv, which allows us to use deeper idle states on those machines. There's the fix from Anton for the "BUG at kernel/smpboot.c:134!" problem. An i2c driver for powernv. This is acked by Wolfram Sang, and he asked that we take it through the powerpc tree. A fix for audit from rgb at Red Hat, acked by Paul Moore who is one of the audit maintainers. A patch from Ben to export the symbol map of our OPAL firmware as a sysfs file, so that tools can use it. Also some CXL fixes, a couple of powerpc perf fixes, a fix for smt-enabled, and the patch to add __force to get_user() so we can use bitwise types" * tag 'powerpc-3.19-2' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux: powerpc/powernv: Ignore smt-enabled on Power8 and later powerpc/uaccess: Allow get_user() with bitwise types powerpc/powernv: Expose OPAL firmware symbol map powernv/powerpc: Add winkle support for offline cpus powernv/cpuidle: Redesign idle states management powerpc/powernv: Enable Offline CPUs to enter deep idle states powerpc/powernv: Switch off MMU before entering nap/sleep/rvwinkle mode i2c: Driver to expose PowerNV platform i2c busses powerpc: add little endian flag to syscall_get_arch() power/perf/hv-24x7: Use kmem_cache_free() instead of kfree powerpc/perf/hv-24x7: Use per-cpu page buffer cxl: Unmap MMIO regions when detaching a context cxl: Add timeout to process element commands cxl: Change contexts_lock to a mutex to fix sleep while atomic bug powerpc: Secondary CPUs must set cpu_callin_map after setting active and online
Diffstat (limited to 'drivers/misc')
-rw-r--r--drivers/misc/cxl/context.c26
-rw-r--r--drivers/misc/cxl/cxl.h9
-rw-r--r--drivers/misc/cxl/file.c6
-rw-r--r--drivers/misc/cxl/native.c12
-rw-r--r--drivers/misc/cxl/pci.c2
-rw-r--r--drivers/misc/cxl/sysfs.c10
6 files changed, 41 insertions, 24 deletions
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index cca472109135..51fd6b524371 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -34,7 +34,8 @@ struct cxl_context *cxl_context_alloc(void)
34/* 34/*
35 * Initialises a CXL context. 35 * Initialises a CXL context.
36 */ 36 */
37int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master) 37int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
38 struct address_space *mapping)
38{ 39{
39 int i; 40 int i;
40 41
@@ -42,6 +43,8 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
42 ctx->afu = afu; 43 ctx->afu = afu;
43 ctx->master = master; 44 ctx->master = master;
44 ctx->pid = NULL; /* Set in start work ioctl */ 45 ctx->pid = NULL; /* Set in start work ioctl */
46 mutex_init(&ctx->mapping_lock);
47 ctx->mapping = mapping;
45 48
46 /* 49 /*
47 * Allocate the segment table before we put it in the IDR so that we 50 * Allocate the segment table before we put it in the IDR so that we
@@ -82,12 +85,12 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
82 * Allocating IDR! We better make sure everything's setup that 85 * Allocating IDR! We better make sure everything's setup that
83 * dereferences from it. 86 * dereferences from it.
84 */ 87 */
88 mutex_lock(&afu->contexts_lock);
85 idr_preload(GFP_KERNEL); 89 idr_preload(GFP_KERNEL);
86 spin_lock(&afu->contexts_lock);
87 i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0, 90 i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0,
88 ctx->afu->num_procs, GFP_NOWAIT); 91 ctx->afu->num_procs, GFP_NOWAIT);
89 spin_unlock(&afu->contexts_lock);
90 idr_preload_end(); 92 idr_preload_end();
93 mutex_unlock(&afu->contexts_lock);
91 if (i < 0) 94 if (i < 0)
92 return i; 95 return i;
93 96
@@ -147,6 +150,12 @@ static void __detach_context(struct cxl_context *ctx)
147 afu_release_irqs(ctx); 150 afu_release_irqs(ctx);
148 flush_work(&ctx->fault_work); /* Only needed for dedicated process */ 151 flush_work(&ctx->fault_work); /* Only needed for dedicated process */
149 wake_up_all(&ctx->wq); 152 wake_up_all(&ctx->wq);
153
154 /* Release Problem State Area mapping */
155 mutex_lock(&ctx->mapping_lock);
156 if (ctx->mapping)
157 unmap_mapping_range(ctx->mapping, 0, 0, 1);
158 mutex_unlock(&ctx->mapping_lock);
150} 159}
151 160
152/* 161/*
@@ -168,21 +177,22 @@ void cxl_context_detach_all(struct cxl_afu *afu)
168 struct cxl_context *ctx; 177 struct cxl_context *ctx;
169 int tmp; 178 int tmp;
170 179
171 rcu_read_lock(); 180 mutex_lock(&afu->contexts_lock);
172 idr_for_each_entry(&afu->contexts_idr, ctx, tmp) 181 idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
173 /* 182 /*
174 * Anything done in here needs to be setup before the IDR is 183 * Anything done in here needs to be setup before the IDR is
175 * created and torn down after the IDR removed 184 * created and torn down after the IDR removed
176 */ 185 */
177 __detach_context(ctx); 186 __detach_context(ctx);
178 rcu_read_unlock(); 187 }
188 mutex_unlock(&afu->contexts_lock);
179} 189}
180 190
181void cxl_context_free(struct cxl_context *ctx) 191void cxl_context_free(struct cxl_context *ctx)
182{ 192{
183 spin_lock(&ctx->afu->contexts_lock); 193 mutex_lock(&ctx->afu->contexts_lock);
184 idr_remove(&ctx->afu->contexts_idr, ctx->pe); 194 idr_remove(&ctx->afu->contexts_idr, ctx->pe);
185 spin_unlock(&ctx->afu->contexts_lock); 195 mutex_unlock(&ctx->afu->contexts_lock);
186 synchronize_rcu(); 196 synchronize_rcu();
187 197
188 free_page((u64)ctx->sstp); 198 free_page((u64)ctx->sstp);
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index b5b6bda44a00..28078f8894a5 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -351,7 +351,7 @@ struct cxl_afu {
351 struct device *chardev_s, *chardev_m, *chardev_d; 351 struct device *chardev_s, *chardev_m, *chardev_d;
352 struct idr contexts_idr; 352 struct idr contexts_idr;
353 struct dentry *debugfs; 353 struct dentry *debugfs;
354 spinlock_t contexts_lock; 354 struct mutex contexts_lock;
355 struct mutex spa_mutex; 355 struct mutex spa_mutex;
356 spinlock_t afu_cntl_lock; 356 spinlock_t afu_cntl_lock;
357 357
@@ -398,6 +398,10 @@ struct cxl_context {
398 phys_addr_t psn_phys; 398 phys_addr_t psn_phys;
399 u64 psn_size; 399 u64 psn_size;
400 400
401 /* Used to unmap any mmaps when force detaching */
402 struct address_space *mapping;
403 struct mutex mapping_lock;
404
401 spinlock_t sste_lock; /* Protects segment table entries */ 405 spinlock_t sste_lock; /* Protects segment table entries */
402 struct cxl_sste *sstp; 406 struct cxl_sste *sstp;
403 u64 sstp0, sstp1; 407 u64 sstp0, sstp1;
@@ -599,7 +603,8 @@ int cxl_alloc_sst(struct cxl_context *ctx);
599void init_cxl_native(void); 603void init_cxl_native(void);
600 604
601struct cxl_context *cxl_context_alloc(void); 605struct cxl_context *cxl_context_alloc(void);
602int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master); 606int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
607 struct address_space *mapping);
603void cxl_context_free(struct cxl_context *ctx); 608void cxl_context_free(struct cxl_context *ctx);
604int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma); 609int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma);
605 610
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 378b099e7c0b..e9f2f10dbb37 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -77,7 +77,7 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
77 goto err_put_afu; 77 goto err_put_afu;
78 } 78 }
79 79
80 if ((rc = cxl_context_init(ctx, afu, master))) 80 if ((rc = cxl_context_init(ctx, afu, master, inode->i_mapping)))
81 goto err_put_afu; 81 goto err_put_afu;
82 82
83 pr_devel("afu_open pe: %i\n", ctx->pe); 83 pr_devel("afu_open pe: %i\n", ctx->pe);
@@ -113,6 +113,10 @@ static int afu_release(struct inode *inode, struct file *file)
113 __func__, ctx->pe); 113 __func__, ctx->pe);
114 cxl_context_detach(ctx); 114 cxl_context_detach(ctx);
115 115
116 mutex_lock(&ctx->mapping_lock);
117 ctx->mapping = NULL;
118 mutex_unlock(&ctx->mapping_lock);
119
116 put_device(&ctx->afu->dev); 120 put_device(&ctx->afu->dev);
117 121
118 /* 122 /*
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 9a5a442269a8..f2b37b41a0da 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -277,6 +277,7 @@ static int do_process_element_cmd(struct cxl_context *ctx,
277 u64 cmd, u64 pe_state) 277 u64 cmd, u64 pe_state)
278{ 278{
279 u64 state; 279 u64 state;
280 unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
280 281
281 WARN_ON(!ctx->afu->enabled); 282 WARN_ON(!ctx->afu->enabled);
282 283
@@ -286,6 +287,10 @@ static int do_process_element_cmd(struct cxl_context *ctx,
286 smp_mb(); 287 smp_mb();
287 cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe); 288 cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
288 while (1) { 289 while (1) {
290 if (time_after_eq(jiffies, timeout)) {
291 dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n");
292 return -EBUSY;
293 }
289 state = be64_to_cpup(ctx->afu->sw_command_status); 294 state = be64_to_cpup(ctx->afu->sw_command_status);
290 if (state == ~0ULL) { 295 if (state == ~0ULL) {
291 pr_err("cxl: Error adding process element to AFU\n"); 296 pr_err("cxl: Error adding process element to AFU\n");
@@ -610,13 +615,6 @@ static inline int detach_process_native_dedicated(struct cxl_context *ctx)
610 return 0; 615 return 0;
611} 616}
612 617
613/*
614 * TODO: handle case when this is called inside a rcu_read_lock() which may
615 * happen when we unbind the driver (ie. cxl_context_detach_all()) . Terminate
616 * & remove use a mutex lock and schedule which will not good with lock held.
617 * May need to write do_process_element_cmd() that handles outstanding page
618 * faults synchronously.
619 */
620static inline int detach_process_native_afu_directed(struct cxl_context *ctx) 618static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
621{ 619{
622 if (!ctx->pe_inserted) 620 if (!ctx->pe_inserted)
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 10c98ab7f46e..0f2cc9f8b4db 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -502,7 +502,7 @@ static struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice)
502 afu->dev.release = cxl_release_afu; 502 afu->dev.release = cxl_release_afu;
503 afu->slice = slice; 503 afu->slice = slice;
504 idr_init(&afu->contexts_idr); 504 idr_init(&afu->contexts_idr);
505 spin_lock_init(&afu->contexts_lock); 505 mutex_init(&afu->contexts_lock);
506 spin_lock_init(&afu->afu_cntl_lock); 506 spin_lock_init(&afu->afu_cntl_lock);
507 mutex_init(&afu->spa_mutex); 507 mutex_init(&afu->spa_mutex);
508 508
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index ce7ec06d87d1..461bdbd5d483 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -121,7 +121,7 @@ static ssize_t reset_store_afu(struct device *device,
121 int rc; 121 int rc;
122 122
123 /* Not safe to reset if it is currently in use */ 123 /* Not safe to reset if it is currently in use */
124 spin_lock(&afu->contexts_lock); 124 mutex_lock(&afu->contexts_lock);
125 if (!idr_is_empty(&afu->contexts_idr)) { 125 if (!idr_is_empty(&afu->contexts_idr)) {
126 rc = -EBUSY; 126 rc = -EBUSY;
127 goto err; 127 goto err;
@@ -132,7 +132,7 @@ static ssize_t reset_store_afu(struct device *device,
132 132
133 rc = count; 133 rc = count;
134err: 134err:
135 spin_unlock(&afu->contexts_lock); 135 mutex_unlock(&afu->contexts_lock);
136 return rc; 136 return rc;
137} 137}
138 138
@@ -247,7 +247,7 @@ static ssize_t mode_store(struct device *device, struct device_attribute *attr,
247 int rc = -EBUSY; 247 int rc = -EBUSY;
248 248
249 /* can't change this if we have a user */ 249 /* can't change this if we have a user */
250 spin_lock(&afu->contexts_lock); 250 mutex_lock(&afu->contexts_lock);
251 if (!idr_is_empty(&afu->contexts_idr)) 251 if (!idr_is_empty(&afu->contexts_idr))
252 goto err; 252 goto err;
253 253
@@ -271,7 +271,7 @@ static ssize_t mode_store(struct device *device, struct device_attribute *attr,
271 afu->current_mode = 0; 271 afu->current_mode = 0;
272 afu->num_procs = 0; 272 afu->num_procs = 0;
273 273
274 spin_unlock(&afu->contexts_lock); 274 mutex_unlock(&afu->contexts_lock);
275 275
276 if ((rc = _cxl_afu_deactivate_mode(afu, old_mode))) 276 if ((rc = _cxl_afu_deactivate_mode(afu, old_mode)))
277 return rc; 277 return rc;
@@ -280,7 +280,7 @@ static ssize_t mode_store(struct device *device, struct device_attribute *attr,
280 280
281 return count; 281 return count;
282err: 282err:
283 spin_unlock(&afu->contexts_lock); 283 mutex_unlock(&afu->contexts_lock);
284 return rc; 284 return rc;
285} 285}
286 286