author     Linus Torvalds <torvalds@linux-foundation.org>  2014-12-11 20:48:14 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-12-11 20:48:14 -0500
commit     140cd7fb04a4a2bc09a30980bc8104cc89e09330 (patch)
tree       776d57c7508f946d592de4334d4d3cb50fd36220 /drivers
parent     27afc5dbda52ee3dbcd0bda7375c917c6936b470 (diff)
parent     56548fc0e86cb9156af7a7e1f15ba78f251dafaf (diff)
Merge tag 'powerpc-3.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux
Pull powerpc updates from Michael Ellerman:
 "Some nice cleanups like removing bootmem, and removal of __get_cpu_var().

  There is one patch to mm/gup.c. This is the generic GUP implementation, but
  is only used by us and arm(64). We have an ack from Steve Capper, and
  although we didn't get an ack from Andrew he told us to take the patch
  through the powerpc tree.

  There's one cxl patch. This is in drivers/misc, but Greg said he was happy
  for us to manage fixes for it.

  There is an infrastructure patch to support an IPMI driver for OPAL.

  There is also an RTC driver for OPAL. We weren't able to get any response
  from the RTC maintainer, Alessandro Zummo, so in the end we just merged the
  driver.

  The usual batch of Freescale updates from Scott"

* tag 'powerpc-3.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux: (101 commits)
  powerpc/powernv: Return to cpu offline loop when finished in KVM guest
  powerpc/book3s: Fix partial invalidation of TLBs in MCE code.
  powerpc/mm: don't do tlbie for updatepp request with NO HPTE fault
  powerpc/xmon: Cleanup the breakpoint flags
  powerpc/xmon: Enable HW instruction breakpoint on POWER8
  powerpc/mm/thp: Use tlbiel if possible
  powerpc/mm/thp: Remove code duplication
  powerpc/mm/hugetlb: Sanity check gigantic hugepage count
  powerpc/oprofile: Disable pagefaults during user stack read
  powerpc/mm: Check for matching hpte without taking hpte lock
  powerpc: Drop useless warning in eeh_init()
  powerpc/powernv: Cleanup unused MCE definitions/declarations.
  powerpc/eeh: Dump PHB diag-data early
  powerpc/eeh: Recover EEH error on ownership change for BCM5719
  powerpc/eeh: Set EEH_PE_RESET on PE reset
  powerpc/eeh: Refactor eeh_reset_pe()
  powerpc: Remove more traces of bootmem
  powerpc/pseries: Initialise nvram_pstore_info's buf_lock
  cxl: Name interrupts in /proc/interrupt
  cxl: Return error to PSL if IRQ demultiplexing fails & print clearer warning
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/misc/cxl/cxl.h     |  15
-rw-r--r--  drivers/misc/cxl/fault.c   |   8
-rw-r--r--  drivers/misc/cxl/irq.c     | 144
-rw-r--r--  drivers/misc/cxl/native.c  |  14
-rw-r--r--  drivers/rtc/Kconfig        |  11
-rw-r--r--  drivers/rtc/Makefile       |   1
-rw-r--r--  drivers/rtc/rtc-opal.c     | 261
7 files changed, 414 insertions, 40 deletions
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 3d2b8677ec8a..b5b6bda44a00 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -336,6 +336,8 @@ struct cxl_sste {
 struct cxl_afu {
 	irq_hw_number_t psl_hwirq;
 	irq_hw_number_t serr_hwirq;
+	char *err_irq_name;
+	char *psl_irq_name;
 	unsigned int serr_virq;
 	void __iomem *p1n_mmio;
 	void __iomem *p2n_mmio;
@@ -379,6 +381,12 @@ struct cxl_afu {
 	bool enabled;
 };
 
+
+struct cxl_irq_name {
+	struct list_head list;
+	char *name;
+};
+
 /*
  * This is a cxl context. If the PSL is in dedicated mode, there will be one
  * of these per AFU. If in AFU directed there can be lots of these.
@@ -403,6 +411,7 @@ struct cxl_context {
 
 	unsigned long *irq_bitmap; /* Accessed from IRQ context */
 	struct cxl_irq_ranges irqs;
+	struct list_head irq_names;
 	u64 fault_addr;
 	u64 fault_dsisr;
 	u64 afu_err;
@@ -444,6 +453,7 @@ struct cxl {
 	struct dentry *trace;
 	struct dentry *psl_err_chk;
 	struct dentry *debugfs;
+	char *irq_name;
 	struct bin_attribute cxl_attr;
 	int adapter_num;
 	int user_irqs;
@@ -563,9 +573,6 @@ int _cxl_afu_deactivate_mode(struct cxl_afu *afu, int mode);
 int cxl_afu_deactivate_mode(struct cxl_afu *afu);
 int cxl_afu_select_best_mode(struct cxl_afu *afu);
 
-unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
-			 irq_handler_t handler, void *cookie);
-void cxl_unmap_irq(unsigned int virq, void *cookie);
 int cxl_register_psl_irq(struct cxl_afu *afu);
 void cxl_release_psl_irq(struct cxl_afu *afu);
 int cxl_register_psl_err_irq(struct cxl *adapter);
@@ -612,7 +619,7 @@ int cxl_attach_process(struct cxl_context *ctx, bool kernel, u64 wed,
 		       u64 amr);
 int cxl_detach_process(struct cxl_context *ctx);
 
-int cxl_get_irq(struct cxl_context *ctx, struct cxl_irq_info *info);
+int cxl_get_irq(struct cxl_afu *afu, struct cxl_irq_info *info);
 int cxl_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask);
 
 int cxl_check_error(struct cxl_afu *afu);
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index c99e896604ee..f8684bca2d79 100644
--- a/drivers/misc/cxl/fault.c
+++ b/drivers/misc/cxl/fault.c
@@ -133,7 +133,7 @@ static void cxl_handle_page_fault(struct cxl_context *ctx,
 {
 	unsigned flt = 0;
 	int result;
-	unsigned long access, flags;
+	unsigned long access, flags, inv_flags = 0;
 
 	if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) {
 		pr_devel("copro_handle_mm_fault failed: %#x\n", result);
@@ -149,8 +149,12 @@ static void cxl_handle_page_fault(struct cxl_context *ctx,
 		access |= _PAGE_RW;
 	if ((!ctx->kernel) || ~(dar & (1ULL << 63)))
 		access |= _PAGE_USER;
+
+	if (dsisr & DSISR_NOHPTE)
+		inv_flags |= HPTE_NOHPTE_UPDATE;
+
 	local_irq_save(flags);
-	hash_page_mm(mm, dar, access, 0x300);
+	hash_page_mm(mm, dar, access, 0x300, inv_flags);
 	local_irq_restore(flags);
 
 	pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe);
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index 336020c8e1af..c294925f73ee 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -92,20 +92,13 @@ static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 da
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t cxl_irq(int irq, void *data)
+static irqreturn_t cxl_irq(int irq, void *data, struct cxl_irq_info *irq_info)
 {
 	struct cxl_context *ctx = data;
-	struct cxl_irq_info irq_info;
 	u64 dsisr, dar;
-	int result;
-
-	if ((result = cxl_get_irq(ctx, &irq_info))) {
-		WARN(1, "Unable to get CXL IRQ Info: %i\n", result);
-		return IRQ_HANDLED;
-	}
 
-	dsisr = irq_info.dsisr;
-	dar = irq_info.dar;
+	dsisr = irq_info->dsisr;
+	dar = irq_info->dar;
 
 	pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);
 
@@ -149,9 +142,9 @@ static irqreturn_t cxl_irq(int irq, void *data)
 	if (dsisr & CXL_PSL_DSISR_An_UR)
 		pr_devel("CXL interrupt: AURP PTE not found\n");
 	if (dsisr & CXL_PSL_DSISR_An_PE)
-		return handle_psl_slice_error(ctx, dsisr, irq_info.errstat);
+		return handle_psl_slice_error(ctx, dsisr, irq_info->errstat);
 	if (dsisr & CXL_PSL_DSISR_An_AE) {
-		pr_devel("CXL interrupt: AFU Error %.llx\n", irq_info.afu_err);
+		pr_devel("CXL interrupt: AFU Error %.llx\n", irq_info->afu_err);
 
 		if (ctx->pending_afu_err) {
 			/*
@@ -163,10 +156,10 @@ static irqreturn_t cxl_irq(int irq, void *data)
 			 */
 			dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error "
 					    "undelivered to pe %i: %.llx\n",
-					    ctx->pe, irq_info.afu_err);
+					    ctx->pe, irq_info->afu_err);
 		} else {
 			spin_lock(&ctx->lock);
-			ctx->afu_err = irq_info.afu_err;
+			ctx->afu_err = irq_info->afu_err;
 			ctx->pending_afu_err = 1;
 			spin_unlock(&ctx->lock);
 
@@ -182,24 +175,43 @@ static irqreturn_t cxl_irq(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t fail_psl_irq(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
+{
+	if (irq_info->dsisr & CXL_PSL_DSISR_TRANS)
+		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
+	else
+		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
+
+	return IRQ_HANDLED;
+}
+
 static irqreturn_t cxl_irq_multiplexed(int irq, void *data)
 {
 	struct cxl_afu *afu = data;
 	struct cxl_context *ctx;
+	struct cxl_irq_info irq_info;
 	int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff;
 	int ret;
 
+	if ((ret = cxl_get_irq(afu, &irq_info))) {
+		WARN(1, "Unable to get CXL IRQ Info: %i\n", ret);
+		return fail_psl_irq(afu, &irq_info);
+	}
+
 	rcu_read_lock();
 	ctx = idr_find(&afu->contexts_idr, ph);
 	if (ctx) {
-		ret = cxl_irq(irq, ctx);
+		ret = cxl_irq(irq, ctx, &irq_info);
 		rcu_read_unlock();
 		return ret;
 	}
 	rcu_read_unlock();
 
-	WARN(1, "Unable to demultiplex CXL PSL IRQ\n");
-	return IRQ_HANDLED;
+	WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %.16llx DAR"
+		" %.16llx\n(Possible AFU HW issue - was a term/remove acked"
+		" with outstanding transactions?)\n", ph, irq_info.dsisr,
+		irq_info.dar);
+	return fail_psl_irq(afu, &irq_info);
 }
 
 static irqreturn_t cxl_irq_afu(int irq, void *data)
@@ -243,7 +255,7 @@ static irqreturn_t cxl_irq_afu(int irq, void *data)
 }
 
 unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
-			 irq_handler_t handler, void *cookie)
+			 irq_handler_t handler, void *cookie, const char *name)
 {
 	unsigned int virq;
 	int result;
@@ -259,7 +271,7 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
 
 	pr_devel("hwirq %#lx mapped to virq %u\n", hwirq, virq);
 
-	result = request_irq(virq, handler, 0, "cxl", cookie);
+	result = request_irq(virq, handler, 0, name, cookie);
 	if (result) {
 		dev_warn(&adapter->dev, "cxl_map_irq: request_irq failed: %i\n", result);
 		return 0;
@@ -278,14 +290,15 @@ static int cxl_register_one_irq(struct cxl *adapter,
 				irq_handler_t handler,
 				void *cookie,
 				irq_hw_number_t *dest_hwirq,
-				unsigned int *dest_virq)
+				unsigned int *dest_virq,
+				const char *name)
 {
 	int hwirq, virq;
 
 	if ((hwirq = cxl_alloc_one_irq(adapter)) < 0)
 		return hwirq;
 
-	if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie)))
+	if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie, name)))
 		goto err;
 
 	*dest_hwirq = hwirq;
@@ -302,10 +315,19 @@ int cxl_register_psl_err_irq(struct cxl *adapter)
 {
 	int rc;
 
+	adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
+				      dev_name(&adapter->dev));
+	if (!adapter->irq_name)
+		return -ENOMEM;
+
 	if ((rc = cxl_register_one_irq(adapter, cxl_irq_err, adapter,
 				       &adapter->err_hwirq,
-				       &adapter->err_virq)))
+				       &adapter->err_virq,
+				       adapter->irq_name))) {
+		kfree(adapter->irq_name);
+		adapter->irq_name = NULL;
 		return rc;
+	}
 
 	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->err_hwirq & 0xffff);
 
@@ -317,6 +339,7 @@ void cxl_release_psl_err_irq(struct cxl *adapter)
 	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
 	cxl_unmap_irq(adapter->err_virq, adapter);
 	cxl_release_one_irq(adapter, adapter->err_hwirq);
+	kfree(adapter->irq_name);
 }
 
 int cxl_register_serr_irq(struct cxl_afu *afu)
@@ -324,10 +347,18 @@ int cxl_register_serr_irq(struct cxl_afu *afu)
 	u64 serr;
 	int rc;
 
+	afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
+				      dev_name(&afu->dev));
+	if (!afu->err_irq_name)
+		return -ENOMEM;
+
 	if ((rc = cxl_register_one_irq(afu->adapter, cxl_slice_irq_err, afu,
 				       &afu->serr_hwirq,
-				       &afu->serr_virq)))
+				       &afu->serr_virq, afu->err_irq_name))) {
+		kfree(afu->err_irq_name);
+		afu->err_irq_name = NULL;
 		return rc;
+	}
 
 	serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
 	serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
@@ -341,24 +372,50 @@ void cxl_release_serr_irq(struct cxl_afu *afu)
 	cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
 	cxl_unmap_irq(afu->serr_virq, afu);
 	cxl_release_one_irq(afu->adapter, afu->serr_hwirq);
+	kfree(afu->err_irq_name);
 }
 
 int cxl_register_psl_irq(struct cxl_afu *afu)
 {
-	return cxl_register_one_irq(afu->adapter, cxl_irq_multiplexed, afu,
-				    &afu->psl_hwirq, &afu->psl_virq);
+	int rc;
+
+	afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s",
+				      dev_name(&afu->dev));
+	if (!afu->psl_irq_name)
+		return -ENOMEM;
+
+	if ((rc = cxl_register_one_irq(afu->adapter, cxl_irq_multiplexed, afu,
+				       &afu->psl_hwirq, &afu->psl_virq,
+				       afu->psl_irq_name))) {
+		kfree(afu->psl_irq_name);
+		afu->psl_irq_name = NULL;
+	}
+	return rc;
 }
 
 void cxl_release_psl_irq(struct cxl_afu *afu)
 {
 	cxl_unmap_irq(afu->psl_virq, afu);
 	cxl_release_one_irq(afu->adapter, afu->psl_hwirq);
+	kfree(afu->psl_irq_name);
+}
+
+void afu_irq_name_free(struct cxl_context *ctx)
+{
+	struct cxl_irq_name *irq_name, *tmp;
+
+	list_for_each_entry_safe(irq_name, tmp, &ctx->irq_names, list) {
+		kfree(irq_name->name);
+		list_del(&irq_name->list);
+		kfree(irq_name);
+	}
 }
 
 int afu_register_irqs(struct cxl_context *ctx, u32 count)
 {
 	irq_hw_number_t hwirq;
-	int rc, r, i;
+	int rc, r, i, j = 1;
+	struct cxl_irq_name *irq_name;
 
 	if ((rc = cxl_alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter, count)))
 		return rc;
@@ -372,15 +429,47 @@ int afu_register_irqs(struct cxl_context *ctx, u32 count)
 				   sizeof(*ctx->irq_bitmap), GFP_KERNEL);
 	if (!ctx->irq_bitmap)
 		return -ENOMEM;
+
+	/*
+	 * Allocate names first. If any fail, bail out before allocating
+	 * actual hardware IRQs.
+	 */
+	INIT_LIST_HEAD(&ctx->irq_names);
+	for (r = 1; r < CXL_IRQ_RANGES; r++) {
+		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
+			irq_name = kmalloc(sizeof(struct cxl_irq_name),
+					   GFP_KERNEL);
+			if (!irq_name)
+				goto out;
+			irq_name->name = kasprintf(GFP_KERNEL, "cxl-%s-pe%i-%i",
+						   dev_name(&ctx->afu->dev),
+						   ctx->pe, j);
+			if (!irq_name->name) {
+				kfree(irq_name);
+				goto out;
+			}
+			/* Add to tail so next look get the correct order */
+			list_add_tail(&irq_name->list, &ctx->irq_names);
+			j++;
+		}
+	}
+
+	/* We've allocated all memory now, so let's do the irq allocations */
+	irq_name = list_first_entry(&ctx->irq_names, struct cxl_irq_name, list);
 	for (r = 1; r < CXL_IRQ_RANGES; r++) {
 		hwirq = ctx->irqs.offset[r];
 		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
 			cxl_map_irq(ctx->afu->adapter, hwirq,
-				    cxl_irq_afu, ctx);
+				    cxl_irq_afu, ctx, irq_name->name);
+			irq_name = list_next_entry(irq_name, list);
 		}
 	}
 
 	return 0;
+
+out:
+	afu_irq_name_free(ctx);
+	return -ENOMEM;
 }
 
 void afu_release_irqs(struct cxl_context *ctx)
@@ -398,5 +487,6 @@ void afu_release_irqs(struct cxl_context *ctx)
 		}
 	}
 
+	afu_irq_name_free(ctx);
 	cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
 }
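
Editor's note: the point of the irq.c changes above is that every request_irq() call now gets a per-device name built with kasprintf() ("cxl-%s-err", "cxl-%s", "cxl-%s-pe%i-%i") instead of the fixed string "cxl", so the individual sources can be told apart in /proc/interrupts. A minimal sketch of that pattern, not taken from the patch; demo_dev, demo_register_irq and demo_release_irq are hypothetical names, and only stock kernel APIs (kasprintf, request_irq, free_irq, kfree) are assumed:

	#include <linux/device.h>
	#include <linux/interrupt.h>
	#include <linux/kernel.h>
	#include <linux/slab.h>

	struct demo_dev {			/* hypothetical driver state */
		struct device *dev;
		unsigned int virq;
		char *irq_name;
	};

	static int demo_register_irq(struct demo_dev *d, irq_handler_t handler)
	{
		int rc;

		/*
		 * request_irq() only stores the pointer to the name, so the
		 * string must stay allocated for the lifetime of the IRQ.
		 */
		d->irq_name = kasprintf(GFP_KERNEL, "demo-%s", dev_name(d->dev));
		if (!d->irq_name)
			return -ENOMEM;

		rc = request_irq(d->virq, handler, 0, d->irq_name, d);
		if (rc) {
			kfree(d->irq_name);
			d->irq_name = NULL;
		}
		return rc;
	}

	static void demo_release_irq(struct demo_dev *d)
	{
		free_irq(d->virq, d);
		kfree(d->irq_name);	/* only safe after free_irq() */
	}

This is also why the cxl changes free each name only after cxl_unmap_irq()/free_irq() has run.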
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index d47532e8f4f1..9a5a442269a8 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -637,18 +637,18 @@ int cxl_detach_process(struct cxl_context *ctx)
 	return detach_process_native_afu_directed(ctx);
 }
 
-int cxl_get_irq(struct cxl_context *ctx, struct cxl_irq_info *info)
+int cxl_get_irq(struct cxl_afu *afu, struct cxl_irq_info *info)
 {
 	u64 pidtid;
 
-	info->dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
-	info->dar = cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An);
-	info->dsr = cxl_p2n_read(ctx->afu, CXL_PSL_DSR_An);
-	pidtid = cxl_p2n_read(ctx->afu, CXL_PSL_PID_TID_An);
+	info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
+	info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
+	info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An);
+	pidtid = cxl_p2n_read(afu, CXL_PSL_PID_TID_An);
 	info->pid = pidtid >> 32;
 	info->tid = pidtid & 0xffffffff;
-	info->afu_err = cxl_p2n_read(ctx->afu, CXL_AFU_ERR_An);
-	info->errstat = cxl_p2n_read(ctx->afu, CXL_PSL_ErrStat_An);
+	info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An);
+	info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
 
 	return 0;
 }
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 4511ddc1ac31..f15cddfeb897 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -987,6 +987,17 @@ config RTC_DRV_NUC900
 	  If you say yes here you get support for the RTC subsystem of the
 	  NUC910/NUC920 used in embedded systems.
 
+config RTC_DRV_OPAL
+	tristate "IBM OPAL RTC driver"
+	depends on PPC_POWERNV
+	default y
+	help
+	  If you say yes here you get support for the PowerNV platform RTC
+	  driver based on OPAL interfaces.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called rtc-opal.
+
 comment "on-CPU RTC drivers"
 
 config RTC_DRV_DAVINCI
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index b188323c096a..c8ef3e1e6ccd 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -92,6 +92,7 @@ obj-$(CONFIG_RTC_DRV_MSM6242) += rtc-msm6242.o
 obj-$(CONFIG_RTC_DRV_MPC5121)	+= rtc-mpc5121.o
 obj-$(CONFIG_RTC_DRV_MV)	+= rtc-mv.o
 obj-$(CONFIG_RTC_DRV_NUC900)	+= rtc-nuc900.o
+obj-$(CONFIG_RTC_DRV_OPAL)	+= rtc-opal.o
 obj-$(CONFIG_RTC_DRV_OMAP)	+= rtc-omap.o
 obj-$(CONFIG_RTC_DRV_PALMAS)	+= rtc-palmas.o
 obj-$(CONFIG_RTC_DRV_PCAP)	+= rtc-pcap.o
diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c
new file mode 100644
index 000000000000..95f652165fe9
--- /dev/null
+++ b/drivers/rtc/rtc-opal.c
@@ -0,0 +1,261 @@
+/*
+ * IBM OPAL RTC driver
+ * Copyright (C) 2014 IBM
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ */
+
+#define DRVNAME		"rtc-opal"
+#define pr_fmt(fmt)	DRVNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/rtc.h>
+#include <linux/delay.h>
+#include <linux/bcd.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <asm/opal.h>
+#include <asm/firmware.h>
+
+static void opal_to_tm(u32 y_m_d, u64 h_m_s_ms, struct rtc_time *tm)
+{
+	tm->tm_year = ((bcd2bin(y_m_d >> 24) * 100) +
+		       bcd2bin((y_m_d >> 16) & 0xff)) - 1900;
+	tm->tm_mon = bcd2bin((y_m_d >> 8) & 0xff) - 1;
+	tm->tm_mday = bcd2bin(y_m_d & 0xff);
+	tm->tm_hour = bcd2bin((h_m_s_ms >> 56) & 0xff);
+	tm->tm_min = bcd2bin((h_m_s_ms >> 48) & 0xff);
+	tm->tm_sec = bcd2bin((h_m_s_ms >> 40) & 0xff);
+
+	GregorianDay(tm);
+}
+
+static void tm_to_opal(struct rtc_time *tm, u32 *y_m_d, u64 *h_m_s_ms)
+{
+	*y_m_d |= ((u32)bin2bcd((tm->tm_year + 1900) / 100)) << 24;
+	*y_m_d |= ((u32)bin2bcd((tm->tm_year + 1900) % 100)) << 16;
+	*y_m_d |= ((u32)bin2bcd((tm->tm_mon + 1))) << 8;
+	*y_m_d |= ((u32)bin2bcd(tm->tm_mday));
+
+	*h_m_s_ms |= ((u64)bin2bcd(tm->tm_hour)) << 56;
+	*h_m_s_ms |= ((u64)bin2bcd(tm->tm_min)) << 48;
+	*h_m_s_ms |= ((u64)bin2bcd(tm->tm_sec)) << 40;
+}
+
+static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
+{
+	long rc = OPAL_BUSY;
+	u32 y_m_d;
+	u64 h_m_s_ms;
+	__be32 __y_m_d;
+	__be64 __h_m_s_ms;
+
+	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
+		rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
+		if (rc == OPAL_BUSY_EVENT)
+			opal_poll_events(NULL);
+		else
+			msleep(10);
+	}
+
+	if (rc != OPAL_SUCCESS)
+		return -EIO;
+
+	y_m_d = be32_to_cpu(__y_m_d);
+	h_m_s_ms = be64_to_cpu(__h_m_s_ms);
+	opal_to_tm(y_m_d, h_m_s_ms, tm);
+
+	return 0;
+}
+
+static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm)
+{
+	long rc = OPAL_BUSY;
+	u32 y_m_d = 0;
+	u64 h_m_s_ms = 0;
+
+	tm_to_opal(tm, &y_m_d, &h_m_s_ms);
+	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
+		rc = opal_rtc_write(y_m_d, h_m_s_ms);
+		if (rc == OPAL_BUSY_EVENT)
+			opal_poll_events(NULL);
+		else
+			msleep(10);
+	}
+
+	return rc == OPAL_SUCCESS ? 0 : -EIO;
+}
+
+/*
+ * TPO Timed Power-On
+ *
+ * TPO get/set OPAL calls care about the hour and min and to make it consistent
+ * with the rtc utility time conversion functions, we use the 'u64' to store
+ * its value and perform bit shift by 32 before use..
+ */
+static int opal_get_tpo_time(struct device *dev, struct rtc_wkalrm *alarm)
+{
+	__be32 __y_m_d, __h_m;
+	struct opal_msg msg;
+	int rc, token;
+	u64 h_m_s_ms;
+	u32 y_m_d;
+
+	token = opal_async_get_token_interruptible();
+	if (token < 0) {
+		if (token != -ERESTARTSYS)
+			pr_err("Failed to get the async token\n");
+
+		return token;
+	}
+
+	rc = opal_tpo_read(token, &__y_m_d, &__h_m);
+	if (rc != OPAL_ASYNC_COMPLETION) {
+		rc = -EIO;
+		goto exit;
+	}
+
+	rc = opal_async_wait_response(token, &msg);
+	if (rc) {
+		rc = -EIO;
+		goto exit;
+	}
+
+	rc = be64_to_cpu(msg.params[1]);
+	if (rc != OPAL_SUCCESS) {
+		rc = -EIO;
+		goto exit;
+	}
+
+	y_m_d = be32_to_cpu(__y_m_d);
+	h_m_s_ms = ((u64)be32_to_cpu(__h_m) << 32);
+	opal_to_tm(y_m_d, h_m_s_ms, &alarm->time);
+
+exit:
+	opal_async_release_token(token);
+	return rc;
+}
+
+/* Set Timed Power-On */
+static int opal_set_tpo_time(struct device *dev, struct rtc_wkalrm *alarm)
+{
+	u64 h_m_s_ms = 0, token;
+	struct opal_msg msg;
+	u32 y_m_d = 0;
+	int rc;
+
+	tm_to_opal(&alarm->time, &y_m_d, &h_m_s_ms);
+
+	token = opal_async_get_token_interruptible();
+	if (token < 0) {
+		if (token != -ERESTARTSYS)
+			pr_err("Failed to get the async token\n");
+
+		return token;
+	}
+
+	/* TPO, we care about hour and minute */
+	rc = opal_tpo_write(token, y_m_d,
+			    (u32)((h_m_s_ms >> 32) & 0xffff0000));
+	if (rc != OPAL_ASYNC_COMPLETION) {
+		rc = -EIO;
+		goto exit;
+	}
+
+	rc = opal_async_wait_response(token, &msg);
+	if (rc) {
+		rc = -EIO;
+		goto exit;
+	}
+
+	rc = be64_to_cpu(msg.params[1]);
+	if (rc != OPAL_SUCCESS)
+		rc = -EIO;
+
+exit:
+	opal_async_release_token(token);
+	return rc;
+}
+
+static const struct rtc_class_ops opal_rtc_ops = {
+	.read_time	= opal_get_rtc_time,
+	.set_time	= opal_set_rtc_time,
+	.read_alarm	= opal_get_tpo_time,
+	.set_alarm	= opal_set_tpo_time,
+};
+
+static int opal_rtc_probe(struct platform_device *pdev)
+{
+	struct rtc_device *rtc;
+
+	if (pdev->dev.of_node && of_get_property(pdev->dev.of_node, "has-tpo",
+						 NULL))
+		device_set_wakeup_capable(&pdev->dev, true);
+
+	rtc = devm_rtc_device_register(&pdev->dev, DRVNAME, &opal_rtc_ops,
+				       THIS_MODULE);
+	if (IS_ERR(rtc))
+		return PTR_ERR(rtc);
+
+	rtc->uie_unsupported = 1;
+
+	return 0;
+}
+
+static const struct of_device_id opal_rtc_match[] = {
+	{
+		.compatible	= "ibm,opal-rtc",
+	},
+	{ }
+};
+MODULE_DEVICE_TABLE(of, opal_rtc_match);
+
+static const struct platform_device_id opal_rtc_driver_ids[] = {
+	{
+		.name		= "opal-rtc",
+	},
+	{ }
+};
+MODULE_DEVICE_TABLE(platform, opal_rtc_driver_ids);
+
+static struct platform_driver opal_rtc_driver = {
+	.probe		= opal_rtc_probe,
+	.id_table	= opal_rtc_driver_ids,
+	.driver		= {
+		.name		= DRVNAME,
+		.owner		= THIS_MODULE,
+		.of_match_table	= opal_rtc_match,
+	},
+};
+
+static int __init opal_rtc_init(void)
+{
+	if (!firmware_has_feature(FW_FEATURE_OPAL))
+		return -ENODEV;
+
+	return platform_driver_register(&opal_rtc_driver);
+}
+
+static void __exit opal_rtc_exit(void)
+{
+	platform_driver_unregister(&opal_rtc_driver);
+}
+
+MODULE_AUTHOR("Neelesh Gupta <neelegup@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("IBM OPAL RTC driver");
+MODULE_LICENSE("GPL");
+
+module_init(opal_rtc_init);
+module_exit(opal_rtc_exit);
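
Editor's note: once rtc-opal binds it registers an ordinary RTC class device, so the OPAL-backed clock can be read from userspace through the standard RTC character-device interface. A small sketch, assuming the OPAL RTC probed as /dev/rtc0 (the node name is not fixed by the driver):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/rtc.h>

	int main(void)
	{
		struct rtc_time tm;			/* same layout opal_to_tm() fills in the kernel */
		int fd = open("/dev/rtc0", O_RDONLY);	/* assumption: the OPAL RTC is rtc0 */

		if (fd < 0) {
			perror("open /dev/rtc0");
			return 1;
		}
		if (ioctl(fd, RTC_RD_TIME, &tm) < 0) {
			perror("RTC_RD_TIME");
			close(fd);
			return 1;
		}
		printf("%04d-%02d-%02d %02d:%02d:%02d\n",
		       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
		       tm.tm_hour, tm.tm_min, tm.tm_sec);
		close(fd);
		return 0;
	}

Pointing util-linux's hwclock at the same device node typically exercises this same ioctl path.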