 drivers/misc/cxl/irq.c | 78 ++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 64 insertions(+), 14 deletions(-)
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index 5033869621ad..3c04c14d1c60 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -19,6 +19,13 @@
 #include "cxl.h"
 #include "trace.h"
 
+static int afu_irq_range_start(void)
+{
+        if (cpu_has_feature(CPU_FTR_HVMODE))
+                return 1;
+        return 0;
+}
+
 static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 dar)
 {
         ctx->dsisr = dsisr;
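The new afu_irq_range_start() helper encodes where AFU interrupts live: on bare-metal (hypervisor mode), range 0 holds only the multiplexed PSL interrupt, so AFU ranges start at 1; in a guest, range 0 already contains AFU interrupts, so counting starts at 0. As a rough userspace sketch of that decision, with cpu_has_feature(CPU_FTR_HVMODE) replaced by a plain flag for illustration only:

#include <stdio.h>

/* Stand-in for cpu_has_feature(CPU_FTR_HVMODE): 1 on bare-metal, 0 in a guest. */
static int hv_mode = 1;

/* First IRQ range that can contain AFU interrupts. */
static int afu_irq_range_start(void)
{
        return hv_mode ? 1 : 0;
}

int main(void)
{
        printf("bare-metal: AFU ranges start at %d\n", afu_irq_range_start()); /* 1 */
        hv_mode = 0;
        printf("guest:      AFU ranges start at %d\n", afu_irq_range_start()); /* 0 */
        return 0;
}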
@@ -117,11 +124,23 @@ static irqreturn_t cxl_irq_afu(int irq, void *data)
 {
         struct cxl_context *ctx = data;
         irq_hw_number_t hwirq = irqd_to_hwirq(irq_get_irq_data(irq));
-        int irq_off, afu_irq = 1;
+        int irq_off, afu_irq = 0;
         __u16 range;
         int r;
 
-        for (r = 1; r < CXL_IRQ_RANGES; r++) {
+        /*
+         * Look for the interrupt number.
+         * On bare-metal, we know range 0 only contains the PSL
+         * interrupt, so we could start counting at range 1 and
+         * initialize afu_irq at 1.
+         * In a guest, range 0 also contains AFU interrupts, so it must
+         * be accounted for. We therefore initialize afu_irq at 0 so
+         * that the PSL interrupt is counted too.
+         *
+         * For code readability, it just seems easier to go over all
+         * the ranges on bare-metal and guest. The end result is the same.
+         */
+        for (r = 0; r < CXL_IRQ_RANGES; r++) {
                 irq_off = hwirq - ctx->irqs.offset[r];
                 range = ctx->irqs.range[r];
                 if (irq_off >= 0 && irq_off < range) {
@@ -131,7 +150,7 @@ static irqreturn_t cxl_irq_afu(int irq, void *data)
                 afu_irq += range;
         }
         if (unlikely(r >= CXL_IRQ_RANGES)) {
-                WARN(1, "Recieved AFU IRQ out of range for pe %i (virq %i hwirq %lx)\n",
+                WARN(1, "Received AFU IRQ out of range for pe %i (virq %i hwirq %lx)\n",
                      ctx->pe, irq, hwirq);
                 return IRQ_HANDLED;
         }
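Walking every range turns the hardware IRQ number back into the per-context AFU IRQ number, with AFU IRQ 0 being the guest's PSL interrupt. A standalone sketch of the same search, where a hypothetical struct and test values stand in for ctx->irqs (this is not driver code):

#include <stdio.h>

#define CXL_IRQ_RANGES 4

/* Simplified stand-in for the offset/range arrays held in ctx->irqs. */
struct irq_ranges {
        long offset[CXL_IRQ_RANGES];
        unsigned short range[CXL_IRQ_RANGES];
};

/* Return the AFU IRQ number for hwirq, or -1 if no range contains it. */
static int find_afu_irq(const struct irq_ranges *irqs, long hwirq)
{
        int afu_irq = 0;        /* 0 counts the PSL interrupt, as in a guest */
        int r;

        for (r = 0; r < CXL_IRQ_RANGES; r++) {
                long irq_off = hwirq - irqs->offset[r];

                if (irq_off >= 0 && irq_off < irqs->range[r])
                        return afu_irq + irq_off;
                afu_irq += irqs->range[r];
        }
        return -1;              /* the driver WARNs in this case */
}

int main(void)
{
        /* Guest-style layout: range 0 holds the PSL plus 3 AFU interrupts. */
        struct irq_ranges irqs = {
                .offset = { 0x100, 0x200, 0, 0 },
                .range  = { 4, 2, 0, 0 },
        };

        printf("%d\n", find_afu_irq(&irqs, 0x100));     /* 0: PSL interrupt */
        printf("%d\n", find_afu_irq(&irqs, 0x101));     /* 1: first AFU interrupt */
        printf("%d\n", find_afu_irq(&irqs, 0x201));     /* 5 */
        printf("%d\n", find_afu_irq(&irqs, 0x300));     /* -1: out of range */
        return 0;
}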
@@ -141,7 +160,7 @@ static irqreturn_t cxl_irq_afu(int irq, void *data)
                 afu_irq, ctx->pe, irq, hwirq);
 
         if (unlikely(!ctx->irq_bitmap)) {
-                WARN(1, "Recieved AFU IRQ for context with no IRQ bitmap\n");
+                WARN(1, "Received AFU IRQ for context with no IRQ bitmap\n");
                 return IRQ_HANDLED;
         }
         spin_lock(&ctx->lock);
@@ -227,17 +246,33 @@ int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
 {
         int rc, r, i, j = 1;
         struct cxl_irq_name *irq_name;
+        int alloc_count;
+
+        /*
+         * In native mode, range 0 is reserved for the multiplexed
+         * PSL interrupt. It has been allocated when the AFU was initialized.
+         *
+         * In a guest, the PSL interrupt is not multiplexed, but per-context,
+         * and is the first interrupt of range 0. It still needs to be
+         * allocated, so bump the count by one.
+         */
+        if (cpu_has_feature(CPU_FTR_HVMODE))
+                alloc_count = count;
+        else
+                alloc_count = count + 1;
 
         /* Initialize the list head to hold irq names */
         INIT_LIST_HEAD(&ctx->irq_names);
 
         if ((rc = cxl_ops->alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter,
-                                                        count)))
+                                                        alloc_count)))
                 return rc;
 
-        /* Multiplexed PSL Interrupt */
-        ctx->irqs.offset[0] = ctx->afu->psl_hwirq;
-        ctx->irqs.range[0] = 1;
+        if (cpu_has_feature(CPU_FTR_HVMODE)) {
+                /* Multiplexed PSL Interrupt */
+                ctx->irqs.offset[0] = ctx->afu->psl_hwirq;
+                ctx->irqs.range[0] = 1;
+        }
 
         ctx->irq_count = count;
         ctx->irq_bitmap = kcalloc(BITS_TO_LONGS(count),
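The sizing rule is the only difference between the two modes at allocation time. A minimal sketch, assuming a hypothetical irqs_to_allocate() helper in place of the open-coded if/else:

#include <stdio.h>

/*
 * Hypothetical helper (not in the driver) capturing the rule above:
 * bare-metal already allocated the multiplexed PSL interrupt with the
 * AFU, so only `count` AFU interrupts are requested; a guest needs one
 * extra slot for its per-context PSL interrupt.
 */
static unsigned int irqs_to_allocate(unsigned int count, int hv_mode)
{
        return hv_mode ? count : count + 1;
}

int main(void)
{
        printf("bare-metal: %u\n", irqs_to_allocate(4, 1));     /* 4 */
        printf("guest:      %u\n", irqs_to_allocate(4, 0));     /* 5 */
        return 0;
}

Note that ctx->irq_count and the IRQ bitmap are still sized from count in both modes: the bitmap tracks AFU interrupts only, never the PSL interrupt.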
@@ -249,7 +284,7 @@ int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
          * Allocate names first. If any fail, bail out before allocating
          * actual hardware IRQs.
          */
-        for (r = 1; r < CXL_IRQ_RANGES; r++) {
+        for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
                 for (i = 0; i < ctx->irqs.range[r]; i++) {
                         irq_name = kmalloc(sizeof(struct cxl_irq_name),
                                            GFP_KERNEL);
@@ -279,15 +314,30 @@ static void afu_register_hwirqs(struct cxl_context *ctx)
 {
         irq_hw_number_t hwirq;
         struct cxl_irq_name *irq_name;
-        int r,i;
+        int r, i;
+        irqreturn_t (*handler)(int irq, void *data);
 
         /* We've allocated all memory now, so let's do the irq allocations */
         irq_name = list_first_entry(&ctx->irq_names, struct cxl_irq_name, list);
-        for (r = 1; r < CXL_IRQ_RANGES; r++) {
+        for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
                 hwirq = ctx->irqs.offset[r];
                 for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
-                        cxl_map_irq(ctx->afu->adapter, hwirq,
-                                cxl_irq_afu, ctx, irq_name->name);
+                        if (r == 0 && i == 0)
+                                /*
+                                 * The very first interrupt of range 0 is
+                                 * always the PSL interrupt, but we only
+                                 * need to connect a handler for guests,
+                                 * because there's one PSL interrupt per
+                                 * context.
+                                 * On bare-metal, the PSL interrupt is
+                                 * multiplexed and was set up when the AFU
+                                 * was configured.
+                                 */
+                                handler = cxl_ops->psl_interrupt;
+                        else
+                                handler = cxl_irq_afu;
+                        cxl_map_irq(ctx->afu->adapter, hwirq, handler, ctx,
+                                irq_name->name);
                         irq_name = list_next_entry(irq_name, list);
                 }
         }
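The handler choice reflects the per-context guest PSL interrupt described in the comment. A userspace sketch of the selection logic, with psl_handler and afu_handler as simplified stand-ins for cxl_ops->psl_interrupt and cxl_irq_afu:

#include <stdio.h>

typedef int (*irq_handler_t)(int irq, void *data);

static int psl_handler(int irq, void *data)
{
        (void)data;
        printf("PSL handler for irq %d\n", irq);
        return 0;
}

static int afu_handler(int irq, void *data)
{
        (void)data;
        printf("AFU handler for irq %d\n", irq);
        return 0;
}

int main(void)
{
        int hv_mode = 0;                        /* pretend we are a guest */
        int range_start = hv_mode ? 1 : 0;      /* afu_irq_range_start() */
        irq_handler_t handler;

        for (int r = range_start; r < 2; r++) {
                for (int i = 0; i < 3; i++) {
                        /* In a guest, the first interrupt of range 0 is the PSL one. */
                        handler = (r == 0 && i == 0) ? psl_handler : afu_handler;
                        handler(r * 10 + i, NULL);
                }
        }
        return 0;
}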
@@ -311,7 +361,7 @@ void afu_release_irqs(struct cxl_context *ctx, void *cookie)
         unsigned int virq;
         int r, i;
 
-        for (r = 1; r < CXL_IRQ_RANGES; r++) {
+        for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
                 hwirq = ctx->irqs.offset[r];
                 for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
                         virq = irq_find_mapping(NULL, hwirq);