/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/pid.h>
#include <asm/cputable.h>
#include <misc/cxl.h>

#include "cxl.h"
21 /* XXX: This is implementation specific */
22 static irqreturn_t
handle_psl_slice_error(struct cxl_context
*ctx
, u64 dsisr
, u64 errstat
)
24 u64 fir1
, fir2
, fir_slice
, serr
, afu_debug
;
26 fir1
= cxl_p1_read(ctx
->afu
->adapter
, CXL_PSL_FIR1
);
27 fir2
= cxl_p1_read(ctx
->afu
->adapter
, CXL_PSL_FIR2
);
28 fir_slice
= cxl_p1n_read(ctx
->afu
, CXL_PSL_FIR_SLICE_An
);
29 serr
= cxl_p1n_read(ctx
->afu
, CXL_PSL_SERR_An
);
30 afu_debug
= cxl_p1n_read(ctx
->afu
, CXL_AFU_DEBUG_An
);
32 dev_crit(&ctx
->afu
->dev
, "PSL ERROR STATUS: 0x%.16llx\n", errstat
);
33 dev_crit(&ctx
->afu
->dev
, "PSL_FIR1: 0x%.16llx\n", fir1
);
34 dev_crit(&ctx
->afu
->dev
, "PSL_FIR2: 0x%.16llx\n", fir2
);
35 dev_crit(&ctx
->afu
->dev
, "PSL_SERR_An: 0x%.16llx\n", serr
);
36 dev_crit(&ctx
->afu
->dev
, "PSL_FIR_SLICE_An: 0x%.16llx\n", fir_slice
);
37 dev_crit(&ctx
->afu
->dev
, "CXL_PSL_AFU_DEBUG_An: 0x%.16llx\n", afu_debug
);
39 dev_crit(&ctx
->afu
->dev
, "STOPPING CXL TRACE\n");
40 cxl_stop_trace(ctx
->afu
->adapter
);
42 return cxl_ack_irq(ctx
, 0, errstat
);
45 irqreturn_t
cxl_slice_irq_err(int irq
, void *data
)
47 struct cxl_afu
*afu
= data
;
48 u64 fir_slice
, errstat
, serr
, afu_debug
;
50 WARN(irq
, "CXL SLICE ERROR interrupt %i\n", irq
);
52 serr
= cxl_p1n_read(afu
, CXL_PSL_SERR_An
);
53 fir_slice
= cxl_p1n_read(afu
, CXL_PSL_FIR_SLICE_An
);
54 errstat
= cxl_p2n_read(afu
, CXL_PSL_ErrStat_An
);
55 afu_debug
= cxl_p1n_read(afu
, CXL_AFU_DEBUG_An
);
56 dev_crit(&afu
->dev
, "PSL_SERR_An: 0x%.16llx\n", serr
);
57 dev_crit(&afu
->dev
, "PSL_FIR_SLICE_An: 0x%.16llx\n", fir_slice
);
58 dev_crit(&afu
->dev
, "CXL_PSL_ErrStat_An: 0x%.16llx\n", errstat
);
59 dev_crit(&afu
->dev
, "CXL_PSL_AFU_DEBUG_An: 0x%.16llx\n", afu_debug
);
61 cxl_p1n_write(afu
, CXL_PSL_SERR_An
, serr
);
66 static irqreturn_t
cxl_irq_err(int irq
, void *data
)
68 struct cxl
*adapter
= data
;
69 u64 fir1
, fir2
, err_ivte
;
71 WARN(1, "CXL ERROR interrupt %i\n", irq
);
73 err_ivte
= cxl_p1_read(adapter
, CXL_PSL_ErrIVTE
);
74 dev_crit(&adapter
->dev
, "PSL_ErrIVTE: 0x%.16llx\n", err_ivte
);
76 dev_crit(&adapter
->dev
, "STOPPING CXL TRACE\n");
77 cxl_stop_trace(adapter
);
79 fir1
= cxl_p1_read(adapter
, CXL_PSL_FIR1
);
80 fir2
= cxl_p1_read(adapter
, CXL_PSL_FIR2
);
82 dev_crit(&adapter
->dev
, "PSL_FIR1: 0x%.16llx\nPSL_FIR2: 0x%.16llx\n", fir1
, fir2
);
87 static irqreturn_t
schedule_cxl_fault(struct cxl_context
*ctx
, u64 dsisr
, u64 dar
)
91 schedule_work(&ctx
->fault_work
);
95 static irqreturn_t
cxl_irq(int irq
, void *data
)
97 struct cxl_context
*ctx
= data
;
98 struct cxl_irq_info irq_info
;
102 if ((result
= cxl_get_irq(ctx
, &irq_info
))) {
103 WARN(1, "Unable to get CXL IRQ Info: %i\n", result
);
107 dsisr
= irq_info
.dsisr
;
110 pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq
, ctx
->pe
, dsisr
, dar
);
112 if (dsisr
& CXL_PSL_DSISR_An_DS
) {
114 * We don't inherently need to sleep to handle this, but we do
115 * need to get a ref to the task's mm, which we can't do from
116 * irq context without the potential for a deadlock since it
117 * takes the task_lock. An alternate option would be to keep a
118 * reference to the task's mm the entire time it has cxl open,
119 * but to do that we need to solve the issue where we hold a
120 * ref to the mm, but the mm can hold a ref to the fd after an
121 * mmap preventing anything from being cleaned up.
123 pr_devel("Scheduling segment miss handling for later pe: %i\n", ctx
->pe
);
124 return schedule_cxl_fault(ctx
, dsisr
, dar
);
127 if (dsisr
& CXL_PSL_DSISR_An_M
)
128 pr_devel("CXL interrupt: PTE not found\n");
129 if (dsisr
& CXL_PSL_DSISR_An_P
)
130 pr_devel("CXL interrupt: Storage protection violation\n");
131 if (dsisr
& CXL_PSL_DSISR_An_A
)
132 pr_devel("CXL interrupt: AFU lock access to write through or cache inhibited storage\n");
133 if (dsisr
& CXL_PSL_DSISR_An_S
)
134 pr_devel("CXL interrupt: Access was afu_wr or afu_zero\n");
135 if (dsisr
& CXL_PSL_DSISR_An_K
)
136 pr_devel("CXL interrupt: Access not permitted by virtual page class key protection\n");
138 if (dsisr
& CXL_PSL_DSISR_An_DM
) {
140 * In some cases we might be able to handle the fault
141 * immediately if hash_page would succeed, but we still need
142 * the task's mm, which as above we can't get without a lock
144 pr_devel("Scheduling page fault handling for later pe: %i\n", ctx
->pe
);
145 return schedule_cxl_fault(ctx
, dsisr
, dar
);
147 if (dsisr
& CXL_PSL_DSISR_An_ST
)
148 WARN(1, "CXL interrupt: Segment Table PTE not found\n");
149 if (dsisr
& CXL_PSL_DSISR_An_UR
)
150 pr_devel("CXL interrupt: AURP PTE not found\n");
151 if (dsisr
& CXL_PSL_DSISR_An_PE
)
152 return handle_psl_slice_error(ctx
, dsisr
, irq_info
.errstat
);
153 if (dsisr
& CXL_PSL_DSISR_An_AE
) {
154 pr_devel("CXL interrupt: AFU Error %.llx\n", irq_info
.afu_err
);
156 if (ctx
->pending_afu_err
) {
158 * This shouldn't happen - the PSL treats these errors
159 * as fatal and will have reset the AFU, so there's not
160 * much point buffering multiple AFU errors.
161 * OTOH if we DO ever see a storm of these come in it's
162 * probably best that we log them somewhere:
164 dev_err_ratelimited(&ctx
->afu
->dev
, "CXL AFU Error "
165 "undelivered to pe %i: %.llx\n",
166 ctx
->pe
, irq_info
.afu_err
);
168 spin_lock(&ctx
->lock
);
169 ctx
->afu_err
= irq_info
.afu_err
;
170 ctx
->pending_afu_err
= 1;
171 spin_unlock(&ctx
->lock
);
173 wake_up_all(&ctx
->wq
);
176 cxl_ack_irq(ctx
, CXL_PSL_TFC_An_A
, 0);
178 if (dsisr
& CXL_PSL_DSISR_An_OC
)
179 pr_devel("CXL interrupt: OS Context Warning\n");
181 WARN(1, "Unhandled CXL PSL IRQ\n");
185 static irqreturn_t
cxl_irq_multiplexed(int irq
, void *data
)
187 struct cxl_afu
*afu
= data
;
188 struct cxl_context
*ctx
;
189 int ph
= cxl_p2n_read(afu
, CXL_PSL_PEHandle_An
) & 0xffff;
193 ctx
= idr_find(&afu
->contexts_idr
, ph
);
195 ret
= cxl_irq(irq
, ctx
);
201 WARN(1, "Unable to demultiplex CXL PSL IRQ\n");
205 static irqreturn_t
cxl_irq_afu(int irq
, void *data
)
207 struct cxl_context
*ctx
= data
;
208 irq_hw_number_t hwirq
= irqd_to_hwirq(irq_get_irq_data(irq
));
209 int irq_off
, afu_irq
= 1;
213 for (r
= 1; r
< CXL_IRQ_RANGES
; r
++) {
214 irq_off
= hwirq
- ctx
->irqs
.offset
[r
];
215 range
= ctx
->irqs
.range
[r
];
216 if (irq_off
>= 0 && irq_off
< range
) {
222 if (unlikely(r
>= CXL_IRQ_RANGES
)) {
223 WARN(1, "Recieved AFU IRQ out of range for pe %i (virq %i hwirq %lx)\n",
224 ctx
->pe
, irq
, hwirq
);
228 pr_devel("Received AFU interrupt %i for pe: %i (virq %i hwirq %lx)\n",
229 afu_irq
, ctx
->pe
, irq
, hwirq
);
231 if (unlikely(!ctx
->irq_bitmap
)) {
232 WARN(1, "Recieved AFU IRQ for context with no IRQ bitmap\n");
235 spin_lock(&ctx
->lock
);
236 set_bit(afu_irq
- 1, ctx
->irq_bitmap
);
237 ctx
->pending_irq
= true;
238 spin_unlock(&ctx
->lock
);
240 wake_up_all(&ctx
->wq
);
245 unsigned int cxl_map_irq(struct cxl
*adapter
, irq_hw_number_t hwirq
,
246 irq_handler_t handler
, void *cookie
)
252 virq
= irq_create_mapping(NULL
, hwirq
);
254 dev_warn(&adapter
->dev
, "cxl_map_irq: irq_create_mapping failed\n");
258 cxl_setup_irq(adapter
, hwirq
, virq
);
260 pr_devel("hwirq %#lx mapped to virq %u\n", hwirq
, virq
);
262 result
= request_irq(virq
, handler
, 0, "cxl", cookie
);
264 dev_warn(&adapter
->dev
, "cxl_map_irq: request_irq failed: %i\n", result
);
271 void cxl_unmap_irq(unsigned int virq
, void *cookie
)
273 free_irq(virq
, cookie
);
274 irq_dispose_mapping(virq
);
277 static int cxl_register_one_irq(struct cxl
*adapter
,
278 irq_handler_t handler
,
280 irq_hw_number_t
*dest_hwirq
,
281 unsigned int *dest_virq
)
285 if ((hwirq
= cxl_alloc_one_irq(adapter
)) < 0)
288 if (!(virq
= cxl_map_irq(adapter
, hwirq
, handler
, cookie
)))
297 cxl_release_one_irq(adapter
, hwirq
);
301 int cxl_register_psl_err_irq(struct cxl
*adapter
)
305 if ((rc
= cxl_register_one_irq(adapter
, cxl_irq_err
, adapter
,
307 &adapter
->err_virq
)))
310 cxl_p1_write(adapter
, CXL_PSL_ErrIVTE
, adapter
->err_hwirq
& 0xffff);
315 void cxl_release_psl_err_irq(struct cxl
*adapter
)
317 cxl_p1_write(adapter
, CXL_PSL_ErrIVTE
, 0x0000000000000000);
318 cxl_unmap_irq(adapter
->err_virq
, adapter
);
319 cxl_release_one_irq(adapter
, adapter
->err_hwirq
);
322 int cxl_register_serr_irq(struct cxl_afu
*afu
)
327 if ((rc
= cxl_register_one_irq(afu
->adapter
, cxl_slice_irq_err
, afu
,
332 serr
= cxl_p1n_read(afu
, CXL_PSL_SERR_An
);
333 serr
= (serr
& 0x00ffffffffff0000ULL
) | (afu
->serr_hwirq
& 0xffff);
334 cxl_p1n_write(afu
, CXL_PSL_SERR_An
, serr
);
339 void cxl_release_serr_irq(struct cxl_afu
*afu
)
341 cxl_p1n_write(afu
, CXL_PSL_SERR_An
, 0x0000000000000000);
342 cxl_unmap_irq(afu
->serr_virq
, afu
);
343 cxl_release_one_irq(afu
->adapter
, afu
->serr_hwirq
);
346 int cxl_register_psl_irq(struct cxl_afu
*afu
)
348 return cxl_register_one_irq(afu
->adapter
, cxl_irq_multiplexed
, afu
,
349 &afu
->psl_hwirq
, &afu
->psl_virq
);
352 void cxl_release_psl_irq(struct cxl_afu
*afu
)
354 cxl_unmap_irq(afu
->psl_virq
, afu
);
355 cxl_release_one_irq(afu
->adapter
, afu
->psl_hwirq
);
358 int afu_register_irqs(struct cxl_context
*ctx
, u32 count
)
360 irq_hw_number_t hwirq
;
363 if ((rc
= cxl_alloc_irq_ranges(&ctx
->irqs
, ctx
->afu
->adapter
, count
)))
366 /* Multiplexed PSL Interrupt */
367 ctx
->irqs
.offset
[0] = ctx
->afu
->psl_hwirq
;
368 ctx
->irqs
.range
[0] = 1;
370 ctx
->irq_count
= count
;
371 ctx
->irq_bitmap
= kcalloc(BITS_TO_LONGS(count
),
372 sizeof(*ctx
->irq_bitmap
), GFP_KERNEL
);
373 if (!ctx
->irq_bitmap
)
375 for (r
= 1; r
< CXL_IRQ_RANGES
; r
++) {
376 hwirq
= ctx
->irqs
.offset
[r
];
377 for (i
= 0; i
< ctx
->irqs
.range
[r
]; hwirq
++, i
++) {
378 cxl_map_irq(ctx
->afu
->adapter
, hwirq
,
386 void afu_release_irqs(struct cxl_context
*ctx
)
388 irq_hw_number_t hwirq
;
392 for (r
= 1; r
< CXL_IRQ_RANGES
; r
++) {
393 hwirq
= ctx
->irqs
.offset
[r
];
394 for (i
= 0; i
< ctx
->irqs
.range
[r
]; hwirq
++, i
++) {
395 virq
= irq_find_mapping(NULL
, hwirq
);
397 cxl_unmap_irq(virq
, ctx
);
401 cxl_release_irq_ranges(&ctx
->irqs
, ctx
->afu
->adapter
);