/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
10 #include <linux/spinlock.h>
11 #include <linux/sched.h>
12 #include <linux/slab.h>
13 #include <linux/sched.h>
14 #include <linux/mutex.h>
16 #include <linux/uaccess.h>
17 #include <asm/synch.h>
22 static int afu_control(struct cxl_afu
*afu
, u64 command
,
23 u64 result
, u64 mask
, bool enabled
)
25 u64 AFU_Cntl
= cxl_p2n_read(afu
, CXL_AFU_Cntl_An
);
26 unsigned long timeout
= jiffies
+ (HZ
* CXL_TIMEOUT
);
28 spin_lock(&afu
->afu_cntl_lock
);
29 pr_devel("AFU command starting: %llx\n", command
);
31 cxl_p2n_write(afu
, CXL_AFU_Cntl_An
, AFU_Cntl
| command
);
33 AFU_Cntl
= cxl_p2n_read(afu
, CXL_AFU_Cntl_An
);
34 while ((AFU_Cntl
& mask
) != result
) {
35 if (time_after_eq(jiffies
, timeout
)) {
36 dev_warn(&afu
->dev
, "WARNING: AFU control timed out!\n");
37 spin_unlock(&afu
->afu_cntl_lock
);
40 pr_devel_ratelimited("AFU control... (0x%.16llx)\n",
43 AFU_Cntl
= cxl_p2n_read(afu
, CXL_AFU_Cntl_An
);
45 pr_devel("AFU command complete: %llx\n", command
);
46 afu
->enabled
= enabled
;
47 spin_unlock(&afu
->afu_cntl_lock
);
52 static int afu_enable(struct cxl_afu
*afu
)
54 pr_devel("AFU enable request\n");
56 return afu_control(afu
, CXL_AFU_Cntl_An_E
,
57 CXL_AFU_Cntl_An_ES_Enabled
,
58 CXL_AFU_Cntl_An_ES_MASK
, true);
61 int cxl_afu_disable(struct cxl_afu
*afu
)
63 pr_devel("AFU disable request\n");
65 return afu_control(afu
, 0, CXL_AFU_Cntl_An_ES_Disabled
,
66 CXL_AFU_Cntl_An_ES_MASK
, false);
69 /* This will disable as well as reset */
70 int cxl_afu_reset(struct cxl_afu
*afu
)
72 pr_devel("AFU reset request\n");
74 return afu_control(afu
, CXL_AFU_Cntl_An_RA
,
75 CXL_AFU_Cntl_An_RS_Complete
| CXL_AFU_Cntl_An_ES_Disabled
,
76 CXL_AFU_Cntl_An_RS_MASK
| CXL_AFU_Cntl_An_ES_MASK
,
80 static int afu_check_and_enable(struct cxl_afu
*afu
)
84 return afu_enable(afu
);
87 int cxl_psl_purge(struct cxl_afu
*afu
)
89 u64 PSL_CNTL
= cxl_p1n_read(afu
, CXL_PSL_SCNTL_An
);
90 u64 AFU_Cntl
= cxl_p2n_read(afu
, CXL_AFU_Cntl_An
);
93 unsigned long timeout
= jiffies
+ (HZ
* CXL_TIMEOUT
);
95 pr_devel("PSL purge request\n");
97 if ((AFU_Cntl
& CXL_AFU_Cntl_An_ES_MASK
) != CXL_AFU_Cntl_An_ES_Disabled
) {
98 WARN(1, "psl_purge request while AFU not disabled!\n");
102 cxl_p1n_write(afu
, CXL_PSL_SCNTL_An
,
103 PSL_CNTL
| CXL_PSL_SCNTL_An_Pc
);
104 start
= local_clock();
105 PSL_CNTL
= cxl_p1n_read(afu
, CXL_PSL_SCNTL_An
);
106 while ((PSL_CNTL
& CXL_PSL_SCNTL_An_Ps_MASK
)
107 == CXL_PSL_SCNTL_An_Ps_Pending
) {
108 if (time_after_eq(jiffies
, timeout
)) {
109 dev_warn(&afu
->dev
, "WARNING: PSL Purge timed out!\n");
112 dsisr
= cxl_p2n_read(afu
, CXL_PSL_DSISR_An
);
113 pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%.16llx PSL_DSISR: 0x%.16llx\n", PSL_CNTL
, dsisr
);
114 if (dsisr
& CXL_PSL_DSISR_TRANS
) {
115 dar
= cxl_p2n_read(afu
, CXL_PSL_DAR_An
);
116 dev_notice(&afu
->dev
, "PSL purge terminating pending translation, DSISR: 0x%.16llx, DAR: 0x%.16llx\n", dsisr
, dar
);
117 cxl_p2n_write(afu
, CXL_PSL_TFC_An
, CXL_PSL_TFC_An_AE
);
119 dev_notice(&afu
->dev
, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%.16llx\n", dsisr
);
120 cxl_p2n_write(afu
, CXL_PSL_TFC_An
, CXL_PSL_TFC_An_A
);
124 PSL_CNTL
= cxl_p1n_read(afu
, CXL_PSL_SCNTL_An
);
127 pr_devel("PSL purged in %lld ns\n", end
- start
);
129 cxl_p1n_write(afu
, CXL_PSL_SCNTL_An
,
130 PSL_CNTL
& ~CXL_PSL_SCNTL_An_Pc
);
/*
 * Given the size in bytes of a Scheduled Process Area, return the maximum
 * number of process elements it can hold.
 */
static int spa_max_procs(int spa_size)
{
	/*
	 * From the CAIA:
	 * end_of_SPA_area = SPA_Base + ((n+4) * 128) + (( ((n*8) + 127) >> 7) * 128) + 255
	 * Most of that junk is really just an overly-complicated way of saying
	 * the last 256 bytes are __aligned(128), so it's really:
	 * end_of_SPA_area = end_of_PSL_queue_area + __aligned(128) 255
	 * and
	 * end_of_PSL_queue_area = SPA_Base + ((n+4) * 128) + (n*8) - 1
	 * so
	 * sizeof(SPA) = ((n+4) * 128) + (n*8) + __aligned(128) 256
	 * Ignore the alignment (which is safe in this case as long as we are
	 * careful with our rounding) and solve for n:
	 */
	return ((spa_size / 8) - 96) / 17;
}
152 static int alloc_spa(struct cxl_afu
*afu
)
156 /* Work out how many pages to allocate */
160 afu
->spa_size
= (1 << afu
->spa_order
) * PAGE_SIZE
;
161 afu
->spa_max_procs
= spa_max_procs(afu
->spa_size
);
162 } while (afu
->spa_max_procs
< afu
->num_procs
);
164 WARN_ON(afu
->spa_size
> 0x100000); /* Max size supported by the hardware */
166 if (!(afu
->spa
= (struct cxl_process_element
*)
167 __get_free_pages(GFP_KERNEL
| __GFP_ZERO
, afu
->spa_order
))) {
168 pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n");
171 pr_devel("spa pages: %i afu->spa_max_procs: %i afu->num_procs: %i\n",
172 1<<afu
->spa_order
, afu
->spa_max_procs
, afu
->num_procs
);
174 afu
->sw_command_status
= (__be64
*)((char *)afu
->spa
+
175 ((afu
->spa_max_procs
+ 3) * 128));
177 spap
= virt_to_phys(afu
->spa
) & CXL_PSL_SPAP_Addr
;
178 spap
|= ((afu
->spa_size
>> (12 - CXL_PSL_SPAP_Size_Shift
)) - 1) & CXL_PSL_SPAP_Size
;
179 spap
|= CXL_PSL_SPAP_V
;
180 pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n", afu
->spa
, afu
->spa_max_procs
, afu
->sw_command_status
, spap
);
181 cxl_p1n_write(afu
, CXL_PSL_SPAP_An
, spap
);
186 static void release_spa(struct cxl_afu
*afu
)
188 free_pages((unsigned long) afu
->spa
, afu
->spa_order
);
191 int cxl_tlb_slb_invalidate(struct cxl
*adapter
)
193 unsigned long timeout
= jiffies
+ (HZ
* CXL_TIMEOUT
);
195 pr_devel("CXL adapter wide TLBIA & SLBIA\n");
197 cxl_p1_write(adapter
, CXL_PSL_AFUSEL
, CXL_PSL_AFUSEL_A
);
199 cxl_p1_write(adapter
, CXL_PSL_TLBIA
, CXL_TLB_SLB_IQ_ALL
);
200 while (cxl_p1_read(adapter
, CXL_PSL_TLBIA
) & CXL_TLB_SLB_P
) {
201 if (time_after_eq(jiffies
, timeout
)) {
202 dev_warn(&adapter
->dev
, "WARNING: CXL adapter wide TLBIA timed out!\n");
208 cxl_p1_write(adapter
, CXL_PSL_SLBIA
, CXL_TLB_SLB_IQ_ALL
);
209 while (cxl_p1_read(adapter
, CXL_PSL_SLBIA
) & CXL_TLB_SLB_P
) {
210 if (time_after_eq(jiffies
, timeout
)) {
211 dev_warn(&adapter
->dev
, "WARNING: CXL adapter wide SLBIA timed out!\n");
219 int cxl_afu_slbia(struct cxl_afu
*afu
)
221 unsigned long timeout
= jiffies
+ (HZ
* CXL_TIMEOUT
);
223 pr_devel("cxl_afu_slbia issuing SLBIA command\n");
224 cxl_p2n_write(afu
, CXL_SLBIA_An
, CXL_TLB_SLB_IQ_ALL
);
225 while (cxl_p2n_read(afu
, CXL_SLBIA_An
) & CXL_TLB_SLB_P
) {
226 if (time_after_eq(jiffies
, timeout
)) {
227 dev_warn(&afu
->dev
, "WARNING: CXL AFU SLBIA timed out!\n");
235 static int cxl_write_sstp(struct cxl_afu
*afu
, u64 sstp0
, u64 sstp1
)
239 /* 1. Disable SSTP by writing 0 to SSTP1[V] */
240 cxl_p2n_write(afu
, CXL_SSTP1_An
, 0);
242 /* 2. Invalidate all SLB entries */
243 if ((rc
= cxl_afu_slbia(afu
)))
246 /* 3. Set SSTP0_An */
247 cxl_p2n_write(afu
, CXL_SSTP0_An
, sstp0
);
249 /* 4. Set SSTP1_An */
250 cxl_p2n_write(afu
, CXL_SSTP1_An
, sstp1
);
255 /* Using per slice version may improve performance here. (ie. SLBIA_An) */
256 static void slb_invalid(struct cxl_context
*ctx
)
258 struct cxl
*adapter
= ctx
->afu
->adapter
;
261 WARN_ON(!mutex_is_locked(&ctx
->afu
->spa_mutex
));
263 cxl_p1_write(adapter
, CXL_PSL_LBISEL
,
264 ((u64
)be32_to_cpu(ctx
->elem
->common
.pid
) << 32) |
265 be32_to_cpu(ctx
->elem
->lpid
));
266 cxl_p1_write(adapter
, CXL_PSL_SLBIA
, CXL_TLB_SLB_IQ_LPIDPID
);
269 slbia
= cxl_p1_read(adapter
, CXL_PSL_SLBIA
);
270 if (!(slbia
& CXL_TLB_SLB_P
))
276 static int do_process_element_cmd(struct cxl_context
*ctx
,
277 u64 cmd
, u64 pe_state
)
281 WARN_ON(!ctx
->afu
->enabled
);
283 ctx
->elem
->software_state
= cpu_to_be32(pe_state
);
285 *(ctx
->afu
->sw_command_status
) = cpu_to_be64(cmd
| 0 | ctx
->pe
);
287 cxl_p1n_write(ctx
->afu
, CXL_PSL_LLCMD_An
, cmd
| ctx
->pe
);
289 state
= be64_to_cpup(ctx
->afu
->sw_command_status
);
290 if (state
== ~0ULL) {
291 pr_err("cxl: Error adding process element to AFU\n");
294 if ((state
& (CXL_SPA_SW_CMD_MASK
| CXL_SPA_SW_STATE_MASK
| CXL_SPA_SW_LINK_MASK
)) ==
295 (cmd
| (cmd
>> 16) | ctx
->pe
))
298 * The command won't finish in the PSL if there are
299 * outstanding DSIs. Hence we need to yield here in
300 * case there are outstanding DSIs that we need to
301 * service. Tuning possiblity: we could wait for a
310 static int add_process_element(struct cxl_context
*ctx
)
314 mutex_lock(&ctx
->afu
->spa_mutex
);
315 pr_devel("%s Adding pe: %i started\n", __func__
, ctx
->pe
);
316 if (!(rc
= do_process_element_cmd(ctx
, CXL_SPA_SW_CMD_ADD
, CXL_PE_SOFTWARE_STATE_V
)))
317 ctx
->pe_inserted
= true;
318 pr_devel("%s Adding pe: %i finished\n", __func__
, ctx
->pe
);
319 mutex_unlock(&ctx
->afu
->spa_mutex
);
323 static int terminate_process_element(struct cxl_context
*ctx
)
327 /* fast path terminate if it's already invalid */
328 if (!(ctx
->elem
->software_state
& cpu_to_be32(CXL_PE_SOFTWARE_STATE_V
)))
331 mutex_lock(&ctx
->afu
->spa_mutex
);
332 pr_devel("%s Terminate pe: %i started\n", __func__
, ctx
->pe
);
333 rc
= do_process_element_cmd(ctx
, CXL_SPA_SW_CMD_TERMINATE
,
334 CXL_PE_SOFTWARE_STATE_V
| CXL_PE_SOFTWARE_STATE_T
);
335 ctx
->elem
->software_state
= 0; /* Remove Valid bit */
336 pr_devel("%s Terminate pe: %i finished\n", __func__
, ctx
->pe
);
337 mutex_unlock(&ctx
->afu
->spa_mutex
);
341 static int remove_process_element(struct cxl_context
*ctx
)
345 mutex_lock(&ctx
->afu
->spa_mutex
);
346 pr_devel("%s Remove pe: %i started\n", __func__
, ctx
->pe
);
347 if (!(rc
= do_process_element_cmd(ctx
, CXL_SPA_SW_CMD_REMOVE
, 0)))
348 ctx
->pe_inserted
= false;
350 pr_devel("%s Remove pe: %i finished\n", __func__
, ctx
->pe
);
351 mutex_unlock(&ctx
->afu
->spa_mutex
);
357 static void assign_psn_space(struct cxl_context
*ctx
)
359 if (!ctx
->afu
->pp_size
|| ctx
->master
) {
360 ctx
->psn_phys
= ctx
->afu
->psn_phys
;
361 ctx
->psn_size
= ctx
->afu
->adapter
->ps_size
;
363 ctx
->psn_phys
= ctx
->afu
->psn_phys
+
364 (ctx
->afu
->pp_offset
+ ctx
->afu
->pp_size
* ctx
->pe
);
365 ctx
->psn_size
= ctx
->afu
->pp_size
;
369 static int activate_afu_directed(struct cxl_afu
*afu
)
373 dev_info(&afu
->dev
, "Activating AFU directed mode\n");
378 cxl_p1n_write(afu
, CXL_PSL_SCNTL_An
, CXL_PSL_SCNTL_An_PM_AFU
);
379 cxl_p1n_write(afu
, CXL_PSL_AMOR_An
, 0xFFFFFFFFFFFFFFFFULL
);
380 cxl_p1n_write(afu
, CXL_PSL_ID_An
, CXL_PSL_ID_An_F
| CXL_PSL_ID_An_L
);
382 afu
->current_mode
= CXL_MODE_DIRECTED
;
383 afu
->num_procs
= afu
->max_procs_virtualised
;
385 if ((rc
= cxl_chardev_m_afu_add(afu
)))
388 if ((rc
= cxl_sysfs_afu_m_add(afu
)))
391 if ((rc
= cxl_chardev_s_afu_add(afu
)))
396 cxl_sysfs_afu_m_remove(afu
);
398 cxl_chardev_afu_remove(afu
);
/* Select the endianness the AFU context runs in, matching the host. */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define set_endian(sr) ((sr) |= CXL_PSL_SR_An_LE)
#else
#define set_endian(sr) ((sr) &= ~(CXL_PSL_SR_An_LE))
#endif
408 static int attach_afu_directed(struct cxl_context
*ctx
, u64 wed
, u64 amr
)
413 assign_psn_space(ctx
);
415 ctx
->elem
->ctxtime
= 0; /* disable */
416 ctx
->elem
->lpid
= cpu_to_be32(mfspr(SPRN_LPID
));
417 ctx
->elem
->haurp
= 0; /* disable */
418 ctx
->elem
->sdr
= cpu_to_be64(mfspr(SPRN_SDR1
));
420 sr
= CXL_PSL_SR_An_SC
;
422 sr
|= CXL_PSL_SR_An_MP
;
423 if (mfspr(SPRN_LPCR
) & LPCR_TC
)
424 sr
|= CXL_PSL_SR_An_TC
;
425 /* HV=0, PR=1, R=1 for userspace
426 * For kernel contexts: this would need to change
428 sr
|= CXL_PSL_SR_An_PR
| CXL_PSL_SR_An_R
;
430 sr
&= ~(CXL_PSL_SR_An_HV
);
431 if (!test_tsk_thread_flag(current
, TIF_32BIT
))
432 sr
|= CXL_PSL_SR_An_SF
;
433 ctx
->elem
->common
.pid
= cpu_to_be32(current
->pid
);
434 ctx
->elem
->common
.tid
= 0;
435 ctx
->elem
->sr
= cpu_to_be64(sr
);
437 ctx
->elem
->common
.csrp
= 0; /* disable */
438 ctx
->elem
->common
.aurp0
= 0; /* disable */
439 ctx
->elem
->common
.aurp1
= 0; /* disable */
441 cxl_prefault(ctx
, wed
);
443 ctx
->elem
->common
.sstp0
= cpu_to_be64(ctx
->sstp0
);
444 ctx
->elem
->common
.sstp1
= cpu_to_be64(ctx
->sstp1
);
446 for (r
= 0; r
< CXL_IRQ_RANGES
; r
++) {
447 ctx
->elem
->ivte_offsets
[r
] = cpu_to_be16(ctx
->irqs
.offset
[r
]);
448 ctx
->elem
->ivte_ranges
[r
] = cpu_to_be16(ctx
->irqs
.range
[r
]);
451 ctx
->elem
->common
.amr
= cpu_to_be64(amr
);
452 ctx
->elem
->common
.wed
= cpu_to_be64(wed
);
454 /* first guy needs to enable */
455 if ((result
= afu_check_and_enable(ctx
->afu
)))
458 add_process_element(ctx
);
463 static int deactivate_afu_directed(struct cxl_afu
*afu
)
465 dev_info(&afu
->dev
, "Deactivating AFU directed mode\n");
467 afu
->current_mode
= 0;
470 cxl_sysfs_afu_m_remove(afu
);
471 cxl_chardev_afu_remove(afu
);
474 cxl_afu_disable(afu
);
482 static int activate_dedicated_process(struct cxl_afu
*afu
)
484 dev_info(&afu
->dev
, "Activating dedicated process mode\n");
486 cxl_p1n_write(afu
, CXL_PSL_SCNTL_An
, CXL_PSL_SCNTL_An_PM_Process
);
488 cxl_p1n_write(afu
, CXL_PSL_CtxTime_An
, 0); /* disable */
489 cxl_p1n_write(afu
, CXL_PSL_SPAP_An
, 0); /* disable */
490 cxl_p1n_write(afu
, CXL_PSL_AMOR_An
, 0xFFFFFFFFFFFFFFFFULL
);
491 cxl_p1n_write(afu
, CXL_PSL_LPID_An
, mfspr(SPRN_LPID
));
492 cxl_p1n_write(afu
, CXL_HAURP_An
, 0); /* disable */
493 cxl_p1n_write(afu
, CXL_PSL_SDR_An
, mfspr(SPRN_SDR1
));
495 cxl_p2n_write(afu
, CXL_CSRP_An
, 0); /* disable */
496 cxl_p2n_write(afu
, CXL_AURP0_An
, 0); /* disable */
497 cxl_p2n_write(afu
, CXL_AURP1_An
, 0); /* disable */
499 afu
->current_mode
= CXL_MODE_DEDICATED
;
502 return cxl_chardev_d_afu_add(afu
);
505 static int attach_dedicated(struct cxl_context
*ctx
, u64 wed
, u64 amr
)
507 struct cxl_afu
*afu
= ctx
->afu
;
511 sr
= CXL_PSL_SR_An_SC
;
514 sr
|= CXL_PSL_SR_An_MP
;
515 if (mfspr(SPRN_LPCR
) & LPCR_TC
)
516 sr
|= CXL_PSL_SR_An_TC
;
517 sr
|= CXL_PSL_SR_An_PR
| CXL_PSL_SR_An_R
;
518 if (!test_tsk_thread_flag(current
, TIF_32BIT
))
519 sr
|= CXL_PSL_SR_An_SF
;
520 cxl_p2n_write(afu
, CXL_PSL_PID_TID_An
, (u64
)current
->pid
<< 32);
521 cxl_p1n_write(afu
, CXL_PSL_SR_An
, sr
);
523 if ((rc
= cxl_write_sstp(afu
, ctx
->sstp0
, ctx
->sstp1
)))
526 cxl_prefault(ctx
, wed
);
528 cxl_p1n_write(afu
, CXL_PSL_IVTE_Offset_An
,
529 (((u64
)ctx
->irqs
.offset
[0] & 0xffff) << 48) |
530 (((u64
)ctx
->irqs
.offset
[1] & 0xffff) << 32) |
531 (((u64
)ctx
->irqs
.offset
[2] & 0xffff) << 16) |
532 ((u64
)ctx
->irqs
.offset
[3] & 0xffff));
533 cxl_p1n_write(afu
, CXL_PSL_IVTE_Limit_An
, (u64
)
534 (((u64
)ctx
->irqs
.range
[0] & 0xffff) << 48) |
535 (((u64
)ctx
->irqs
.range
[1] & 0xffff) << 32) |
536 (((u64
)ctx
->irqs
.range
[2] & 0xffff) << 16) |
537 ((u64
)ctx
->irqs
.range
[3] & 0xffff));
539 cxl_p2n_write(afu
, CXL_PSL_AMR_An
, amr
);
541 /* master only context for dedicated */
542 assign_psn_space(ctx
);
544 if ((rc
= cxl_afu_reset(afu
)))
547 cxl_p2n_write(afu
, CXL_PSL_WED_An
, wed
);
549 return afu_enable(afu
);
552 static int deactivate_dedicated_process(struct cxl_afu
*afu
)
554 dev_info(&afu
->dev
, "Deactivating dedicated process mode\n");
556 afu
->current_mode
= 0;
559 cxl_chardev_afu_remove(afu
);
564 int _cxl_afu_deactivate_mode(struct cxl_afu
*afu
, int mode
)
566 if (mode
== CXL_MODE_DIRECTED
)
567 return deactivate_afu_directed(afu
);
568 if (mode
== CXL_MODE_DEDICATED
)
569 return deactivate_dedicated_process(afu
);
573 int cxl_afu_deactivate_mode(struct cxl_afu
*afu
)
575 return _cxl_afu_deactivate_mode(afu
, afu
->current_mode
);
578 int cxl_afu_activate_mode(struct cxl_afu
*afu
, int mode
)
582 if (!(mode
& afu
->modes_supported
))
585 if (mode
== CXL_MODE_DIRECTED
)
586 return activate_afu_directed(afu
);
587 if (mode
== CXL_MODE_DEDICATED
)
588 return activate_dedicated_process(afu
);
593 int cxl_attach_process(struct cxl_context
*ctx
, bool kernel
, u64 wed
, u64 amr
)
595 ctx
->kernel
= kernel
;
596 if (ctx
->afu
->current_mode
== CXL_MODE_DIRECTED
)
597 return attach_afu_directed(ctx
, wed
, amr
);
599 if (ctx
->afu
->current_mode
== CXL_MODE_DEDICATED
)
600 return attach_dedicated(ctx
, wed
, amr
);
605 static inline int detach_process_native_dedicated(struct cxl_context
*ctx
)
607 cxl_afu_reset(ctx
->afu
);
608 cxl_afu_disable(ctx
->afu
);
609 cxl_psl_purge(ctx
->afu
);
/*
 * TODO: handle case when this is called inside a rcu_read_lock() which may
 * happen when we unbind the driver (ie. cxl_context_detach_all()). Terminate
 * & remove use a mutex lock and schedule which will not be good with the lock
 * held. May need to write do_process_element_cmd() that handles outstanding
 * page faults synchronously.
 */
620 static inline int detach_process_native_afu_directed(struct cxl_context
*ctx
)
622 if (!ctx
->pe_inserted
)
624 if (terminate_process_element(ctx
))
626 if (remove_process_element(ctx
))
632 int cxl_detach_process(struct cxl_context
*ctx
)
634 if (ctx
->afu
->current_mode
== CXL_MODE_DEDICATED
)
635 return detach_process_native_dedicated(ctx
);
637 return detach_process_native_afu_directed(ctx
);
640 int cxl_get_irq(struct cxl_context
*ctx
, struct cxl_irq_info
*info
)
644 info
->dsisr
= cxl_p2n_read(ctx
->afu
, CXL_PSL_DSISR_An
);
645 info
->dar
= cxl_p2n_read(ctx
->afu
, CXL_PSL_DAR_An
);
646 info
->dsr
= cxl_p2n_read(ctx
->afu
, CXL_PSL_DSR_An
);
647 pidtid
= cxl_p2n_read(ctx
->afu
, CXL_PSL_PID_TID_An
);
648 info
->pid
= pidtid
>> 32;
649 info
->tid
= pidtid
& 0xffffffff;
650 info
->afu_err
= cxl_p2n_read(ctx
->afu
, CXL_AFU_ERR_An
);
651 info
->errstat
= cxl_p2n_read(ctx
->afu
, CXL_PSL_ErrStat_An
);
656 static void recover_psl_err(struct cxl_afu
*afu
, u64 errstat
)
660 pr_devel("RECOVERING FROM PSL ERROR... (0x%.16llx)\n", errstat
);
662 /* Clear PSL_DSISR[PE] */
663 dsisr
= cxl_p2n_read(afu
, CXL_PSL_DSISR_An
);
664 cxl_p2n_write(afu
, CXL_PSL_DSISR_An
, dsisr
& ~CXL_PSL_DSISR_An_PE
);
666 /* Write 1s to clear error status bits */
667 cxl_p2n_write(afu
, CXL_PSL_ErrStat_An
, errstat
);
670 int cxl_ack_irq(struct cxl_context
*ctx
, u64 tfc
, u64 psl_reset_mask
)
673 cxl_p2n_write(ctx
->afu
, CXL_PSL_TFC_An
, tfc
);
675 recover_psl_err(ctx
->afu
, psl_reset_mask
);
680 int cxl_check_error(struct cxl_afu
*afu
)
682 return (cxl_p1n_read(afu
, CXL_PSL_SCNTL_An
) == ~0ULL);