/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/pid.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "cxl" "."
#include <asm/current.h>
#include <asm/copro.h>
#include <asm/mmu.h>

#include "cxl.h"

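/* Check whether an SSTE already holds the given SLB entry. */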
static bool sste_matches(struct cxl_sste *sste, struct copro_slb *slb)
{
	return ((sste->vsid_data == cpu_to_be64(slb->vsid)) &&
		(sste->esid_data == cpu_to_be64(slb->esid)));
}

/*
 * This finds a free SSTE for the given SLB, or returns NULL if it's already in
 * the segment table.
 */
static struct cxl_sste *find_free_sste(struct cxl_context *ctx,
				       struct copro_slb *slb)
{
	struct cxl_sste *primary, *sste, *ret = NULL;
	unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */
	unsigned int entry;
	unsigned int hash;

	if (slb->vsid & SLB_VSID_B_1T)
		hash = (slb->esid >> SID_SHIFT_1T) & mask;
	else /* 256M */
		hash = (slb->esid >> SID_SHIFT) & mask;

	primary = ctx->sstp + (hash << 3);

	for (entry = 0, sste = primary; entry < 8; entry++, sste++) {
		if (!ret && !(be64_to_cpu(sste->esid_data) & SLB_ESID_V))
			ret = sste;
		if (sste_matches(sste, slb))
			return NULL;
	}
	if (ret)
		return ret;

	/* Nothing free, select an entry to cast out */
	ret = primary + ctx->sst_lru;
	ctx->sst_lru = (ctx->sst_lru + 1) & 0x7;

	return ret;
}

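/*
 * Insert an SLB entry into the context's segment table, under the SSTE
 * lock. If the entry is already present, find_free_sste() returns NULL
 * and there is nothing to do.
 */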
static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb)
{
	struct cxl_sste *sste;
	unsigned long flags;

	spin_lock_irqsave(&ctx->sste_lock, flags);
	sste = find_free_sste(ctx, slb);
	if (!sste)
		goto out_unlock;

	pr_devel("CXL Populating SST[%li]: %#llx %#llx\n",
			sste - ctx->sstp, slb->vsid, slb->esid);

	sste->vsid_data = cpu_to_be64(slb->vsid);
	sste->esid_data = cpu_to_be64(slb->esid);
out_unlock:
	spin_unlock_irqrestore(&ctx->sste_lock, flags);
}

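/*
 * Calculate the SLB entry for a faulting effective address and, if that
 * succeeds, load it into the segment table.
 */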
static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm,
			     u64 ea)
{
	struct copro_slb slb = {0,0};
	int rc;

	if (!(rc = copro_calculate_slb(mm, ea, &slb))) {
		cxl_load_segment(ctx, &slb);
	}

	return rc;
}

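/*
 * Acknowledge the translation fault with an address error (AE), then
 * record the fault details on the context and wake up anyone waiting on
 * it so the error can be reported.
 */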
static void cxl_ack_ae(struct cxl_context *ctx)
{
	unsigned long flags;

	cxl_ack_irq(ctx, CXL_PSL_TFC_An_AE, 0);

	spin_lock_irqsave(&ctx->lock, flags);
	ctx->pending_fault = true;
	ctx->fault_addr = ctx->dar;
	ctx->fault_dsisr = ctx->dsisr;
	spin_unlock_irqrestore(&ctx->lock, flags);

	wake_up_all(&ctx->wq);
}

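/*
 * Handle a segment miss: fault the segment in and restart the
 * translation, or acknowledge with an address error if that fails.
 */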
static int cxl_handle_segment_miss(struct cxl_context *ctx,
				   struct mm_struct *mm, u64 ea)
{
	int rc;

	pr_devel("CXL interrupt: Segment fault pe: %i ea: %#llx\n", ctx->pe, ea);

	if ((rc = cxl_fault_segment(ctx, mm, ea)))
		cxl_ack_ae(ctx);
	else {
		mb(); /* Order seg table write to TFC MMIO write */
		cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
	}

	return IRQ_HANDLED;
}

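/*
 * Handle a page fault: fault the page in via the task's mm, preload the
 * hash page table entry ourselves (update_mmu_cache() won't have done
 * it here), then restart the translation.
 */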
static void cxl_handle_page_fault(struct cxl_context *ctx,
				  struct mm_struct *mm, u64 dsisr, u64 dar)
{
	unsigned flt = 0;
	int result;
	unsigned long access, flags;

	if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) {
		pr_devel("copro_handle_mm_fault failed: %#x\n", result);
		return cxl_ack_ae(ctx);
	}

	/*
	 * update_mmu_cache() will not have loaded the hash since current->trap
	 * is not a 0x400 or 0x300, so just call hash_page_mm() here.
	 */
	access = _PAGE_PRESENT;
	if (dsisr & CXL_PSL_DSISR_An_S)
		access |= _PAGE_RW;
	/* User access for user contexts, or for user addresses (top EA bit clear) */
	if ((!ctx->kernel) || !(dar & (1ULL << 63)))
		access |= _PAGE_USER;
	local_irq_save(flags);
	hash_page_mm(mm, dar, access, 0x300);
	local_irq_restore(flags);

	pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe);
	cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
}

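/*
 * Bottom half of the translation fault interrupt, run from the context's
 * fault work queue. Bail out if the fault registers have changed under
 * us (e.g. a detach cleared them via the PSL purge); otherwise look up
 * the task and mm for the context and dispatch to the segment miss or
 * page fault handler.
 */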
void cxl_handle_fault(struct work_struct *fault_work)
{
	struct cxl_context *ctx =
		container_of(fault_work, struct cxl_context, fault_work);
	u64 dsisr = ctx->dsisr;
	u64 dar = ctx->dar;
	struct task_struct *task;
	struct mm_struct *mm;

	if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
	    cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar ||
	    cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) {
		/*
		 * Most likely explanation is harmless - a dedicated process
		 * has detached and these were cleared by the PSL purge, but
		 * warn about it just in case.
		 */
		dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n");
		return;
	}

	pr_devel("CXL BOTTOM HALF handling fault for afu pe: %i. "
		 "DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar);

	if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
		pr_devel("cxl_handle_fault unable to get task %i\n",
			 pid_nr(ctx->pid));
		cxl_ack_ae(ctx);
		return;
	}
	if (!(mm = get_task_mm(task))) {
		pr_devel("cxl_handle_fault unable to get mm %i\n",
			 pid_nr(ctx->pid));
		cxl_ack_ae(ctx);
		goto out;
	}

	if (dsisr & CXL_PSL_DSISR_An_DS)
		cxl_handle_segment_miss(ctx, mm, dar);
	else if (dsisr & CXL_PSL_DSISR_An_DM)
		cxl_handle_page_fault(ctx, mm, dsisr, dar);
	else
		WARN(1, "cxl_handle_fault has nothing to handle\n");

	mmput(mm);
out:
	put_task_struct(task);
}

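/*
 * Pre-load the segment containing a single effective address, typically
 * the work element descriptor passed in at attach time.
 */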
static void cxl_prefault_one(struct cxl_context *ctx, u64 ea)
{
	int rc;
	struct task_struct *task;
	struct mm_struct *mm;

	if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
		pr_devel("cxl_prefault_one unable to get task %i\n",
			 pid_nr(ctx->pid));
		return;
	}
	if (!(mm = get_task_mm(task))) {
		pr_devel("cxl_prefault_one unable to get mm %i\n",
			 pid_nr(ctx->pid));
		put_task_struct(task);
		return;
	}

	rc = cxl_fault_segment(ctx, mm, ea);

	mmput(mm);
	put_task_struct(task);
}

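/*
 * Return the first effective address of the segment following ea: 1TB
 * segments if the VSID's B field says so, 256MB segments otherwise.
 */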
static u64 next_segment(u64 ea, u64 vsid)
{
	if (vsid & SLB_VSID_B_1T)
		ea |= (1ULL << 40) - 1;
	else
		ea |= (1ULL << 28) - 1;

	return ea + 1;
}

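/*
 * Walk every VMA in the task's address space and load an SSTE for each
 * segment backing it. The last_esid check skips duplicate work when
 * consecutive addresses fall within the same segment.
 */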
static void cxl_prefault_vma(struct cxl_context *ctx)
{
	u64 ea, last_esid = 0;
	struct copro_slb slb = {0,0};
	struct vm_area_struct *vma;
	int rc;
	struct task_struct *task;
	struct mm_struct *mm;

	if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
		pr_devel("cxl_prefault_vma unable to get task %i\n",
			 pid_nr(ctx->pid));
		return;
	}
	if (!(mm = get_task_mm(task))) {
		pr_devel("cxl_prefault_vma unable to get mm %i\n",
			 pid_nr(ctx->pid));
		goto out1;
	}

	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		for (ea = vma->vm_start; ea < vma->vm_end;
				ea = next_segment(ea, slb.vsid)) {
			rc = copro_calculate_slb(mm, ea, &slb);
			if (rc)
				continue;

			if (last_esid == slb.esid)
				continue;

			cxl_load_segment(ctx, &slb);
			last_esid = slb.esid;
		}
	}
	up_read(&mm->mmap_sem);

	mmput(mm);
out1:
	put_task_struct(task);
}

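/*
 * Optionally warm up the segment table at attach time, according to the
 * AFU's prefault mode: just the segment of the work element descriptor,
 * every segment of every VMA, or nothing at all.
 */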
void cxl_prefault(struct cxl_context *ctx, u64 wed)
{
	switch (ctx->afu->prefault_mode) {
	case CXL_PREFAULT_WED:
		cxl_prefault_one(ctx, wed);
		break;
	case CXL_PREFAULT_ALL:
		cxl_prefault_vma(ctx);
		break;
	default:
		break;
	}
}