/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/pid.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/copro.h>

#include "cxl.h"

/*
 * Allocates space for a CXL context.
 */
struct cxl_context *cxl_context_alloc(void)
{
	return kzalloc(sizeof(struct cxl_context), GFP_KERNEL);
}

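/*
 * Typical pairing with cxl_context_init() below, as a minimal sketch
 * (error handling abbreviated, and the err_free label is illustrative;
 * the real call sites live in file.c and api.c):
 *
 *	ctx = cxl_context_alloc();
 *	if (!ctx)
 *		return -ENOMEM;
 *	rc = cxl_context_init(ctx, afu, false, mapping);
 *	if (rc)
 *		goto err_free;
 */
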
/*
 * Initialises a CXL context.
 */
int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
		     struct address_space *mapping)
{
	int i;

	spin_lock_init(&ctx->sste_lock);
	ctx->afu = afu;
	ctx->master = master;
	ctx->pid = ctx->glpid = NULL; /* Set in start work ioctl */
	mutex_init(&ctx->mapping_lock);
	ctx->mapping = mapping;

	/*
	 * Allocate the segment table before we put the context in the IDR so
	 * that it can always be accessed once the context is visible in the
	 * IDR. For the same reason, the segment table is only destroyed after
	 * the context is removed from the IDR. Access to this in the IOCTL is
	 * protected by Linux filesystem semantics (can't IOCTL until open is
	 * complete).
	 */
	i = cxl_alloc_sst(ctx);
	if (i)
		return i;

	INIT_WORK(&ctx->fault_work, cxl_handle_fault);

	init_waitqueue_head(&ctx->wq);
	spin_lock_init(&ctx->lock);

	ctx->irq_bitmap = NULL;
	ctx->pending_irq = false;
	ctx->pending_fault = false;
	ctx->pending_afu_err = false;

	/*
	 * When we have to destroy all contexts in cxl_context_detach_all() we
	 * end up with afu_release_irqs() called from inside an
	 * idr_for_each_entry(). Hence we need to make sure that anything
	 * dereferenced from this IDR is ok before we allocate the IDR entry
	 * here. This clears out the IRQ ranges to ensure this.
	 */
	for (i = 0; i < CXL_IRQ_RANGES; i++)
		ctx->irqs.range[i] = 0;

	mutex_init(&ctx->status_mutex);

	ctx->status = OPENED;

	/*
	 * Allocating the IDR entry: make sure everything dereferenced from
	 * it has been set up first.
	 */
	mutex_lock(&afu->contexts_lock);
	idr_preload(GFP_KERNEL);
	i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0,
		      ctx->afu->num_procs, GFP_NOWAIT);
	idr_preload_end();
	mutex_unlock(&afu->contexts_lock);
	if (i < 0)
		return i;

	ctx->pe = i;
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		ctx->elem = &ctx->afu->native->spa[i];
		ctx->external_pe = ctx->pe;
	} else {
		ctx->external_pe = -1; /* assigned when attaching */
	}
	ctx->pe_inserted = false;

	/*
	 * Take a ref on the afu so that it stays alive at least until
	 * this context is reclaimed in reclaim_ctx.
	 */
	cxl_afu_get(afu);
	return 0;
}

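/*
 * Fault handler for mmap()ed problem state space: resolve the faulting
 * offset against the AFU-wide (dedicated mode) or per-context problem
 * state area and insert the backing pfn directly.
 */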
static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct cxl_context *ctx = vma->vm_file->private_data;
	unsigned long address = (unsigned long)vmf->virtual_address;
	u64 area, offset;

	offset = vmf->pgoff << PAGE_SHIFT;

	pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
		 __func__, ctx->pe, address, offset);

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
		area = ctx->afu->psn_phys;
		if (offset >= ctx->afu->adapter->ps_size)
			return VM_FAULT_SIGBUS;
	} else {
		area = ctx->psn_phys;
		if (offset >= ctx->psn_size)
			return VM_FAULT_SIGBUS;
	}

	mutex_lock(&ctx->status_mutex);

	if (ctx->status != STARTED) {
		mutex_unlock(&ctx->status_mutex);
		pr_devel("%s: Context not started, failing problem state access\n", __func__);
		if (ctx->mmio_err_ff) {
			/*
			 * Back the mapping with a page of all 0xFF so reads
			 * see all-Fs, as they would from hardware that has
			 * gone away, instead of taking a SIGBUS.
			 */
			if (!ctx->ff_page) {
				ctx->ff_page = alloc_page(GFP_USER);
				if (!ctx->ff_page)
					return VM_FAULT_OOM;
				memset(page_address(ctx->ff_page), 0xff, PAGE_SIZE);
			}
			get_page(ctx->ff_page);
			vmf->page = ctx->ff_page;
			vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
			return 0;
		}
		return VM_FAULT_SIGBUS;
	}

	vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);

	mutex_unlock(&ctx->status_mutex);

	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct cxl_mmap_vmops = {
	.fault = cxl_mmap_fault,
};

/*
 * Map a per-context mmio space into the given vma.
 */
int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
{
	u64 start = vma->vm_pgoff << PAGE_SHIFT;
	u64 len = vma->vm_end - vma->vm_start;

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
		if (start + len > ctx->afu->adapter->ps_size)
			return -EINVAL;
	} else {
		if (start + len > ctx->psn_size)
			return -EINVAL;
	}

	if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
		/* make sure there is a valid per process space for this AFU */
		if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
			pr_devel("AFU doesn't support mmio space\n");
			return -EINVAL;
		}

		/* Can't mmap until the AFU is enabled */
		if (!ctx->afu->enabled)
			return -EBUSY;
	}

	pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
		 ctx->psn_phys, ctx->pe, ctx->master);

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &cxl_mmap_vmops;
	return 0;
}

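/*
 * A minimal sketch of how the above is wired up from the file operations
 * (illustrative; the real afu_mmap in file.c also checks that the context
 * has reached the STARTED state before mapping):
 *
 *	static int afu_mmap(struct file *file, struct vm_area_struct *vm)
 *	{
 *		struct cxl_context *ctx = file->private_data;
 *
 *		return cxl_context_iomap(ctx, vm);
 *	}
 */
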
/*
 * Detach a context from the hardware. This disables interrupts and doesn't
 * return until all outstanding interrupts for this context have completed. The
 * hardware should no longer access *ctx after this has returned.
 */
int __detach_context(struct cxl_context *ctx)
{
	enum cxl_context_status status;

	mutex_lock(&ctx->status_mutex);
	status = ctx->status;
	ctx->status = CLOSED;
	mutex_unlock(&ctx->status_mutex);
	if (status != STARTED)
		return -EBUSY;

	/*
	 * Only warn if we detached while the link was OK.
	 * If detach fails when hw is down, we don't care.
	 */
	WARN_ON(cxl_ops->detach_process(ctx) &&
		cxl_ops->link_ok(ctx->afu->adapter, ctx->afu));
	flush_work(&ctx->fault_work); /* Only needed for dedicated process */

	/* release the reference to the group leader and mm handling pid */
	put_pid(ctx->pid);
	put_pid(ctx->glpid);

	cxl_ctx_put();
	return 0;
}

/*
 * Detach the given context from the AFU. This doesn't actually
 * free the context but it should stop the context running in hardware
 * (i.e. prevent this context from generating any further interrupts
 * so that it can be freed).
 */
void cxl_context_detach(struct cxl_context *ctx)
{
	int rc;

	rc = __detach_context(ctx);
	if (rc)
		return;

	afu_release_irqs(ctx, ctx);
	wake_up_all(&ctx->wq);
}

/*
 * Detach all contexts on the given AFU.
 */
void cxl_context_detach_all(struct cxl_afu *afu)
{
	struct cxl_context *ctx;
	int tmp;

	mutex_lock(&afu->contexts_lock);
	idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
		/*
		 * Anything done in here needs to be set up before the IDR is
		 * created and torn down after the IDR is removed.
		 */
		cxl_context_detach(ctx);

		/*
		 * We are force detaching - remove any active PSA mappings so
		 * userspace cannot interfere with the card if it comes back.
		 * Easiest way to exercise this is to unbind and rebind the
		 * driver via sysfs while it is in use.
		 */
		mutex_lock(&ctx->mapping_lock);
		if (ctx->mapping)
			unmap_mapping_range(ctx->mapping, 0, 0, 1);
		mutex_unlock(&ctx->mapping_lock);
	}
	mutex_unlock(&afu->contexts_lock);
}

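/*
 * RCU callback that performs the actual teardown of a context: frees the
 * segment table, the 0xFF fault page and the IRQ bitmap, drops the afu
 * reference taken in cxl_context_init, and finally frees the context.
 */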
static void reclaim_ctx(struct rcu_head *rcu)
{
	struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu);

	free_page((u64)ctx->sstp);
	if (ctx->ff_page)
		__free_page(ctx->ff_page);
	ctx->sstp = NULL;
	if (ctx->kernelapi)
		kfree(ctx->mapping);

	kfree(ctx->irq_bitmap);

	/* Drop ref to the afu device taken during cxl_context_init */
	cxl_afu_put(ctx->afu);

	kfree(ctx);
}

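/*
 * Remove the context from the AFU's IDR and defer the actual free to an
 * RCU grace period via reclaim_ctx().
 */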
void cxl_context_free(struct cxl_context *ctx)
{
	mutex_lock(&ctx->afu->contexts_lock);
	idr_remove(&ctx->afu->contexts_idr, ctx->pe);
	mutex_unlock(&ctx->afu->contexts_lock);
	call_rcu(&ctx->rcu, reclaim_ctx);
}