cxl: Drop commands if the PCI channel is not in normal state
drivers/misc/cxl/context.c
/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/pid.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/copro.h>

#include "cxl.h"

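/*
 * Rough context lifecycle, as a sketch (illustrative only, pieced together
 * from the functions in this file; the start step itself is driven by the
 * start work ioctl elsewhere in the driver):
 *
 *      ctx = cxl_context_alloc();
 *      rc = cxl_context_init(ctx, afu, master, mapping);
 *      ... context started via the start work ioctl ...
 *      cxl_context_detach(ctx);
 *      cxl_context_free(ctx);
 */
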
/*
 * Allocates space for a CXL context.
 */
struct cxl_context *cxl_context_alloc(void)
{
        return kzalloc(sizeof(struct cxl_context), GFP_KERNEL);
}

/*
 * Initialises a CXL context.
 */
int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
                     struct address_space *mapping)
{
        int i;

        spin_lock_init(&ctx->sste_lock);
        ctx->afu = afu;
        ctx->master = master;
        ctx->pid = NULL; /* Set in start work ioctl */
        mutex_init(&ctx->mapping_lock);
        ctx->mapping = mapping;

        /*
         * Allocate the segment table before we put it in the IDR so that we
         * can always access it when dereferenced from the IDR. For the same
         * reason, the segment table is only destroyed after the context is
         * removed from the IDR. Access to this in the IOCTL is protected by
         * Linux filesystem semantics (can't IOCTL until open is complete).
         */
        i = cxl_alloc_sst(ctx);
        if (i)
                return i;

        INIT_WORK(&ctx->fault_work, cxl_handle_fault);

        init_waitqueue_head(&ctx->wq);
        spin_lock_init(&ctx->lock);

        ctx->irq_bitmap = NULL;
        ctx->pending_irq = false;
        ctx->pending_fault = false;
        ctx->pending_afu_err = false;

        /*
         * When we have to destroy all contexts in cxl_context_detach_all() we
         * end up with afu_release_irqs() called from inside an
         * idr_for_each_entry(). Hence we need to make sure that anything
         * dereferenced from this IDR is ok before we allocate the IDR here.
         * This clears out the IRQ ranges to ensure this.
         */
        for (i = 0; i < CXL_IRQ_RANGES; i++)
                ctx->irqs.range[i] = 0;

        mutex_init(&ctx->status_mutex);

        ctx->status = OPENED;

        /*
         * Allocating the IDR entry! We had better make sure everything that
         * dereferences from it is set up first.
         */
        mutex_lock(&afu->contexts_lock);
        idr_preload(GFP_KERNEL);
        i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0,
                      ctx->afu->num_procs, GFP_NOWAIT);
        idr_preload_end();
        mutex_unlock(&afu->contexts_lock);
        if (i < 0)
                return i;

        ctx->pe = i;
        ctx->elem = &ctx->afu->spa[i];
        ctx->pe_inserted = false;
        return 0;
}

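/*
 * Fault handler for mmaps of the problem state area. In dedicated mode the
 * whole per-AFU space is mapped; otherwise only this context's space is.
 * Faults outside the area, or before the context is STARTED, get SIGBUS.
 */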
static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct cxl_context *ctx = vma->vm_file->private_data;
        unsigned long address = (unsigned long)vmf->virtual_address;
        u64 area, offset;

        offset = vmf->pgoff << PAGE_SHIFT;

        pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
                 __func__, ctx->pe, address, offset);

        if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
                area = ctx->afu->psn_phys;
                if (offset >= ctx->afu->adapter->ps_size)
                        return VM_FAULT_SIGBUS;
        } else {
                area = ctx->psn_phys;
                if (offset >= ctx->psn_size)
                        return VM_FAULT_SIGBUS;
        }

        mutex_lock(&ctx->status_mutex);

        if (ctx->status != STARTED) {
                mutex_unlock(&ctx->status_mutex);
                pr_devel("%s: Context not started, failing problem state access\n", __func__);
                return VM_FAULT_SIGBUS;
        }

        vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);

        mutex_unlock(&ctx->status_mutex);

        return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct cxl_mmap_vmops = {
        .fault = cxl_mmap_fault,
};

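/*
 * A minimal caller sketch (an assumption for illustration: the real entry
 * point is the driver's mmap file operation, which lives outside this file
 * and may perform additional checks before calling in here):
 *
 *      static int afu_mmap(struct file *file, struct vm_area_struct *vm)
 *      {
 *              struct cxl_context *ctx = file->private_data;
 *
 *              return cxl_context_iomap(ctx, vm);
 *      }
 */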
/*
 * Map a per-context mmio space into the given vma.
 */
int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
{
        u64 start = vma->vm_pgoff << PAGE_SHIFT;
        u64 len = vma->vm_end - vma->vm_start;

        if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
                if (start + len > ctx->afu->adapter->ps_size)
                        return -EINVAL;
        } else {
                if (start + len > ctx->psn_size)
                        return -EINVAL;
        }

        if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
                /* make sure there is a valid per process space for this AFU */
                if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
                        pr_devel("AFU doesn't support mmio space\n");
                        return -EINVAL;
                }

                /* Can't mmap until the AFU is enabled */
                if (!ctx->afu->enabled)
                        return -EBUSY;
        }

        pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
                 ctx->psn_phys, ctx->pe, ctx->master);

        vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_ops = &cxl_mmap_vmops;
        return 0;
}

/*
 * Detach a context from the hardware. This disables interrupts and doesn't
 * return until all outstanding interrupts for this context have completed. The
 * hardware should no longer access *ctx after this has returned.
 */
int __detach_context(struct cxl_context *ctx)
{
        enum cxl_context_status status;

        mutex_lock(&ctx->status_mutex);
        status = ctx->status;
        ctx->status = CLOSED;
        mutex_unlock(&ctx->status_mutex);
        if (status != STARTED)
                return -EBUSY;

        /* Only warn if we detached while the link was OK.
         * If detach fails when hw is down, we don't care.
         */
        WARN_ON(cxl_detach_process(ctx) &&
                cxl_adapter_link_ok(ctx->afu->adapter));
        flush_work(&ctx->fault_work); /* Only needed for dedicated process */
        put_pid(ctx->pid);
        cxl_ctx_put();
        return 0;
}

/*
 * Detach the given context from the AFU. This doesn't actually
 * free the context but it should stop the context running in hardware
 * (i.e. prevent this context from generating any further interrupts
 * so that it can be freed).
 */
void cxl_context_detach(struct cxl_context *ctx)
{
        int rc;

        rc = __detach_context(ctx);
        if (rc)
                return;

        afu_release_irqs(ctx, ctx);
        wake_up_all(&ctx->wq);
}

/*
 * Detach all contexts on the given AFU.
 */
void cxl_context_detach_all(struct cxl_afu *afu)
{
        struct cxl_context *ctx;
        int tmp;

        mutex_lock(&afu->contexts_lock);
        idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
                /*
                 * Anything done in here needs to be set up before the IDR is
                 * created and torn down after the IDR is removed.
                 */
                cxl_context_detach(ctx);

                /*
                 * We are force detaching - remove any active PSA mappings so
                 * userspace cannot interfere with the card if it comes back.
                 * Easiest way to exercise this is to unbind and rebind the
                 * driver via sysfs while it is in use.
                 */
                mutex_lock(&ctx->mapping_lock);
                if (ctx->mapping)
                        unmap_mapping_range(ctx->mapping, 0, 0, 1);
                mutex_unlock(&ctx->mapping_lock);
        }
        mutex_unlock(&afu->contexts_lock);
}

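/*
 * RCU callback that does the actual free: release the segment table page,
 * then the context itself. Deferring this to RCU ensures that any lockless
 * reader that could still see the context in the IDR has finished.
 */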
static void reclaim_ctx(struct rcu_head *rcu)
{
        struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu);

        free_page((u64)ctx->sstp);
        ctx->sstp = NULL;

        kfree(ctx);
}

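/*
 * Remove the context from the AFU's IDR so it can no longer be looked up,
 * then hand the actual free off to RCU (see reclaim_ctx() above).
 */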
void cxl_context_free(struct cxl_context *ctx)
{
        mutex_lock(&ctx->afu->contexts_lock);
        idr_remove(&ctx->afu->contexts_idr, ctx->pe);
        mutex_unlock(&ctx->afu->contexts_lock);
        call_rcu(&ctx->rcu, reclaim_ctx);
}