/**
 * IBM Accelerator Family 'GenWQE'
 *
 * (C) Copyright IBM Corp. 2013
 *
 * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
 * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
 * Author: Michael Jung <mijung@gmx.net>
 * Author: Michael Ruettger <michael@ibmra.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Character device representation of the GenWQE device. This allows
 * user-space applications to communicate with the card.
 */

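/*
 * For orientation, a typical user-space session with this interface
 * looks roughly as follows (illustrative sketch only; the device name
 * is an example, see genwqe_device_create() for how it is composed):
 *
 *	int fd = open("/dev/genwqe0_card", O_RDWR);
 *
 *	...allocate DMA buffers via mmap() or pin existing memory
 *	via the GENWQE_PIN_MEM ioctl...
 *	...submit work via the GENWQE_EXECUTE_DDCB ioctl...
 *
 *	close(fd);
 */
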
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/atomic.h>

#include "card_base.h"
#include "card_ddcb.h"

static int genwqe_open_files(struct genwqe_dev *cd)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&cd->file_lock, flags);
	rc = list_empty(&cd->file_list);
	spin_unlock_irqrestore(&cd->file_lock, flags);
	return !rc;
}

static void genwqe_add_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
{
	unsigned long flags;

	cfile->owner = current;
	spin_lock_irqsave(&cd->file_lock, flags);
	list_add(&cfile->list, &cd->file_list);
	spin_unlock_irqrestore(&cd->file_lock, flags);
}

static int genwqe_del_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
{
	unsigned long flags;

	spin_lock_irqsave(&cd->file_lock, flags);
	list_del(&cfile->list);
	spin_unlock_irqrestore(&cd->file_lock, flags);

	return 0;
}

static void genwqe_add_pin(struct genwqe_file *cfile, struct dma_mapping *m)
{
	unsigned long flags;

	spin_lock_irqsave(&cfile->pin_lock, flags);
	list_add(&m->pin_list, &cfile->pin_list);
	spin_unlock_irqrestore(&cfile->pin_lock, flags);
}

static int genwqe_del_pin(struct genwqe_file *cfile, struct dma_mapping *m)
{
	unsigned long flags;

	spin_lock_irqsave(&cfile->pin_lock, flags);
	list_del(&m->pin_list);
	spin_unlock_irqrestore(&cfile->pin_lock, flags);

	return 0;
}

/**
 * genwqe_search_pin() - Search for the mapping for a userspace address
 * @cfile:	Descriptor of opened file
 * @u_addr:	User virtual address
 * @size:	Size of buffer
 * @virt_addr:	Kernel virtual address to be updated
 *
 * Return: Pointer to the corresponding mapping; NULL if not found
 */
static struct dma_mapping *genwqe_search_pin(struct genwqe_file *cfile,
					     unsigned long u_addr,
					     unsigned int size,
					     void **virt_addr)
{
	unsigned long flags;
	struct dma_mapping *m;

	spin_lock_irqsave(&cfile->pin_lock, flags);

	list_for_each_entry(m, &cfile->pin_list, pin_list) {
		if ((((u64)m->u_vaddr) <= (u_addr)) &&
		    (((u64)m->u_vaddr + m->size) >= (u_addr + size))) {

			if (virt_addr)
				*virt_addr = m->k_vaddr +
					(u_addr - (u64)m->u_vaddr);

			spin_unlock_irqrestore(&cfile->pin_lock, flags);
			return m;
		}
	}
	spin_unlock_irqrestore(&cfile->pin_lock, flags);
	return NULL;
}

static void __genwqe_add_mapping(struct genwqe_file *cfile,
				 struct dma_mapping *dma_map)
{
	unsigned long flags;

	spin_lock_irqsave(&cfile->map_lock, flags);
	list_add(&dma_map->card_list, &cfile->map_list);
	spin_unlock_irqrestore(&cfile->map_lock, flags);
}

static void __genwqe_del_mapping(struct genwqe_file *cfile,
				 struct dma_mapping *dma_map)
{
	unsigned long flags;

	spin_lock_irqsave(&cfile->map_lock, flags);
	list_del(&dma_map->card_list);
	spin_unlock_irqrestore(&cfile->map_lock, flags);
}

/**
 * __genwqe_search_mapping() - Search for the mapping for a userspace address
 * @cfile:	descriptor of opened file
 * @u_addr:	user virtual address
 * @size:	size of buffer
 * @dma_addr:	DMA address to be updated
 * @virt_addr:	kernel virtual address to be updated
 *
 * Return: Pointer to the corresponding mapping; NULL if not found
 */
static struct dma_mapping *__genwqe_search_mapping(struct genwqe_file *cfile,
						   unsigned long u_addr,
						   unsigned int size,
						   dma_addr_t *dma_addr,
						   void **virt_addr)
{
	unsigned long flags;
	struct dma_mapping *m;
	struct pci_dev *pci_dev = cfile->cd->pci_dev;

	spin_lock_irqsave(&cfile->map_lock, flags);
	list_for_each_entry(m, &cfile->map_list, card_list) {

		if ((((u64)m->u_vaddr) <= (u_addr)) &&
		    (((u64)m->u_vaddr + m->size) >= (u_addr + size))) {

			/* match found: current is as expected and
			   addr is in range */
			if (dma_addr)
				*dma_addr = m->dma_addr +
					(u_addr - (u64)m->u_vaddr);

			if (virt_addr)
				*virt_addr = m->k_vaddr +
					(u_addr - (u64)m->u_vaddr);

			spin_unlock_irqrestore(&cfile->map_lock, flags);
			return m;
		}
	}
	spin_unlock_irqrestore(&cfile->map_lock, flags);

	dev_err(&pci_dev->dev,
		"[%s] Entry not found: u_addr=%lx, size=%x\n",
		__func__, u_addr, size);

	return NULL;
}

static void genwqe_remove_mappings(struct genwqe_file *cfile)
{
	int i = 0;
	struct list_head *node, *next;
	struct dma_mapping *dma_map;
	struct genwqe_dev *cd = cfile->cd;
	struct pci_dev *pci_dev = cfile->cd->pci_dev;

	list_for_each_safe(node, next, &cfile->map_list) {
		dma_map = list_entry(node, struct dma_mapping, card_list);

		list_del_init(&dma_map->card_list);

		/*
		 * This is really a bug, because those things should
		 * have been already tidied up.
		 *
		 * GENWQE_MAPPING_RAW should have been removed via munmap().
		 * GENWQE_MAPPING_SGL_TEMP should be removed by tidy up code.
		 */
		dev_err(&pci_dev->dev,
			"[%s] %d. cleanup mapping: u_vaddr=%p u_kaddr=%016lx dma_addr=%lx\n",
			__func__, i++, dma_map->u_vaddr,
			(unsigned long)dma_map->k_vaddr,
			(unsigned long)dma_map->dma_addr);

		if (dma_map->type == GENWQE_MAPPING_RAW) {
			/* we allocated this dynamically */
			__genwqe_free_consistent(cd, dma_map->size,
						 dma_map->k_vaddr,
						 dma_map->dma_addr);
			kfree(dma_map);
		} else if (dma_map->type == GENWQE_MAPPING_SGL_TEMP) {
			/* we use dma_map statically from the request */
			genwqe_user_vunmap(cd, dma_map, NULL);
		}
	}
}

static void genwqe_remove_pinnings(struct genwqe_file *cfile)
{
	struct list_head *node, *next;
	struct dma_mapping *dma_map;
	struct genwqe_dev *cd = cfile->cd;

	list_for_each_safe(node, next, &cfile->pin_list) {
		dma_map = list_entry(node, struct dma_mapping, pin_list);

		/*
		 * This is not a bug, because a killed process might
		 * not call the unpin ioctl, which is supposed to free
		 * the resources.
		 *
		 * Pinnings are dynamically allocated and need to be
		 * deleted.
		 */
		list_del_init(&dma_map->pin_list);
		genwqe_user_vunmap(cd, dma_map, NULL);
		kfree(dma_map);
	}
}

/**
 * genwqe_kill_fasync() - Send signal to all processes with open GenWQE files
 * @cd:	GenWQE device information
 * @sig: signal to send out
 *
 * E.g. genwqe_kill_fasync(cd, SIGIO);
 */
static int genwqe_kill_fasync(struct genwqe_dev *cd, int sig)
{
	unsigned int files = 0;
	unsigned long flags;
	struct genwqe_file *cfile;

	spin_lock_irqsave(&cd->file_lock, flags);
	list_for_each_entry(cfile, &cd->file_list, list) {
		if (cfile->async_queue)
			kill_fasync(&cfile->async_queue, sig, POLL_HUP);
		files++;
	}
	spin_unlock_irqrestore(&cd->file_lock, flags);
	return files;
}

static int genwqe_force_sig(struct genwqe_dev *cd, int sig)
{
	unsigned int files = 0;
	unsigned long flags;
	struct genwqe_file *cfile;

	spin_lock_irqsave(&cd->file_lock, flags);
	list_for_each_entry(cfile, &cd->file_list, list) {
		force_sig(sig, cfile->owner);
		files++;
	}
	spin_unlock_irqrestore(&cd->file_lock, flags);
	return files;
}

/**
 * genwqe_open() - file open
 * @inode:	file system information
 * @filp:	file handle
 *
 * This function is executed whenever an application calls
 * open("/dev/genwqe",..).
 *
 * Return: 0 if successful or <0 on error
 */
static int genwqe_open(struct inode *inode, struct file *filp)
{
	struct genwqe_dev *cd;
	struct genwqe_file *cfile;
	struct pci_dev *pci_dev;

	cfile = kzalloc(sizeof(*cfile), GFP_KERNEL);
	if (cfile == NULL)
		return -ENOMEM;

	cd = container_of(inode->i_cdev, struct genwqe_dev, cdev_genwqe);
	pci_dev = cd->pci_dev;
	cfile->cd = cd;
	cfile->filp = filp;
	cfile->client = NULL;

	spin_lock_init(&cfile->map_lock);  /* list of raw memory allocations */
	INIT_LIST_HEAD(&cfile->map_list);

	spin_lock_init(&cfile->pin_lock);  /* list of user pinned memory */
	INIT_LIST_HEAD(&cfile->pin_list);

	filp->private_data = cfile;

	genwqe_add_file(cd, cfile);
	return 0;
}

/**
 * genwqe_fasync() - Setup process to receive SIGIO.
 * @fd:	file descriptor
 * @filp: file handle
 * @mode: file mode
 *
 * Sending a signal works as follows:
 *
 *	if (cdev->async_queue)
 *		kill_fasync(&cdev->async_queue, SIGIO, POLL_IN);
 *
 * Some devices also implement asynchronous notification to indicate
 * when the device can be written; in this case, of course,
 * kill_fasync must be called with a mode of POLL_OUT.
 */
static int genwqe_fasync(int fd, struct file *filp, int mode)
{
	struct genwqe_file *cdev = (struct genwqe_file *)filp->private_data;

	return fasync_helper(fd, filp, mode, &cdev->async_queue);
}

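/*
 * For reference, a user-space process would opt in to these SIGIO
 * notifications with the usual fcntl() sequence (illustrative sketch,
 * not part of this driver; fd is an open GenWQE file descriptor):
 *
 *	signal(SIGIO, my_sigio_handler);
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC);
 */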

/**
 * genwqe_release() - file close
 * @inode:	file system information
 * @filp:	file handle
 *
 * This function is executed whenever an application calls 'close(fd_genwqe)'
 *
 * Return: always 0
 */
static int genwqe_release(struct inode *inode, struct file *filp)
{
	struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
	struct genwqe_dev *cd = cfile->cd;

	/* there must be no entries in these lists! */
	genwqe_remove_mappings(cfile);
	genwqe_remove_pinnings(cfile);

	/* remove this filp from the asynchronously notified filps */
	genwqe_fasync(-1, filp, 0);

	/*
	 * For this to work, cd must not be released before this
	 * cfile is released; otherwise the list entry would become
	 * invalid, because the list itself gets reinstantiated!
	 */
	genwqe_del_file(cd, cfile);
	kfree(cfile);
	return 0;
}

static void genwqe_vma_open(struct vm_area_struct *vma)
{
	/* nothing ... */
}

/**
 * genwqe_vma_close() - Called each time the vma is unmapped
 * @vma: VMA belonging to the GenWQE mmap()
 *
 * Free memory which got allocated by GenWQE mmap().
 */
static void genwqe_vma_close(struct vm_area_struct *vma)
{
	unsigned long vsize = vma->vm_end - vma->vm_start;
	struct inode *inode = file_inode(vma->vm_file);
	struct dma_mapping *dma_map;
	struct genwqe_dev *cd = container_of(inode->i_cdev, struct genwqe_dev,
					     cdev_genwqe);
	struct pci_dev *pci_dev = cd->pci_dev;
	dma_addr_t d_addr = 0;
	struct genwqe_file *cfile = vma->vm_private_data;

	dma_map = __genwqe_search_mapping(cfile, vma->vm_start, vsize,
					  &d_addr, NULL);
	if (dma_map == NULL) {
		dev_err(&pci_dev->dev,
			" [%s] err: mapping not found: v=%lx, p=%lx s=%lx\n",
			__func__, vma->vm_start, vma->vm_pgoff << PAGE_SHIFT,
			vsize);
		return;
	}
	__genwqe_del_mapping(cfile, dma_map);
	__genwqe_free_consistent(cd, dma_map->size, dma_map->k_vaddr,
				 dma_map->dma_addr);
	kfree(dma_map);
}

static const struct vm_operations_struct genwqe_vma_ops = {
	.open	= genwqe_vma_open,
	.close	= genwqe_vma_close,
};

/**
 * genwqe_mmap() - Provide contiguous buffers to userspace
 * @filp:	file handle
 * @vma:	VMA area to map
 *
 * We use mmap() to allocate contiguous buffers used for DMA
 * transfers. After the buffer is allocated we remap it to user-space
 * and remember a reference to our dma_mapping data structure, where
 * we store the associated DMA address and allocated size.
 *
 * When we receive a DDCB execution request with the ATS bits set to
 * plain buffer, we look up our dma_mapping list to find the
 * corresponding DMA address for the associated user-space address.
 */
static int genwqe_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int rc;
	unsigned long pfn, vsize = vma->vm_end - vma->vm_start;
	struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
	struct genwqe_dev *cd = cfile->cd;
	struct dma_mapping *dma_map;

	if (vsize == 0)
		return -EINVAL;

	if (get_order(vsize) > MAX_ORDER)
		return -ENOMEM;

	dma_map = kzalloc(sizeof(struct dma_mapping), GFP_KERNEL);
	if (dma_map == NULL)
		return -ENOMEM;

	genwqe_mapping_init(dma_map, GENWQE_MAPPING_RAW);
	dma_map->u_vaddr = (void *)vma->vm_start;
	dma_map->size = vsize;
	dma_map->nr_pages = DIV_ROUND_UP(vsize, PAGE_SIZE);
	dma_map->k_vaddr = __genwqe_alloc_consistent(cd, vsize,
						     &dma_map->dma_addr);
	if (dma_map->k_vaddr == NULL) {
		rc = -ENOMEM;
		goto free_dma_map;
	}

	if (capable(CAP_SYS_ADMIN) && (vsize > sizeof(dma_addr_t)))
		*(dma_addr_t *)dma_map->k_vaddr = dma_map->dma_addr;

	pfn = virt_to_phys(dma_map->k_vaddr) >> PAGE_SHIFT;
	rc = remap_pfn_range(vma,
			     vma->vm_start,
			     pfn,
			     vsize,
			     vma->vm_page_prot);
	if (rc != 0) {
		rc = -EFAULT;
		goto free_dma_mem;
	}

	vma->vm_private_data = cfile;
	vma->vm_ops = &genwqe_vma_ops;
	__genwqe_add_mapping(cfile, dma_map);

	return 0;

 free_dma_mem:
	__genwqe_free_consistent(cd, dma_map->size,
				 dma_map->k_vaddr,
				 dma_map->dma_addr);
 free_dma_map:
	kfree(dma_map);
	return rc;
}

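/*
 * Illustrative user-space counterpart to genwqe_mmap() (sketch only;
 * the device name and buffer size are example values):
 *
 *	int fd = open("/dev/genwqe0_card", O_RDWR);
 *	void *buf = mmap(NULL, 64 * 1024, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *
 *	...use buf as data buffer in DDCB requests...
 *
 *	munmap(buf, 64 * 1024);   (frees it via genwqe_vma_close())
 */
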
/**
 * do_flash_update() - Execute flash update (write image or CVPD)
 * @cfile:	descriptor of opened file
 * @load:	details about image load
 *
 * Return: 0 if successful
 */

#define FLASH_BLOCK	0x40000		/* we use 256k blocks */

static int do_flash_update(struct genwqe_file *cfile,
			   struct genwqe_bitstream *load)
{
	int rc = 0;
	int blocks_to_flash;
	dma_addr_t dma_addr;
	u64 flash = 0;
	size_t tocopy = 0;
	u8 __user *buf;
	u8 *xbuf;
	u32 crc;
	u8 cmdopts;
	struct genwqe_dev *cd = cfile->cd;
	struct file *filp = cfile->filp;
	struct pci_dev *pci_dev = cd->pci_dev;

	if ((load->size & 0x3) != 0)
		return -EINVAL;

	if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0)
		return -EINVAL;

	/* FIXME Bits have changed for new service layer! */
	switch ((char)load->partition) {
	case '0':
		cmdopts = 0x14;
		break;		/* download/erase_first/part_0 */
	case '1':
		cmdopts = 0x1C;
		break;		/* download/erase_first/part_1 */
	case 'v':
		cmdopts = 0x0C;
		break;		/* download/erase_first/vpd */
	default:
		return -EINVAL;
	}

	buf = (u8 __user *)load->data_addr;
	xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr);
	if (xbuf == NULL)
		return -ENOMEM;

	blocks_to_flash = load->size / FLASH_BLOCK;
	while (load->size) {
		struct genwqe_ddcb_cmd *req;

		/*
		 * We must be 4 byte aligned. Buffer must be 0 appended
		 * to have defined values when calculating CRC.
		 */
		tocopy = min_t(size_t, load->size, FLASH_BLOCK);

		rc = copy_from_user(xbuf, buf, tocopy);
		if (rc) {
			rc = -EFAULT;
			goto free_buffer;
		}
		crc = genwqe_crc32(xbuf, tocopy, 0xffffffff);

		dev_dbg(&pci_dev->dev,
			"[%s] DMA: %lx CRC: %08x SZ: %ld %d\n",
			__func__, (unsigned long)dma_addr, crc, tocopy,
			blocks_to_flash);

		/* prepare DDCB for SLU process */
		req = ddcb_requ_alloc();
		if (req == NULL) {
			rc = -ENOMEM;
			goto free_buffer;
		}

		req->cmd = SLCMD_MOVE_FLASH;
		req->cmdopts = cmdopts;

		/* prepare invariant values */
		if (genwqe_get_slu_id(cd) <= 0x2) {
			*(__be64 *)&req->__asiv[0]  = cpu_to_be64(dma_addr);
			*(__be64 *)&req->__asiv[8]  = cpu_to_be64(tocopy);
			*(__be64 *)&req->__asiv[16] = cpu_to_be64(flash);
			*(__be32 *)&req->__asiv[24] = cpu_to_be32(0);
			req->__asiv[24] = load->uid;
			*(__be32 *)&req->__asiv[28] = cpu_to_be32(crc);

			/* for simulation only */
			*(__be64 *)&req->__asiv[88] = cpu_to_be64(load->slu_id);
			*(__be64 *)&req->__asiv[96] = cpu_to_be64(load->app_id);
			req->asiv_length = 32; /* bytes included in crc calc */
		} else {	/* setup DDCB for ATS architecture */
			*(__be64 *)&req->asiv[0]  = cpu_to_be64(dma_addr);
			*(__be32 *)&req->asiv[8]  = cpu_to_be32(tocopy);
			*(__be32 *)&req->asiv[12] = cpu_to_be32(0); /* resvd */
			*(__be64 *)&req->asiv[16] = cpu_to_be64(flash);
			*(__be32 *)&req->asiv[24] = cpu_to_be32(load->uid<<24);
			*(__be32 *)&req->asiv[28] = cpu_to_be32(crc);

			/* for simulation only */
			*(__be64 *)&req->asiv[80] = cpu_to_be64(load->slu_id);
			*(__be64 *)&req->asiv[88] = cpu_to_be64(load->app_id);

			/* Rd only */
			req->ats = 0x4ULL << 44;
			req->asiv_length = 40; /* bytes included in crc calc */
		}
		req->asv_length = 8;

		/* For Genwqe5 we get back the calculated CRC */
		*(u64 *)&req->asv[0] = 0ULL;			/* 0x80 */

		rc = __genwqe_execute_raw_ddcb(cd, req, filp->f_flags);

		load->retc = req->retc;
		load->attn = req->attn;
		load->progress = req->progress;

		if (rc < 0) {
			ddcb_requ_free(req);
			goto free_buffer;
		}

		if (req->retc != DDCB_RETC_COMPLETE) {
			rc = -EIO;
			ddcb_requ_free(req);
			goto free_buffer;
		}

		load->size -= tocopy;
		flash += tocopy;
		buf += tocopy;
		blocks_to_flash--;
		ddcb_requ_free(req);
	}

 free_buffer:
	__genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr);
	return rc;
}

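/*
 * Illustrative invocation of the update path above through the
 * GENWQE_SLU_UPDATE ioctl (sketch only; partition '0' and the buffer
 * variables are example values, image_buf must be page aligned and
 * image_size a multiple of 4 bytes):
 *
 *	struct genwqe_bitstream load;
 *
 *	memset(&load, 0, sizeof(load));
 *	load.data_addr = (unsigned long)image_buf;
 *	load.size      = image_size;
 *	load.partition = '0';
 *	rc = ioctl(fd, GENWQE_SLU_UPDATE, &load);
 *
 * On return, load.retc, load.attn and load.progress reflect the state
 * of the last DDCB processed.
 */
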
static int do_flash_read(struct genwqe_file *cfile,
			 struct genwqe_bitstream *load)
{
	int rc, blocks_to_flash;
	dma_addr_t dma_addr;
	u64 flash = 0;
	size_t tocopy = 0;
	u8 __user *buf;
	u8 *xbuf;
	u8 cmdopts;
	struct genwqe_dev *cd = cfile->cd;
	struct file *filp = cfile->filp;
	struct pci_dev *pci_dev = cd->pci_dev;
	struct genwqe_ddcb_cmd *cmd;

	if ((load->size & 0x3) != 0)
		return -EINVAL;

	if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0)
		return -EINVAL;

	/* FIXME Bits have changed for new service layer! */
	switch ((char)load->partition) {
	case '0':
		cmdopts = 0x12;
		break;		/* upload/part_0 */
	case '1':
		cmdopts = 0x1A;
		break;		/* upload/part_1 */
	case 'v':
		cmdopts = 0x0A;
		break;		/* upload/vpd */
	default:
		return -EINVAL;
	}

	buf = (u8 __user *)load->data_addr;
	xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr);
	if (xbuf == NULL)
		return -ENOMEM;

	blocks_to_flash = load->size / FLASH_BLOCK;
	while (load->size) {
		/*
		 * We must be 4 byte aligned. Buffer must be 0 appended
		 * to have defined values when calculating CRC.
		 */
		tocopy = min_t(size_t, load->size, FLASH_BLOCK);

		dev_dbg(&pci_dev->dev,
			"[%s] DMA: %lx SZ: %ld %d\n",
			__func__, (unsigned long)dma_addr, tocopy,
			blocks_to_flash);

		/* prepare DDCB for SLU process */
		cmd = ddcb_requ_alloc();
		if (cmd == NULL) {
			rc = -ENOMEM;
			goto free_buffer;
		}
		cmd->cmd = SLCMD_MOVE_FLASH;
		cmd->cmdopts = cmdopts;

		/* prepare invariant values */
		if (genwqe_get_slu_id(cd) <= 0x2) {
			*(__be64 *)&cmd->__asiv[0]  = cpu_to_be64(dma_addr);
			*(__be64 *)&cmd->__asiv[8]  = cpu_to_be64(tocopy);
			*(__be64 *)&cmd->__asiv[16] = cpu_to_be64(flash);
			*(__be32 *)&cmd->__asiv[24] = cpu_to_be32(0);
			cmd->__asiv[24] = load->uid;
			*(__be32 *)&cmd->__asiv[28] = cpu_to_be32(0); /* CRC */
			cmd->asiv_length = 32; /* bytes included in crc calc */
		} else {	/* setup DDCB for ATS architecture */
			*(__be64 *)&cmd->asiv[0]  = cpu_to_be64(dma_addr);
			*(__be32 *)&cmd->asiv[8]  = cpu_to_be32(tocopy);
			*(__be32 *)&cmd->asiv[12] = cpu_to_be32(0); /* resvd */
			*(__be64 *)&cmd->asiv[16] = cpu_to_be64(flash);
			*(__be32 *)&cmd->asiv[24] = cpu_to_be32(load->uid<<24);
			*(__be32 *)&cmd->asiv[28] = cpu_to_be32(0); /* CRC */

			/* rd/wr */
			cmd->ats = 0x5ULL << 44;
			cmd->asiv_length = 40; /* bytes included in crc calc */
		}
		cmd->asv_length = 8;

		/* we only get back the calculated CRC */
		*(u64 *)&cmd->asv[0] = 0ULL;			/* 0x80 */

		rc = __genwqe_execute_raw_ddcb(cd, cmd, filp->f_flags);

		load->retc = cmd->retc;
		load->attn = cmd->attn;
		load->progress = cmd->progress;

		if ((rc < 0) && (rc != -EBADMSG)) {
			ddcb_requ_free(cmd);
			goto free_buffer;
		}

		rc = copy_to_user(buf, xbuf, tocopy);
		if (rc) {
			rc = -EFAULT;
			ddcb_requ_free(cmd);
			goto free_buffer;
		}

		/* We know that we can get retc 0x104 with CRC err */
		if (((cmd->retc == DDCB_RETC_FAULT) &&
		     (cmd->attn != 0x02)) ||	/* Normally ignore CRC error */
		    ((cmd->retc == DDCB_RETC_COMPLETE) &&
		     (cmd->attn != 0x00))) {	/* Everything was fine */
			rc = -EIO;
			ddcb_requ_free(cmd);
			goto free_buffer;
		}

		load->size -= tocopy;
		flash += tocopy;
		buf += tocopy;
		blocks_to_flash--;
		ddcb_requ_free(cmd);
	}
	rc = 0;

 free_buffer:
	__genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr);
	return rc;
}

static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
{
	int rc;
	struct genwqe_dev *cd = cfile->cd;
	struct pci_dev *pci_dev = cfile->cd->pci_dev;
	struct dma_mapping *dma_map;
	unsigned long map_addr;
	unsigned long map_size;

	if ((m->addr == 0x0) || (m->size == 0))
		return -EINVAL;

	map_addr = (m->addr & PAGE_MASK);
	map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);

	dma_map = kzalloc(sizeof(struct dma_mapping), GFP_KERNEL);
	if (dma_map == NULL)
		return -ENOMEM;

	genwqe_mapping_init(dma_map, GENWQE_MAPPING_SGL_PINNED);
	rc = genwqe_user_vmap(cd, dma_map, (void *)map_addr, map_size, NULL);
	if (rc != 0) {
		dev_err(&pci_dev->dev,
			"[%s] genwqe_user_vmap rc=%d\n", __func__, rc);
		kfree(dma_map);
		return rc;
	}

	genwqe_add_pin(cfile, dma_map);
	return 0;
}

static int genwqe_unpin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
{
	struct genwqe_dev *cd = cfile->cd;
	struct dma_mapping *dma_map;
	unsigned long map_addr;
	unsigned long map_size;

	if (m->addr == 0x0)
		return -EINVAL;

	map_addr = (m->addr & PAGE_MASK);
	map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);

	dma_map = genwqe_search_pin(cfile, map_addr, map_size, NULL);
	if (dma_map == NULL)
		return -ENOENT;

	genwqe_del_pin(cfile, dma_map);
	genwqe_user_vunmap(cd, dma_map, NULL);
	kfree(dma_map);
	return 0;
}

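/*
 * Worked example for the page rounding in the pin/unpin paths above
 * (hypothetical numbers, 4 KiB pages): m->addr = 0x20001234 and
 * m->size = 0x100 yield map_addr = 0x20001000 and map_size = 0x1000,
 * i.e. pinning always covers whole pages. From user space (sketch
 * only; only the fields used above are filled in):
 *
 *	struct genwqe_mem m = { .addr = (unsigned long)buf,
 *				.size = buf_size };
 *
 *	rc = ioctl(fd, GENWQE_PIN_MEM, &m);
 *	...
 *	rc = ioctl(fd, GENWQE_UNPIN_MEM, &m);
 */
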
/**
 * ddcb_cmd_cleanup() - Remove dynamically created fixup entries
 * @cfile:	descriptor of opened file
 * @req:	DDCB work request
 *
 * Only if there are any. Pinnings are not removed.
 */
static int ddcb_cmd_cleanup(struct genwqe_file *cfile, struct ddcb_requ *req)
{
	unsigned int i;
	struct dma_mapping *dma_map;
	struct genwqe_dev *cd = cfile->cd;

	for (i = 0; i < DDCB_FIXUPS; i++) {
		dma_map = &req->dma_mappings[i];

		if (dma_mapping_used(dma_map)) {
			__genwqe_del_mapping(cfile, dma_map);
			genwqe_user_vunmap(cd, dma_map, req);
		}
		if (req->sgls[i].sgl != NULL)
			genwqe_free_sync_sgl(cd, &req->sgls[i]);
	}
	return 0;
}

/**
 * ddcb_cmd_fixups() - Establish DMA fixups/sglists for user memory references
 * @cfile:	descriptor of opened file
 * @req:	DDCB work request
 *
 * Before the DDCB gets executed we need to handle the fixups. We
 * replace the user-space addresses with DMA addresses or do
 * additional setup work e.g. generating a scatter-gather list which
 * is used to describe the memory referred to in the fixup.
 */
static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
{
	int rc;
	unsigned int asiv_offs, i;
	struct genwqe_dev *cd = cfile->cd;
	struct genwqe_ddcb_cmd *cmd = &req->cmd;
	struct dma_mapping *m;
	const char *type = "UNKNOWN";

	for (i = 0, asiv_offs = 0x00; asiv_offs <= 0x58;
	     i++, asiv_offs += 0x08) {

		u64 u_addr;
		dma_addr_t d_addr;
		u32 u_size = 0;
		u64 ats_flags;

		ats_flags = ATS_GET_FLAGS(cmd->ats, asiv_offs);

		switch (ats_flags) {

		case ATS_TYPE_DATA:
			break;	/* nothing to do here */

		case ATS_TYPE_FLAT_RDWR:
		case ATS_TYPE_FLAT_RD: {
			u_addr = be64_to_cpu(*((__be64 *)&cmd->
					       asiv[asiv_offs]));
			u_size = be32_to_cpu(*((__be32 *)&cmd->
					       asiv[asiv_offs + 0x08]));

			/*
			 * No data available. Ignore u_addr in this
			 * case and set addr to 0. Hardware must not
			 * fetch the buffer.
			 */
			if (u_size == 0x0) {
				*((__be64 *)&cmd->asiv[asiv_offs]) =
					cpu_to_be64(0x0);
				break;
			}

			m = __genwqe_search_mapping(cfile, u_addr, u_size,
						    &d_addr, NULL);
			if (m == NULL) {
				rc = -EFAULT;
				goto err_out;
			}

			*((__be64 *)&cmd->asiv[asiv_offs]) =
				cpu_to_be64(d_addr);
			break;
		}

		case ATS_TYPE_SGL_RDWR:
		case ATS_TYPE_SGL_RD: {
			int page_offs;

			u_addr = be64_to_cpu(*((__be64 *)
					       &cmd->asiv[asiv_offs]));
			u_size = be32_to_cpu(*((__be32 *)
					       &cmd->asiv[asiv_offs + 0x08]));

			/*
			 * No data available. Ignore u_addr in this
			 * case and set addr to 0. Hardware must not
			 * fetch the empty sgl.
			 */
			if (u_size == 0x0) {
				*((__be64 *)&cmd->asiv[asiv_offs]) =
					cpu_to_be64(0x0);
				break;
			}

			m = genwqe_search_pin(cfile, u_addr, u_size, NULL);
			if (m != NULL) {
				type = "PINNING";
				page_offs = (u_addr -
					     (u64)m->u_vaddr)/PAGE_SIZE;
			} else {
				type = "MAPPING";
				m = &req->dma_mappings[i];

				genwqe_mapping_init(m,
						    GENWQE_MAPPING_SGL_TEMP);
				rc = genwqe_user_vmap(cd, m, (void *)u_addr,
						      u_size, req);
				if (rc != 0)
					goto err_out;

				__genwqe_add_mapping(cfile, m);
				page_offs = 0;
			}

			/* create genwqe style scatter gather list */
			rc = genwqe_alloc_sync_sgl(cd, &req->sgls[i],
						   (void __user *)u_addr,
						   u_size);
			if (rc != 0)
				goto err_out;

			genwqe_setup_sgl(cd, &req->sgls[i],
					 &m->dma_list[page_offs]);

			*((__be64 *)&cmd->asiv[asiv_offs]) =
				cpu_to_be64(req->sgls[i].sgl_dma_addr);

			break;
		}
		default:
			rc = -EINVAL;
			goto err_out;
		}
	}
	return 0;

 err_out:
	ddcb_cmd_cleanup(cfile, req);
	return rc;
}

/**
 * genwqe_execute_ddcb() - Execute DDCB using userspace address fixups
 * @cfile:	descriptor of opened file
 * @cmd:	DDCB execution request
 *
 * The code will build up the translation tables or look up the
 * contiguous memory allocation table to find the right translations
 * and DMA addresses.
 */
static int genwqe_execute_ddcb(struct genwqe_file *cfile,
			       struct genwqe_ddcb_cmd *cmd)
{
	int rc;
	struct genwqe_dev *cd = cfile->cd;
	struct file *filp = cfile->filp;
	struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);

	rc = ddcb_cmd_fixups(cfile, req);
	if (rc != 0)
		return rc;

	rc = __genwqe_execute_raw_ddcb(cd, cmd, filp->f_flags);
	ddcb_cmd_cleanup(cfile, req);
	return rc;
}

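/*
 * Illustrative user-space submission through the GENWQE_EXECUTE_DDCB
 * ioctl (sketch only; MY_ACCEL_CMD is a hypothetical command code and
 * the ASIV/ATS contents are application specific):
 *
 *	struct genwqe_ddcb_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.cmd = MY_ACCEL_CMD;
 *	...fill cmd.asiv and cmd.ats with buffer references...
 *	rc = ioctl(fd, GENWQE_EXECUTE_DDCB, &cmd);
 *	...check rc and cmd.retc on return...
 */
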
static int do_execute_ddcb(struct genwqe_file *cfile,
			   unsigned long arg, int raw)
{
	int rc;
	struct genwqe_ddcb_cmd *cmd;
	struct ddcb_requ *req;
	struct genwqe_dev *cd = cfile->cd;
	struct file *filp = cfile->filp;

	cmd = ddcb_requ_alloc();
	if (cmd == NULL)
		return -ENOMEM;

	req = container_of(cmd, struct ddcb_requ, cmd);

	if (copy_from_user(cmd, (void __user *)arg, sizeof(*cmd))) {
		ddcb_requ_free(cmd);
		return -EFAULT;
	}

	if (!raw)
		rc = genwqe_execute_ddcb(cfile, cmd);
	else
		rc = __genwqe_execute_raw_ddcb(cd, cmd, filp->f_flags);

	/*
	 * Copy back only the modified fields. Do not copy ASIV back
	 * since the copy got modified by the driver.
	 */
	if (copy_to_user((void __user *)arg, cmd,
			 sizeof(*cmd) - DDCB_ASIV_LENGTH)) {
		ddcb_requ_free(cmd);
		return -EFAULT;
	}

	ddcb_requ_free(cmd);
	return rc;
}

/**
 * genwqe_ioctl() - IO control
 * @filp:	file handle
 * @cmd:	command identifier (passed from user)
 * @arg:	argument (passed from user)
 *
 * Return: 0 on success
 */
static long genwqe_ioctl(struct file *filp, unsigned int cmd,
			 unsigned long arg)
{
	int rc = 0;
	struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
	struct genwqe_dev *cd = cfile->cd;
	struct pci_dev *pci_dev = cd->pci_dev;
	struct genwqe_reg_io __user *io;
	u64 val;
	u32 reg_offs;

	/* Return -EIO if card hit EEH */
	if (pci_channel_offline(pci_dev))
		return -EIO;

	if (_IOC_TYPE(cmd) != GENWQE_IOC_CODE)
		return -EINVAL;

	switch (cmd) {

	case GENWQE_GET_CARD_STATE:
		put_user(cd->card_state, (enum genwqe_card_state __user *)arg);
		return 0;

	/* Register access */
	case GENWQE_READ_REG64: {
		io = (struct genwqe_reg_io __user *)arg;

		if (get_user(reg_offs, &io->num))
			return -EFAULT;

		if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7))
			return -EINVAL;

		val = __genwqe_readq(cd, reg_offs);
		put_user(val, &io->val64);
		return 0;
	}

	case GENWQE_WRITE_REG64: {
		io = (struct genwqe_reg_io __user *)arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
			return -EPERM;

		if (get_user(reg_offs, &io->num))
			return -EFAULT;

		if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7))
			return -EINVAL;

		if (get_user(val, &io->val64))
			return -EFAULT;

		__genwqe_writeq(cd, reg_offs, val);
		return 0;
	}

	case GENWQE_READ_REG32: {
		io = (struct genwqe_reg_io __user *)arg;

		if (get_user(reg_offs, &io->num))
			return -EFAULT;

		if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3))
			return -EINVAL;

		val = __genwqe_readl(cd, reg_offs);
		put_user(val, &io->val64);
		return 0;
	}

	case GENWQE_WRITE_REG32: {
		io = (struct genwqe_reg_io __user *)arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
			return -EPERM;

		if (get_user(reg_offs, &io->num))
			return -EFAULT;

		if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3))
			return -EINVAL;

		if (get_user(val, &io->val64))
			return -EFAULT;

		__genwqe_writel(cd, reg_offs, val);
		return 0;
	}

	/* Flash update/reading */
	case GENWQE_SLU_UPDATE: {
		struct genwqe_bitstream load;

		if (!genwqe_is_privileged(cd))
			return -EPERM;

		if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
			return -EPERM;

		if (copy_from_user(&load, (void __user *)arg,
				   sizeof(load)))
			return -EFAULT;

		rc = do_flash_update(cfile, &load);

		if (copy_to_user((void __user *)arg, &load, sizeof(load)))
			return -EFAULT;

		return rc;
	}

	case GENWQE_SLU_READ: {
		struct genwqe_bitstream load;

		if (!genwqe_is_privileged(cd))
			return -EPERM;

		if (genwqe_flash_readback_fails(cd))
			return -ENOSPC;	/* known to fail for old versions */

		if (copy_from_user(&load, (void __user *)arg, sizeof(load)))
			return -EFAULT;

		rc = do_flash_read(cfile, &load);

		if (copy_to_user((void __user *)arg, &load, sizeof(load)))
			return -EFAULT;

		return rc;
	}

	/* memory pinning and unpinning */
	case GENWQE_PIN_MEM: {
		struct genwqe_mem m;

		if (copy_from_user(&m, (void __user *)arg, sizeof(m)))
			return -EFAULT;

		return genwqe_pin_mem(cfile, &m);
	}

	case GENWQE_UNPIN_MEM: {
		struct genwqe_mem m;

		if (copy_from_user(&m, (void __user *)arg, sizeof(m)))
			return -EFAULT;

		return genwqe_unpin_mem(cfile, &m);
	}

	/* launch a DDCB and wait for completion */
	case GENWQE_EXECUTE_DDCB:
		return do_execute_ddcb(cfile, arg, 0);

	case GENWQE_EXECUTE_RAW_DDCB: {

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		return do_execute_ddcb(cfile, arg, 1);
	}

	default:
		return -EINVAL;
	}

	return rc;
}

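/*
 * Example for the register access path above: reading a 64-bit
 * register from user space (sketch only; register offset 0x0 is just
 * an example value):
 *
 *	struct genwqe_reg_io io = { .num = 0x0 };
 *
 *	if (ioctl(fd, GENWQE_READ_REG64, &io) == 0)
 *		printf("reg[%llx] = %llx\n", (unsigned long long)io.num,
 *		       (unsigned long long)io.val64);
 */
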
#if defined(CONFIG_COMPAT)
/**
 * genwqe_compat_ioctl() - Compatibility ioctl
 *
 * Called whenever a 32-bit process running under a 64-bit kernel
 * performs an ioctl on /dev/genwqe<n>_card.
 *
 * @filp:	file pointer.
 * @cmd:	command.
 * @arg:	user argument.
 * Return:	zero on success or negative number on failure.
 */
static long genwqe_compat_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	return genwqe_ioctl(filp, cmd, arg);
}
#endif /* defined(CONFIG_COMPAT) */

static const struct file_operations genwqe_fops = {
	.owner		= THIS_MODULE,
	.open		= genwqe_open,
	.fasync		= genwqe_fasync,
	.mmap		= genwqe_mmap,
	.unlocked_ioctl	= genwqe_ioctl,
#if defined(CONFIG_COMPAT)
	.compat_ioctl	= genwqe_compat_ioctl,
#endif
	.release	= genwqe_release,
};

static int genwqe_device_initialized(struct genwqe_dev *cd)
{
	return cd->dev != NULL;
}

/**
 * genwqe_device_create() - Create and configure genwqe char device
 * @cd:	genwqe device descriptor
 *
 * This function must be called before we create any more genwqe
 * character devices, because it is allocating the major and minor
 * number which are supposed to be used by the client drivers.
 */
int genwqe_device_create(struct genwqe_dev *cd)
{
	int rc;
	struct pci_dev *pci_dev = cd->pci_dev;

	/*
	 * Here starts the individual setup per client. It must
	 * initialize its own cdev data structure with its own fops.
	 * The appropriate devnum needs to be created. The ranges must
	 * not overlap.
	 */
	rc = alloc_chrdev_region(&cd->devnum_genwqe, 0,
				 GENWQE_MAX_MINOR, GENWQE_DEVNAME);
	if (rc < 0) {
		dev_err(&pci_dev->dev, "err: alloc_chrdev_region failed\n");
		goto err_dev;
	}

	cdev_init(&cd->cdev_genwqe, &genwqe_fops);
	cd->cdev_genwqe.owner = THIS_MODULE;

	rc = cdev_add(&cd->cdev_genwqe, cd->devnum_genwqe, 1);
	if (rc < 0) {
		dev_err(&pci_dev->dev, "err: cdev_add failed\n");
		goto err_add;
	}

	/*
	 * Finally the device in /dev/... must be created. The rule is
	 * to use card%d_clientname for each created device.
	 */
	cd->dev = device_create_with_groups(cd->class_genwqe,
					    &cd->pci_dev->dev,
					    cd->devnum_genwqe, cd,
					    genwqe_attribute_groups,
					    GENWQE_DEVNAME "%u_card",
					    cd->card_idx);
	if (IS_ERR(cd->dev)) {
		rc = PTR_ERR(cd->dev);
		goto err_cdev;
	}

	rc = genwqe_init_debugfs(cd);
	if (rc != 0)
		goto err_debugfs;

	return 0;

 err_debugfs:
	device_destroy(cd->class_genwqe, cd->devnum_genwqe);
 err_cdev:
	cdev_del(&cd->cdev_genwqe);
 err_add:
	unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR);
 err_dev:
	cd->dev = NULL;
	return rc;
}

static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd)
{
	int rc;
	unsigned int i;
	struct pci_dev *pci_dev = cd->pci_dev;

	if (!genwqe_open_files(cd))
		return 0;

	dev_warn(&pci_dev->dev, "[%s] send SIGIO and wait ...\n", __func__);

	rc = genwqe_kill_fasync(cd, SIGIO);
	if (rc > 0) {
		/* give kill_timeout seconds to close file descriptors ... */
		for (i = 0; (i < genwqe_kill_timeout) &&
			     genwqe_open_files(cd); i++) {
			dev_info(&pci_dev->dev, " %d sec ...", i);

			cond_resched();
			msleep(1000);
		}

		/* if no open files we can safely continue, else ... */
		if (!genwqe_open_files(cd))
			return 0;

		dev_warn(&pci_dev->dev,
			 "[%s] send SIGKILL and wait ...\n", __func__);

		rc = genwqe_force_sig(cd, SIGKILL); /* force terminate */
		if (rc) {
			/* Give kill_timeout more seconds to end processes */
			for (i = 0; (i < genwqe_kill_timeout) &&
				     genwqe_open_files(cd); i++) {
				dev_warn(&pci_dev->dev, " %d sec ...", i);

				cond_resched();
				msleep(1000);
			}
		}
	}
	return 0;
}

/**
 * genwqe_device_remove() - Remove genwqe's char device
 * @cd:	GenWQE device information
 *
 * This function must be called after the client devices are removed
 * because it will free the major/minor number range for the genwqe
 * drivers.
 *
 * This function must be robust enough to be called twice.
 */
int genwqe_device_remove(struct genwqe_dev *cd)
{
	int rc;
	struct pci_dev *pci_dev = cd->pci_dev;

	if (!genwqe_device_initialized(cd))
		return 1;

	genwqe_inform_and_stop_processes(cd);

	/*
	 * We currently do wait until all file descriptors are
	 * closed. This leads to a problem when we abort the
	 * application which will decrease this reference from
	 * 1/unused to 0/illegal and not from 2/used 1/empty.
	 */
	rc = atomic_read(&cd->cdev_genwqe.kobj.kref.refcount);
	if (rc != 1) {
		dev_err(&pci_dev->dev,
			"[%s] err: cdev_genwqe...refcount=%d\n", __func__, rc);
		panic("Fatal err: cannot free resources with pending references!");
	}

	genqwe_exit_debugfs(cd);
	device_destroy(cd->class_genwqe, cd->devnum_genwqe);
	cdev_del(&cd->cdev_genwqe);
	unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR);
	cd->dev = NULL;

	return 0;
}