/*
 * RapidIO mport character device
 *
 * Copyright 2014-2015 Integrated Device Technology, Inc.
 *    Alexandre Bounine <alexandre.bounine@idt.com>
 * Copyright 2014-2015 Prodrive Technologies
 *    Andre van Herk <andre.van.herk@prodrive-technologies.com>
 *    Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>
 * Copyright (C) 2014 Texas Instruments Incorporated
 *    Aurelien Jacquiot <a-jacquiot@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cdev.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/kfifo.h>

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>

#include <linux/dma-mapping.h>
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
#include <linux/dmaengine.h>
#endif

#include <linux/rio.h>
#include <linux/rio_ids.h>
#include <linux/rio_drv.h>
#include <linux/rio_mport_cdev.h>

#include "../rio.h"

#define DRV_NAME        "rio_mport"
#define DRV_PREFIX      DRV_NAME ": "
#define DEV_NAME        "rio_mport"
#define DRV_VERSION     "1.0.0"

/* Debug output filtering masks */
enum {
        DBG_NONE        = 0,
        DBG_INIT        = BIT(0), /* driver init */
        DBG_EXIT        = BIT(1), /* driver exit */
        DBG_MPORT       = BIT(2), /* mport add/remove */
        DBG_RDEV        = BIT(3), /* RapidIO device add/remove */
        DBG_DMA         = BIT(4), /* DMA transfer messages */
        DBG_MMAP        = BIT(5), /* mapping messages */
        DBG_IBW         = BIT(6), /* inbound window */
        DBG_EVENT       = BIT(7), /* event handling messages */
        DBG_OBW         = BIT(8), /* outbound window messages */
        DBG_DBELL       = BIT(9), /* doorbell messages */
        DBG_ALL         = ~0,
};

#ifdef DEBUG
#define rmcd_debug(level, fmt, arg...)          \
        do {                                    \
                if (DBG_##level & dbg_level)    \
                        pr_debug(DRV_PREFIX "%s: " fmt "\n", __func__, ##arg); \
        } while (0)
#else
#define rmcd_debug(level, fmt, arg...) \
                no_printk(KERN_DEBUG pr_fmt(DRV_PREFIX fmt "\n"), ##arg)
#endif

#define rmcd_warn(fmt, arg...) \
        pr_warn(DRV_PREFIX "%s WARNING " fmt "\n", __func__, ##arg)

#define rmcd_error(fmt, arg...) \
        pr_err(DRV_PREFIX "%s ERROR " fmt "\n", __func__, ##arg)

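/*
 * Usage note (illustrative): with a DEBUG build, a call such as
 *
 *      rmcd_debug(DMA, "pid=%d", task_pid_nr(current));
 *
 * prints only when the DBG_DMA bit is set in the dbg_level module
 * parameter (e.g. dbg_level=0x10 at module load); in non-DEBUG builds
 * it compiles down to no_printk() and produces no output.
 */
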
MODULE_AUTHOR("Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>");
MODULE_AUTHOR("Aurelien Jacquiot <a-jacquiot@ti.com>");
MODULE_AUTHOR("Alexandre Bounine <alexandre.bounine@idt.com>");
MODULE_AUTHOR("Andre van Herk <andre.van.herk@prodrive-technologies.com>");
MODULE_DESCRIPTION("RapidIO mport character device driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int dma_timeout = 3000; /* DMA transfer timeout in msec */
module_param(dma_timeout, int, S_IRUGO);
MODULE_PARM_DESC(dma_timeout, "DMA Transfer Timeout in msec (default: 3000)");

#ifdef DEBUG
static u32 dbg_level = DBG_NONE;
module_param(dbg_level, uint, S_IWUSR | S_IWGRP | S_IRUGO);
MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
#endif

/*
 * An internal DMA coherent buffer
 */
struct mport_dma_buf {
        void            *ib_base;
        dma_addr_t      ib_phys;
        u32             ib_size;
        u64             ib_rio_base;
        bool            ib_map;
        struct file     *filp;
};

/*
 * Internal memory mapping structure
 */
enum rio_mport_map_dir {
        MAP_INBOUND,
        MAP_OUTBOUND,
        MAP_DMA,
};

struct rio_mport_mapping {
        struct list_head node;
        struct mport_dev *md;
        enum rio_mport_map_dir dir;
        u16 rioid;
        u64 rio_addr;
        dma_addr_t phys_addr; /* for mmap */
        void *virt_addr; /* kernel address, for dma_free_coherent */
        u64 size;
        struct kref ref; /* refcount of vmas sharing the mapping */
        struct file *filp;
};

struct rio_mport_dma_map {
        int valid;
        u64 length;
        void *vaddr;
        dma_addr_t paddr;
};

#define MPORT_MAX_DMA_BUFS      16
#define MPORT_EVENT_DEPTH       10

/*
 * mport_dev - driver-specific structure that represents an mport device
 * @active      mport device status flag
 * @node        list node to maintain list of registered mports
 * @cdev        character device
 * @dev         associated device object
 * @mport       associated subsystem's master port device object
 * @buf_mutex   lock for buffer handling
 * @file_mutex  lock for open files list
 * @file_list   list of open files on given mport
 * @properties  properties of this mport
 * @doorbells   list of inbound doorbell filters
 * @db_lock     lock for doorbells list
 * @portwrites  queue of inbound portwrites
 * @pw_lock     lock for port write queue
 * @mappings    queue for memory mappings
 * @dma_chan    default DMA channel associated with this device
 * @dma_ref     refcount for the default DMA channel
 * @comp        completion used when releasing the default DMA channel
 */
struct mport_dev {
        atomic_t                active;
        struct list_head        node;
        struct cdev             cdev;
        struct device           dev;
        struct rio_mport        *mport;
        struct mutex            buf_mutex;
        struct mutex            file_mutex;
        struct list_head        file_list;
        struct rio_mport_properties    properties;
        struct list_head        doorbells;
        spinlock_t              db_lock;
        struct list_head        portwrites;
        spinlock_t              pw_lock;
        struct list_head        mappings;
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
        struct dma_chan         *dma_chan;
        struct kref             dma_ref;
        struct completion       comp;
#endif
};

/*
 * mport_cdev_priv - data structure specific to individual file object
 *                   associated with an open device
 * @md            master port character device object
 * @async_queue   asynchronous notification queue
 * @list          file objects tracking list
 * @db_filters    inbound doorbell filters for this descriptor
 * @pw_filters    portwrite filters for this descriptor
 * @event_fifo    event fifo for this descriptor
 * @event_rx_wait wait queue for this descriptor
 * @fifo_lock     lock for event_fifo
 * @event_mask    event mask for this descriptor
 * @dmach         DMA engine channel allocated for specific file object
 */
struct mport_cdev_priv {
        struct mport_dev        *md;
        struct fasync_struct    *async_queue;
        struct list_head        list;
        struct list_head        db_filters;
        struct list_head        pw_filters;
        struct kfifo            event_fifo;
        wait_queue_head_t       event_rx_wait;
        spinlock_t              fifo_lock;
        u32                     event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
        struct dma_chan         *dmach;
        struct list_head        async_list;
        struct list_head        pend_list;
        spinlock_t              req_lock;
        struct mutex            dma_lock;
        struct kref             dma_ref;
        struct completion       comp;
#endif
};

/*
 * rio_mport_pw_filter - structure to describe a portwrite filter
 * @md_node   node in mport device's list
 * @priv_node node in private file object's list
 * @priv      reference to private data
 * @filter    actual portwrite filter
 */
struct rio_mport_pw_filter {
        struct list_head md_node;
        struct list_head priv_node;
        struct mport_cdev_priv *priv;
        struct rio_pw_filter filter;
};

/*
 * rio_mport_db_filter - structure to describe a doorbell filter
 * @data_node node in mport device's doorbell list
 * @priv_node node in private file object's list
 * @priv      reference to private data
 * @filter    actual doorbell filter
 */
struct rio_mport_db_filter {
        struct list_head data_node;
        struct list_head priv_node;
        struct mport_cdev_priv *priv;
        struct rio_doorbell_filter filter;
};

static LIST_HEAD(mport_devs);
static DEFINE_MUTEX(mport_devs_lock);

#if (0) /* used by commented out portion of poll function : FIXME */
static DECLARE_WAIT_QUEUE_HEAD(mport_cdev_wait);
#endif

static struct class *dev_class;
static dev_t dev_number;

static struct workqueue_struct *dma_wq;

static void mport_release_mapping(struct kref *ref);

static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
                              int local)
{
        struct rio_mport *mport = priv->md->mport;
        struct rio_mport_maint_io maint_io;
        u32 *buffer;
        u32 offset;
        size_t length;
        int ret, i;

        if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
                return -EFAULT;

        if ((maint_io.offset % 4) ||
            (maint_io.length == 0) || (maint_io.length % 4) ||
            (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
                return -EINVAL;

        buffer = vmalloc(maint_io.length);
        if (buffer == NULL)
                return -ENOMEM;
        length = maint_io.length/sizeof(u32);
        offset = maint_io.offset;

        for (i = 0; i < length; i++) {
                if (local)
                        ret = __rio_local_read_config_32(mport,
                                offset, &buffer[i]);
                else
                        ret = rio_mport_read_config_32(mport, maint_io.rioid,
                                maint_io.hopcount, offset, &buffer[i]);
                if (ret)
                        goto out;

                offset += 4;
        }

        if (unlikely(copy_to_user((void __user *)(uintptr_t)maint_io.buffer,
                                  buffer, maint_io.length)))
                ret = -EFAULT;
out:
        vfree(buffer);
        return ret;
}

static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg,
                              int local)
{
        struct rio_mport *mport = priv->md->mport;
        struct rio_mport_maint_io maint_io;
        u32 *buffer;
        u32 offset;
        size_t length;
        int ret = -EINVAL, i;

        if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
                return -EFAULT;

        if ((maint_io.offset % 4) ||
            (maint_io.length == 0) || (maint_io.length % 4) ||
            (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
                return -EINVAL;

        buffer = vmalloc(maint_io.length);
        if (buffer == NULL)
                return -ENOMEM;
        length = maint_io.length;

        if (unlikely(copy_from_user(buffer,
                        (void __user *)(uintptr_t)maint_io.buffer, length))) {
                ret = -EFAULT;
                goto out;
        }

        offset = maint_io.offset;
        length /= sizeof(u32);

        for (i = 0; i < length; i++) {
                if (local)
                        ret = __rio_local_write_config_32(mport,
                                                          offset, buffer[i]);
                else
                        ret = rio_mport_write_config_32(mport, maint_io.rioid,
                                                        maint_io.hopcount,
                                                        offset, buffer[i]);
                if (ret)
                        goto out;

                offset += 4;
        }

out:
        vfree(buffer);
        return ret;
}

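/*
 * Illustrative user-space sketch of a remote maintenance read, assuming the
 * RIO_MPORT_MAINT_READ_REMOTE ioctl code and struct rio_mport_maint_io from
 * <linux/rio_mport_cdev.h>; offset and length must be 32-bit aligned and
 * stay within RIO_MAINT_SPACE_SZ, as validated above:
 *
 *      uint32_t csr_val;
 *      struct rio_mport_maint_io io = {
 *              .rioid = destid, .hopcount = hc,
 *              .offset = 0x00, .length = sizeof(csr_val),
 *              .buffer = (uintptr_t)&csr_val,
 *      };
 *      ret = ioctl(fd, RIO_MPORT_MAINT_READ_REMOTE, &io);
 */
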
/*
 * Inbound/outbound memory mapping functions
 */
static int
rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,
                                  u16 rioid, u64 raddr, u32 size,
                                  dma_addr_t *paddr)
{
        struct rio_mport *mport = md->mport;
        struct rio_mport_mapping *map;
        int ret;

        rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size);

        map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (map == NULL)
                return -ENOMEM;

        ret = rio_map_outb_region(mport, rioid, raddr, size, 0, paddr);
        if (ret < 0)
                goto err_map_outb;

        map->dir = MAP_OUTBOUND;
        map->rioid = rioid;
        map->rio_addr = raddr;
        map->size = size;
        map->phys_addr = *paddr;
        map->filp = filp;
        map->md = md;
        kref_init(&map->ref);
        list_add_tail(&map->node, &md->mappings);
        return 0;
err_map_outb:
        kfree(map);
        return ret;
}

static int
rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp,
                               u16 rioid, u64 raddr, u32 size,
                               dma_addr_t *paddr)
{
        struct rio_mport_mapping *map;
        int err = -ENOMEM;

        mutex_lock(&md->buf_mutex);
        list_for_each_entry(map, &md->mappings, node) {
                if (map->dir != MAP_OUTBOUND)
                        continue;
                if (rioid == map->rioid &&
                    raddr == map->rio_addr && size == map->size) {
                        *paddr = map->phys_addr;
                        err = 0;
                        break;
                } else if (rioid == map->rioid &&
                           raddr < (map->rio_addr + map->size - 1) &&
                           (raddr + size) > map->rio_addr) {
                        err = -EBUSY;
                        break;
                }
        }

        /* If not found, create new */
        if (err == -ENOMEM)
                err = rio_mport_create_outbound_mapping(md, filp, rioid, raddr,
                                                        size, paddr);
        mutex_unlock(&md->buf_mutex);
        return err;
}

static int rio_mport_obw_map(struct file *filp, void __user *arg)
{
        struct mport_cdev_priv *priv = filp->private_data;
        struct mport_dev *data = priv->md;
        struct rio_mmap map;
        dma_addr_t paddr;
        int ret;

        if (unlikely(copy_from_user(&map, arg, sizeof(map))))
                return -EFAULT;

        rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx",
                   map.rioid, map.rio_addr, map.length);

        ret = rio_mport_get_outbound_mapping(data, filp, map.rioid,
                                             map.rio_addr, map.length, &paddr);
        if (ret < 0) {
                rmcd_error("Failed to set OBW err= %d", ret);
                return ret;
        }

        map.handle = paddr;

        if (unlikely(copy_to_user(arg, &map, sizeof(map))))
                return -EFAULT;
        return 0;
}

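/*
 * Illustrative user-space sketch, assuming the RIO_MAP_OUTBOUND ioctl code
 * and struct rio_mmap from <linux/rio_mport_cdev.h>: the returned handle is
 * the physical address of the window, which the caller is expected to
 * mmap() through this device to access remote memory:
 *
 *      struct rio_mmap map = { .rioid = destid, .rio_addr = raddr,
 *                              .length = size };
 *      ioctl(fd, RIO_MAP_OUTBOUND, &map);
 *      ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                 fd, map.handle);
 */
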
/*
 * rio_mport_obw_free() - unmap an OutBound Window from RapidIO address space
 *
 * @filp: file pointer associated with the call
 * @arg: window handle returned by the mapping routine
 */
static int rio_mport_obw_free(struct file *filp, void __user *arg)
{
        struct mport_cdev_priv *priv = filp->private_data;
        struct mport_dev *md = priv->md;
        u64 handle;
        struct rio_mport_mapping *map, *_map;

        if (!md->mport->ops->unmap_outb)
                return -EPROTONOSUPPORT;

        if (copy_from_user(&handle, arg, sizeof(handle)))
                return -EFAULT;

        rmcd_debug(OBW, "h=0x%llx", handle);

        mutex_lock(&md->buf_mutex);
        list_for_each_entry_safe(map, _map, &md->mappings, node) {
                if (map->dir == MAP_OUTBOUND && map->phys_addr == handle) {
                        if (map->filp == filp) {
                                rmcd_debug(OBW, "kref_put h=0x%llx", handle);
                                map->filp = NULL;
                                kref_put(&map->ref, mport_release_mapping);
                        }
                        break;
                }
        }
        mutex_unlock(&md->buf_mutex);

        return 0;
}

/*
 * maint_hdid_set() - Set the host Device ID
 * @priv: driver private data
 * @arg: Device Id
 */
static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg)
{
        struct mport_dev *md = priv->md;
        u16 hdid;

        if (copy_from_user(&hdid, arg, sizeof(hdid)))
                return -EFAULT;

        md->mport->host_deviceid = hdid;
        md->properties.hdid = hdid;
        rio_local_set_device_id(md->mport, hdid);

        rmcd_debug(MPORT, "Set host device Id to %d", hdid);

        return 0;
}

/*
 * maint_comptag_set() - Set the host Component Tag
 * @priv: driver private data
 * @arg: Component Tag
 */
static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg)
{
        struct mport_dev *md = priv->md;
        u32 comptag;

        if (copy_from_user(&comptag, arg, sizeof(comptag)))
                return -EFAULT;

        rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag);

        rmcd_debug(MPORT, "Set host Component Tag to %d", comptag);

        return 0;
}

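/*
 * Illustrative user-space sketch, assuming the RIO_MPORT_MAINT_HDID_SET and
 * RIO_MPORT_MAINT_COMPTAG_SET ioctl codes declared in
 * <linux/rio_mport_cdev.h>:
 *
 *      uint16_t hdid = 0;
 *      uint32_t comptag = 0x1001;
 *      ioctl(fd, RIO_MPORT_MAINT_HDID_SET, &hdid);
 *      ioctl(fd, RIO_MPORT_MAINT_COMPTAG_SET, &comptag);
 */
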
#ifdef CONFIG_RAPIDIO_DMA_ENGINE

struct mport_dma_req {
        struct list_head node;
        struct file *filp;
        struct mport_cdev_priv *priv;
        enum rio_transfer_sync sync;
        struct sg_table sgt;
        struct page **page_list;
        unsigned int nr_pages;
        struct rio_mport_mapping *map;
        struct dma_chan *dmach;
        enum dma_data_direction dir;
        dma_cookie_t cookie;
        enum dma_status status;
        struct completion req_comp;
};

struct mport_faf_work {
        struct work_struct work;
        struct mport_dma_req *req;
};

static void mport_release_def_dma(struct kref *dma_ref)
{
        struct mport_dev *md =
                        container_of(dma_ref, struct mport_dev, dma_ref);

        rmcd_debug(EXIT, "DMA_%d", md->dma_chan->chan_id);
        rio_release_dma(md->dma_chan);
        md->dma_chan = NULL;
}

static void mport_release_dma(struct kref *dma_ref)
{
        struct mport_cdev_priv *priv =
                        container_of(dma_ref, struct mport_cdev_priv, dma_ref);

        rmcd_debug(EXIT, "DMA_%d", priv->dmach->chan_id);
        complete(&priv->comp);
}

static void dma_req_free(struct mport_dma_req *req)
{
        struct mport_cdev_priv *priv = req->priv;
        unsigned int i;

        dma_unmap_sg(req->dmach->device->dev,
                     req->sgt.sgl, req->sgt.nents, req->dir);
        sg_free_table(&req->sgt);
        if (req->page_list) {
                for (i = 0; i < req->nr_pages; i++)
                        put_page(req->page_list[i]);
                kfree(req->page_list);
        }

        if (req->map) {
                mutex_lock(&req->map->md->buf_mutex);
                kref_put(&req->map->ref, mport_release_mapping);
                mutex_unlock(&req->map->md->buf_mutex);
        }

        kref_put(&priv->dma_ref, mport_release_dma);

        kfree(req);
}

static void dma_xfer_callback(void *param)
{
        struct mport_dma_req *req = (struct mport_dma_req *)param;
        struct mport_cdev_priv *priv = req->priv;

        req->status = dma_async_is_tx_complete(priv->dmach, req->cookie,
                                               NULL, NULL);
        complete(&req->req_comp);
}

static void dma_faf_cleanup(struct work_struct *_work)
{
        struct mport_faf_work *work = container_of(_work,
                                                struct mport_faf_work, work);
        struct mport_dma_req *req = work->req;

        dma_req_free(req);
        kfree(work);
}

static void dma_faf_callback(void *param)
{
        struct mport_dma_req *req = (struct mport_dma_req *)param;
        struct mport_faf_work *work;

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (!work)
                return;

        INIT_WORK(&work->work, dma_faf_cleanup);
        work->req = req;
        queue_work(dma_wq, &work->work);
}

/*
 * prep_dma_xfer() - Configure and send request to DMAengine to prepare DMA
 *                   transfer object.
 * Returns a pointer to the DMA transaction descriptor allocated by the DMA
 * driver on success, or NULL/ERR_PTR-encoded error if the request failed.
 * The caller must check for a NULL return first and then test any non-NULL
 * pointer with the IS_ERR() macro.
 */
static struct dma_async_tx_descriptor
*prep_dma_xfer(struct dma_chan *chan, struct rio_transfer_io *transfer,
        struct sg_table *sgt, int nents, enum dma_transfer_direction dir,
        enum dma_ctrl_flags flags)
{
        struct rio_dma_data tx_data;

        tx_data.sg = sgt->sgl;
        tx_data.sg_len = nents;
        tx_data.rio_addr_u = 0;
        tx_data.rio_addr = transfer->rio_addr;
        if (dir == DMA_MEM_TO_DEV) {
                switch (transfer->method) {
                case RIO_EXCHANGE_NWRITE:
                        tx_data.wr_type = RDW_ALL_NWRITE;
                        break;
                case RIO_EXCHANGE_NWRITE_R_ALL:
                        tx_data.wr_type = RDW_ALL_NWRITE_R;
                        break;
                case RIO_EXCHANGE_NWRITE_R:
                        tx_data.wr_type = RDW_LAST_NWRITE_R;
                        break;
                case RIO_EXCHANGE_DEFAULT:
                        tx_data.wr_type = RDW_DEFAULT;
                        break;
                default:
                        return ERR_PTR(-EINVAL);
                }
        }

        return rio_dma_prep_xfer(chan, transfer->rioid, &tx_data, dir, flags);
}

/* Request a DMA channel associated with this mport device.
 * Try to request a DMA channel for every new process that opened the given
 * mport. If a new DMA channel is not available, use the default channel,
 * which is the first DMA channel opened on the mport device.
 */
static int get_dma_channel(struct mport_cdev_priv *priv)
{
        mutex_lock(&priv->dma_lock);
        if (!priv->dmach) {
                priv->dmach = rio_request_mport_dma(priv->md->mport);
                if (!priv->dmach) {
                        /* Use default DMA channel if available */
                        if (priv->md->dma_chan) {
                                priv->dmach = priv->md->dma_chan;
                                kref_get(&priv->md->dma_ref);
                        } else {
                                rmcd_error("Failed to get DMA channel");
                                mutex_unlock(&priv->dma_lock);
                                return -ENODEV;
                        }
                } else if (!priv->md->dma_chan) {
                        /* Register default DMA channel if we do not have one */
                        priv->md->dma_chan = priv->dmach;
                        kref_init(&priv->md->dma_ref);
                        rmcd_debug(DMA, "Register DMA_chan %d as default",
                                   priv->dmach->chan_id);
                }

                kref_init(&priv->dma_ref);
                init_completion(&priv->comp);
        }

        kref_get(&priv->dma_ref);
        mutex_unlock(&priv->dma_lock);
        return 0;
}

static void put_dma_channel(struct mport_cdev_priv *priv)
{
        kref_put(&priv->dma_ref, mport_release_dma);
}

/*
 * DMA transfer functions
 */
static int do_dma_request(struct mport_dma_req *req,
                          struct rio_transfer_io *xfer,
                          enum rio_transfer_sync sync, int nents)
{
        struct mport_cdev_priv *priv;
        struct sg_table *sgt;
        struct dma_chan *chan;
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;
        unsigned long tmo = msecs_to_jiffies(dma_timeout);
        enum dma_transfer_direction dir;
        long wret;
        int ret = 0;

        priv = req->priv;
        sgt = &req->sgt;

        chan = priv->dmach;
        dir = (req->dir == DMA_FROM_DEVICE) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;

        rmcd_debug(DMA, "%s(%d) uses %s for DMA_%s",
                   current->comm, task_pid_nr(current),
                   dev_name(&chan->dev->device),
                   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE");

        /* Initialize DMA transaction request */
        tx = prep_dma_xfer(chan, xfer, sgt, nents, dir,
                           DMA_CTRL_ACK | DMA_PREP_INTERRUPT);

        if (!tx) {
                rmcd_debug(DMA, "prep error for %s A:0x%llx L:0x%llx",
                           (dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
                           xfer->rio_addr, xfer->length);
                ret = -EIO;
                goto err_out;
        } else if (IS_ERR(tx)) {
                ret = PTR_ERR(tx);
                rmcd_debug(DMA, "prep error %d for %s A:0x%llx L:0x%llx", ret,
                           (dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
                           xfer->rio_addr, xfer->length);
                goto err_out;
        }

        if (sync == RIO_TRANSFER_FAF)
                tx->callback = dma_faf_callback;
        else
                tx->callback = dma_xfer_callback;
        tx->callback_param = req;

        req->dmach = chan;
        req->sync = sync;
        req->status = DMA_IN_PROGRESS;
        init_completion(&req->req_comp);

        cookie = dmaengine_submit(tx);
        req->cookie = cookie;

        rmcd_debug(DMA, "pid=%d DMA_%s tx_cookie = %d", task_pid_nr(current),
                   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);

        if (dma_submit_error(cookie)) {
                rmcd_error("submit err=%d (addr:0x%llx len:0x%llx)",
                           cookie, xfer->rio_addr, xfer->length);
                ret = -EIO;
                goto err_out;
        }

        dma_async_issue_pending(chan);

        if (sync == RIO_TRANSFER_ASYNC) {
                spin_lock(&priv->req_lock);
                list_add_tail(&req->node, &priv->async_list);
                spin_unlock(&priv->req_lock);
                return cookie;
        } else if (sync == RIO_TRANSFER_FAF)
                return 0;

        wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);

        if (wret == 0) {
                /* Timeout on wait occurred */
                rmcd_error("%s(%d) timed out waiting for DMA_%s %d",
                           current->comm, task_pid_nr(current),
                           (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);
                return -ETIMEDOUT;
        } else if (wret == -ERESTARTSYS) {
                /* Wait_for_completion was interrupted by a signal but DMA may
                 * be in progress
                 */
                rmcd_error("%s(%d) wait for DMA_%s %d was interrupted",
                           current->comm, task_pid_nr(current),
                           (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);
                return -EINTR;
        }

        if (req->status != DMA_COMPLETE) {
                /* DMA transaction completion was signaled with error */
                rmcd_error("%s(%d) DMA_%s %d completed with status %d (ret=%d)",
                           current->comm, task_pid_nr(current),
                           (dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
                           cookie, req->status, ret);
                ret = -EIO;
        }

err_out:
        return ret;
}

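/*
 * Note on the three synchronization modes handled above:
 *  - RIO_TRANSFER_SYNC:  blocks up to dma_timeout msec; on timeout or
 *    signal the caller parks the request on pend_list instead of freeing
 *    it (see rio_dma_transfer() below).
 *  - RIO_TRANSFER_ASYNC: returns the DMA cookie immediately; completion
 *    is collected later through rio_mport_wait_for_async_dma().
 *  - RIO_TRANSFER_FAF:   "fire and forget"; the request is cleaned up by
 *    the dma_faf_callback()/dma_faf_cleanup() workqueue path.
 */
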
/*
 * rio_dma_transfer() - Perform RapidIO DMA data transfer to/from
 *                      the remote RapidIO device
 * @filp: file pointer associated with the call
 * @transfer_mode: DMA transfer mode
 * @sync: synchronization mode
 * @dir: DMA transfer direction (DMA_MEM_TO_DEV = write OR
 *       DMA_DEV_TO_MEM = read)
 * @xfer: data transfer descriptor structure
 */
static int
rio_dma_transfer(struct file *filp, u32 transfer_mode,
                 enum rio_transfer_sync sync, enum dma_data_direction dir,
                 struct rio_transfer_io *xfer)
{
        struct mport_cdev_priv *priv = filp->private_data;
        unsigned long nr_pages = 0;
        struct page **page_list = NULL;
        struct mport_dma_req *req;
        struct mport_dev *md = priv->md;
        struct dma_chan *chan;
        int i, ret;
        int nents;

        if (xfer->length == 0)
                return -EINVAL;
        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        ret = get_dma_channel(priv);
        if (ret) {
                kfree(req);
                return ret;
        }

        /*
         * If parameter loc_addr != NULL, we are transferring data from/to
         * data buffer allocated in user-space: lock in memory user-space
         * buffer pages and build an SG table for DMA transfer request
         *
         * Otherwise (loc_addr == NULL) contiguous kernel-space buffer is
         * used for DMA data transfers: build single entry SG table using
         * offset within the internal buffer specified by handle parameter.
         */
        if (xfer->loc_addr) {
                unsigned long offset;
                long pinned;

                offset = (unsigned long)(uintptr_t)xfer->loc_addr & ~PAGE_MASK;
                nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT;

                page_list = kmalloc_array(nr_pages,
                                          sizeof(*page_list), GFP_KERNEL);
                if (page_list == NULL) {
                        ret = -ENOMEM;
                        goto err_req;
                }

                down_read(&current->mm->mmap_sem);
                pinned = get_user_pages(
                                (unsigned long)xfer->loc_addr & PAGE_MASK,
                                nr_pages, dir == DMA_FROM_DEVICE, 0,
                                page_list, NULL);
                up_read(&current->mm->mmap_sem);

                if (pinned != nr_pages) {
                        if (pinned < 0) {
                                rmcd_error("get_user_pages err=%ld", pinned);
                                nr_pages = 0;
                        } else
                                rmcd_error("pinned %ld out of %ld pages",
                                           pinned, nr_pages);
                        ret = -EFAULT;
                        goto err_pg;
                }

                ret = sg_alloc_table_from_pages(&req->sgt, page_list, nr_pages,
                                offset, xfer->length, GFP_KERNEL);
                if (ret) {
                        rmcd_error("sg_alloc_table failed with err=%d", ret);
                        goto err_pg;
                }

                req->page_list = page_list;
                req->nr_pages = nr_pages;
        } else {
                dma_addr_t baddr;
                struct rio_mport_mapping *map;

                baddr = (dma_addr_t)xfer->handle;

                mutex_lock(&md->buf_mutex);
                list_for_each_entry(map, &md->mappings, node) {
                        if (baddr >= map->phys_addr &&
                            baddr < (map->phys_addr + map->size)) {
                                kref_get(&map->ref);
                                req->map = map;
                                break;
                        }
                }
                mutex_unlock(&md->buf_mutex);

                if (req->map == NULL) {
                        ret = -ENOMEM;
                        goto err_req;
                }

                if (xfer->length + xfer->offset > map->size) {
                        ret = -EINVAL;
                        goto err_req;
                }

                ret = sg_alloc_table(&req->sgt, 1, GFP_KERNEL);
                if (unlikely(ret)) {
                        rmcd_error("sg_alloc_table failed for internal buf");
                        goto err_req;
                }

                sg_set_buf(req->sgt.sgl,
                           map->virt_addr + (baddr - map->phys_addr) +
                                xfer->offset, xfer->length);
        }

        req->dir = dir;
        req->filp = filp;
        req->priv = priv;
        chan = priv->dmach;

        nents = dma_map_sg(chan->device->dev,
                           req->sgt.sgl, req->sgt.nents, dir);
        if (nents == 0) {
                /* dma_map_sg() returns 0 on mapping failure, not a negative
                 * errno; bail out through the common cleanup path instead of
                 * leaking the request and the DMA channel reference.
                 */
                rmcd_error("Failed to map SG list");
                ret = -EFAULT;
                goto err_pg;
        }

        ret = do_dma_request(req, xfer, sync, nents);

        if (ret >= 0) {
                if (sync == RIO_TRANSFER_SYNC)
                        goto sync_out;
                return ret; /* return ASYNC cookie */
        }

        if (ret == -ETIMEDOUT || ret == -EINTR) {
                /*
                 * This can happen only in case of SYNC transfer.
                 * Do not free unfinished request structure immediately.
                 * Place it into pending list and deal with it later
                 */
                spin_lock(&priv->req_lock);
                list_add_tail(&req->node, &priv->pend_list);
                spin_unlock(&priv->req_lock);
                return ret;
        }

        rmcd_debug(DMA, "do_dma_request failed with err=%d", ret);
sync_out:
        dma_unmap_sg(chan->device->dev, req->sgt.sgl, req->sgt.nents, dir);
        sg_free_table(&req->sgt);
err_pg:
        if (page_list) {
                for (i = 0; i < nr_pages; i++)
                        put_page(page_list[i]);
                kfree(page_list);
        }
err_req:
        if (req->map) {
                mutex_lock(&md->buf_mutex);
                kref_put(&req->map->ref, mport_release_mapping);
                mutex_unlock(&md->buf_mutex);
        }
        put_dma_channel(priv);
        kfree(req);
        return ret;
}

static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
{
        struct mport_cdev_priv *priv = filp->private_data;
        struct rio_transaction transaction;
        struct rio_transfer_io *transfer;
        enum dma_data_direction dir;
        int i, ret = 0;

        if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction))))
                return -EFAULT;

        if (transaction.count != 1) /* only single transfer for now */
                return -EINVAL;

        if ((transaction.transfer_mode &
             priv->md->properties.transfer_mode) == 0)
                return -ENODEV;

        transfer = vmalloc(transaction.count * sizeof(*transfer));
        if (!transfer)
                return -ENOMEM;

        if (unlikely(copy_from_user(transfer,
                                    (void __user *)(uintptr_t)transaction.block,
                                    transaction.count * sizeof(*transfer)))) {
                ret = -EFAULT;
                goto out_free;
        }

        dir = (transaction.dir == RIO_TRANSFER_DIR_READ) ?
                                        DMA_FROM_DEVICE : DMA_TO_DEVICE;
        for (i = 0; i < transaction.count && ret == 0; i++)
                ret = rio_dma_transfer(filp, transaction.transfer_mode,
                                       transaction.sync, dir, &transfer[i]);

        if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block,
                                  transfer,
                                  transaction.count * sizeof(*transfer))))
                ret = -EFAULT;

out_free:
        vfree(transfer);

        return ret;
}

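/*
 * Illustrative user-space sketch, assuming the RIO_TRANSFER ioctl code and
 * struct rio_transaction/rio_transfer_io from <linux/rio_mport_cdev.h>
 * (the RIO_TRANSFER_MODE_TRANSFER constant is also an assumption here);
 * only single-entry transactions (count == 1) are accepted above:
 *
 *      struct rio_transfer_io xfer = {
 *              .rioid = destid, .rio_addr = raddr,
 *              .loc_addr = (uintptr_t)buf, .length = len,
 *              .method = RIO_EXCHANGE_NWRITE_R,
 *      };
 *      struct rio_transaction tr = {
 *              .transfer_mode = RIO_TRANSFER_MODE_TRANSFER,
 *              .sync = RIO_TRANSFER_SYNC, .dir = RIO_TRANSFER_DIR_WRITE,
 *              .count = 1, .block = (uintptr_t)&xfer,
 *      };
 *      ioctl(fd, RIO_TRANSFER, &tr);
 */
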
static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
{
        struct mport_cdev_priv *priv;
        struct mport_dev *md;
        struct rio_async_tx_wait w_param;
        struct mport_dma_req *req;
        dma_cookie_t cookie;
        unsigned long tmo;
        long wret;
        int found = 0;
        int ret;

        priv = (struct mport_cdev_priv *)filp->private_data;
        md = priv->md;

        if (unlikely(copy_from_user(&w_param, arg, sizeof(w_param))))
                return -EFAULT;

        cookie = w_param.token;
        if (w_param.timeout)
                tmo = msecs_to_jiffies(w_param.timeout);
        else /* Use default DMA timeout */
                tmo = msecs_to_jiffies(dma_timeout);

        spin_lock(&priv->req_lock);
        list_for_each_entry(req, &priv->async_list, node) {
                if (req->cookie == cookie) {
                        list_del(&req->node);
                        found = 1;
                        break;
                }
        }
        spin_unlock(&priv->req_lock);

        if (!found)
                return -EAGAIN;

        wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);

        if (wret == 0) {
                /* Timeout on wait occurred */
                rmcd_error("%s(%d) timed out waiting for ASYNC DMA_%s",
                           current->comm, task_pid_nr(current),
                           (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE");
                ret = -ETIMEDOUT;
                goto err_tmo;
        } else if (wret == -ERESTARTSYS) {
                /* Wait_for_completion was interrupted by a signal but DMA may
                 * be still in progress
                 */
                rmcd_error("%s(%d) wait for ASYNC DMA_%s was interrupted",
                           current->comm, task_pid_nr(current),
                           (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE");
                ret = -EINTR;
                goto err_tmo;
        }

        if (req->status != DMA_COMPLETE) {
                /* DMA transaction completion signaled with transfer error */
                rmcd_error("%s(%d) ASYNC DMA_%s completion with status %d",
                           current->comm, task_pid_nr(current),
                           (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE",
                           req->status);
                ret = -EIO;
        } else
                ret = 0;

        if (req->status != DMA_IN_PROGRESS && req->status != DMA_PAUSED)
                dma_req_free(req);

        return ret;

err_tmo:
        /* Return request back into async queue */
        spin_lock(&priv->req_lock);
        list_add_tail(&req->node, &priv->async_list);
        spin_unlock(&priv->req_lock);
        return ret;
}

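/*
 * Illustrative user-space sketch, assuming the RIO_WAIT_FOR_ASYNC ioctl
 * code and struct rio_async_tx_wait from <linux/rio_mport_cdev.h>; token
 * is the cookie returned by an RIO_TRANSFER call with RIO_TRANSFER_ASYNC
 * (timeout == 0 selects the dma_timeout module default):
 *
 *      struct rio_async_tx_wait w = { .token = cookie, .timeout = 0 };
 *      ret = ioctl(fd, RIO_WAIT_FOR_ASYNC, &w);
 */
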
static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp,
                        u64 size, struct rio_mport_mapping **mapping)
{
        struct rio_mport_mapping *map;

        map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (map == NULL)
                return -ENOMEM;

        map->virt_addr = dma_alloc_coherent(md->mport->dev.parent, size,
                                            &map->phys_addr, GFP_KERNEL);
        if (map->virt_addr == NULL) {
                kfree(map);
                return -ENOMEM;
        }

        map->dir = MAP_DMA;
        map->size = size;
        map->filp = filp;
        map->md = md;
        kref_init(&map->ref);
        mutex_lock(&md->buf_mutex);
        list_add_tail(&map->node, &md->mappings);
        mutex_unlock(&md->buf_mutex);
        *mapping = map;

        return 0;
}

static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
{
        struct mport_cdev_priv *priv = filp->private_data;
        struct mport_dev *md = priv->md;
        struct rio_dma_mem map;
        struct rio_mport_mapping *mapping = NULL;
        int ret;

        if (unlikely(copy_from_user(&map, arg, sizeof(map))))
                return -EFAULT;

        ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping);
        if (ret)
                return ret;

        map.dma_handle = mapping->phys_addr;

        if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
                mutex_lock(&md->buf_mutex);
                kref_put(&mapping->ref, mport_release_mapping);
                mutex_unlock(&md->buf_mutex);
                return -EFAULT;
        }

        return 0;
}

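/*
 * Illustrative user-space sketch, assuming the RIO_ALLOC_DMA ioctl code and
 * struct rio_dma_mem from <linux/rio_mport_cdev.h>: the returned dma_handle
 * can be mmap()ed through this device and later passed as the .handle of a
 * transfer that uses the internal (loc_addr == NULL) buffer path:
 *
 *      struct rio_dma_mem dbuf = { .length = bufsize };
 *      ioctl(fd, RIO_ALLOC_DMA, &dbuf);
 *      ptr = mmap(NULL, bufsize, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                 fd, dbuf.dma_handle);
 */
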
static int rio_mport_free_dma(struct file *filp, void __user *arg)
{
        struct mport_cdev_priv *priv = filp->private_data;
        struct mport_dev *md = priv->md;
        u64 handle;
        int ret = -EFAULT;
        struct rio_mport_mapping *map, *_map;

        if (copy_from_user(&handle, arg, sizeof(handle)))
                return -EFAULT;
        rmcd_debug(EXIT, "filp=%p", filp);

        mutex_lock(&md->buf_mutex);
        list_for_each_entry_safe(map, _map, &md->mappings, node) {
                if (map->dir == MAP_DMA && map->phys_addr == handle &&
                    map->filp == filp) {
                        kref_put(&map->ref, mport_release_mapping);
                        ret = 0;
                        break;
                }
        }
        mutex_unlock(&md->buf_mutex);

        if (ret == -EFAULT) {
                rmcd_debug(DMA, "ERR no matching mapping");
                return ret;
        }

        return 0;
}
#else
static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
{
        return -ENODEV;
}

static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
{
        return -ENODEV;
}

static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
{
        return -ENODEV;
}

static int rio_mport_free_dma(struct file *filp, void __user *arg)
{
        return -ENODEV;
}
#endif /* CONFIG_RAPIDIO_DMA_ENGINE */

/*
 * Inbound/outbound memory mapping functions
 */

static int
rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp,
                                 u64 raddr, u64 size,
                                 struct rio_mport_mapping **mapping)
{
        struct rio_mport *mport = md->mport;
        struct rio_mport_mapping *map;
        int ret;

        /* rio_map_inb_region() accepts u32 size */
        if (size > 0xffffffff)
                return -EINVAL;

        map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (map == NULL)
                return -ENOMEM;

        map->virt_addr = dma_alloc_coherent(mport->dev.parent, size,
                                            &map->phys_addr, GFP_KERNEL);
        if (map->virt_addr == NULL) {
                ret = -ENOMEM;
                goto err_dma_alloc;
        }

        if (raddr == RIO_MAP_ANY_ADDR)
                raddr = map->phys_addr;
        ret = rio_map_inb_region(mport, map->phys_addr, raddr, (u32)size, 0);
        if (ret < 0)
                goto err_map_inb;

        map->dir = MAP_INBOUND;
        map->rio_addr = raddr;
        map->size = size;
        map->filp = filp;
        map->md = md;
        kref_init(&map->ref);
        mutex_lock(&md->buf_mutex);
        list_add_tail(&map->node, &md->mappings);
        mutex_unlock(&md->buf_mutex);
        *mapping = map;
        return 0;

err_map_inb:
        dma_free_coherent(mport->dev.parent, size,
                          map->virt_addr, map->phys_addr);
err_dma_alloc:
        kfree(map);
        return ret;
}

static int
rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp,
                              u64 raddr, u64 size,
                              struct rio_mport_mapping **mapping)
{
        struct rio_mport_mapping *map;
        int err = -ENOMEM;

        if (raddr == RIO_MAP_ANY_ADDR)
                goto get_new;

        mutex_lock(&md->buf_mutex);
        list_for_each_entry(map, &md->mappings, node) {
                if (map->dir != MAP_INBOUND)
                        continue;
                if (raddr == map->rio_addr && size == map->size) {
                        /* allow exact match only */
                        *mapping = map;
                        err = 0;
                        break;
                } else if (raddr < (map->rio_addr + map->size - 1) &&
                           (raddr + size) > map->rio_addr) {
                        err = -EBUSY;
                        break;
                }
        }
        mutex_unlock(&md->buf_mutex);

        if (err != -ENOMEM)
                return err;
get_new:
        /* not found, create new */
        return rio_mport_create_inbound_mapping(md, filp, raddr, size, mapping);
}

static int rio_mport_map_inbound(struct file *filp, void __user *arg)
{
        struct mport_cdev_priv *priv = filp->private_data;
        struct mport_dev *md = priv->md;
        struct rio_mmap map;
        struct rio_mport_mapping *mapping = NULL;
        int ret;

        if (!md->mport->ops->map_inb)
                return -EPROTONOSUPPORT;
        if (unlikely(copy_from_user(&map, arg, sizeof(map))))
                return -EFAULT;

        rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);

        ret = rio_mport_get_inbound_mapping(md, filp, map.rio_addr,
                                            map.length, &mapping);
        if (ret)
                return ret;

        map.handle = mapping->phys_addr;
        map.rio_addr = mapping->rio_addr;

        if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
                /* Delete mapping if it was created by this request */
                if (ret == 0 && mapping->filp == filp) {
                        mutex_lock(&md->buf_mutex);
                        kref_put(&mapping->ref, mport_release_mapping);
                        mutex_unlock(&md->buf_mutex);
                }
                return -EFAULT;
        }

        return 0;
}

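/*
 * Illustrative user-space sketch, assuming the RIO_MAP_INBOUND ioctl code
 * from <linux/rio_mport_cdev.h>; RIO_MAP_ANY_ADDR lets the driver pick the
 * RapidIO base address, which is returned in map.rio_addr:
 *
 *      struct rio_mmap map = { .rio_addr = RIO_MAP_ANY_ADDR,
 *                              .length = size };
 *      ioctl(fd, RIO_MAP_INBOUND, &map);
 *      ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                 fd, map.handle);
 */
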
/*
 * rio_mport_inbound_free() - unmap from RapidIO address space and free
 *                            previously allocated inbound DMA coherent buffer
 * @filp: file pointer associated with the call
 * @arg: buffer handle returned by the allocation routine
 */
static int rio_mport_inbound_free(struct file *filp, void __user *arg)
{
        struct mport_cdev_priv *priv = filp->private_data;
        struct mport_dev *md = priv->md;
        u64 handle;
        struct rio_mport_mapping *map, *_map;

        rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);

        if (!md->mport->ops->unmap_inb)
                return -EPROTONOSUPPORT;

        if (copy_from_user(&handle, arg, sizeof(handle)))
                return -EFAULT;

        mutex_lock(&md->buf_mutex);
        list_for_each_entry_safe(map, _map, &md->mappings, node) {
                if (map->dir == MAP_INBOUND && map->phys_addr == handle) {
                        if (map->filp == filp) {
                                map->filp = NULL;
                                kref_put(&map->ref, mport_release_mapping);
                        }
                        break;
                }
        }
        mutex_unlock(&md->buf_mutex);

        return 0;
}

/*
 * maint_port_idx_get() - Get the port index of the mport instance
 * @priv: driver private data
 * @arg: port index
 */
static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg)
{
        struct mport_dev *md = priv->md;
        u32 port_idx = md->mport->index;

        rmcd_debug(MPORT, "port_index=%d", port_idx);

        if (copy_to_user(arg, &port_idx, sizeof(port_idx)))
                return -EFAULT;

        return 0;
}

static int rio_mport_add_event(struct mport_cdev_priv *priv,
                               struct rio_event *event)
{
        int overflow;

        if (!(priv->event_mask & event->header))
                return -EACCES;

        spin_lock(&priv->fifo_lock);
        overflow = kfifo_avail(&priv->event_fifo) < sizeof(*event)
                || kfifo_in(&priv->event_fifo, (unsigned char *)event,
                        sizeof(*event)) != sizeof(*event);
        spin_unlock(&priv->fifo_lock);

        wake_up_interruptible(&priv->event_rx_wait);

        if (overflow) {
                dev_warn(&priv->md->dev, DRV_NAME ": event fifo overflow\n");
                return -EBUSY;
        }

        return 0;
}

static void rio_mport_doorbell_handler(struct rio_mport *mport, void *dev_id,
                                       u16 src, u16 dst, u16 info)
{
        struct mport_dev *data = dev_id;
        struct mport_cdev_priv *priv;
        struct rio_mport_db_filter *db_filter;
        struct rio_event event;
        int handled;

        event.header = RIO_DOORBELL;
        event.u.doorbell.rioid = src;
        event.u.doorbell.payload = info;

        handled = 0;
        spin_lock(&data->db_lock);
        list_for_each_entry(db_filter, &data->doorbells, data_node) {
                if (((db_filter->filter.rioid == RIO_INVALID_DESTID ||
                      db_filter->filter.rioid == src)) &&
                      info >= db_filter->filter.low &&
                      info <= db_filter->filter.high) {
                        priv = db_filter->priv;
                        rio_mport_add_event(priv, &event);
                        handled = 1;
                }
        }
        spin_unlock(&data->db_lock);

        if (!handled)
                dev_warn(&data->dev,
                        "%s: spurious DB received from 0x%x, info=0x%04x\n",
                        __func__, src, info);
}

static int rio_mport_add_db_filter(struct mport_cdev_priv *priv,
                                   void __user *arg)
{
        struct mport_dev *md = priv->md;
        struct rio_mport_db_filter *db_filter;
        struct rio_doorbell_filter filter;
        unsigned long flags;
        int ret;

        if (copy_from_user(&filter, arg, sizeof(filter)))
                return -EFAULT;

        if (filter.low > filter.high)
                return -EINVAL;

        ret = rio_request_inb_dbell(md->mport, md, filter.low, filter.high,
                                    rio_mport_doorbell_handler);
        if (ret) {
                rmcd_error("%s failed to register IBDB, err=%d",
                           dev_name(&md->dev), ret);
                return ret;
        }

        db_filter = kzalloc(sizeof(*db_filter), GFP_KERNEL);
        if (db_filter == NULL) {
                rio_release_inb_dbell(md->mport, filter.low, filter.high);
                return -ENOMEM;
        }

        db_filter->filter = filter;
        db_filter->priv = priv;
        spin_lock_irqsave(&md->db_lock, flags);
        list_add_tail(&db_filter->priv_node, &priv->db_filters);
        list_add_tail(&db_filter->data_node, &md->doorbells);
        spin_unlock_irqrestore(&md->db_lock, flags);

        return 0;
}

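/*
 * Illustrative user-space sketch, assuming the RIO_ENABLE_DOORBELL_RANGE
 * ioctl code and struct rio_doorbell_filter from <linux/rio_mport_cdev.h>
 * (rioid == RIO_INVALID_DESTID matches any sender). Matching doorbells are
 * queued to the per-descriptor event FIFO by rio_mport_add_event() above,
 * provided RIO_DOORBELL is set in the descriptor's event mask:
 *
 *      struct rio_doorbell_filter f = {
 *              .rioid = RIO_INVALID_DESTID, .low = 0, .high = 0xffff,
 *      };
 *      ioctl(fd, RIO_ENABLE_DOORBELL_RANGE, &f);
 */
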
static void rio_mport_delete_db_filter(struct rio_mport_db_filter *db_filter)
{
        list_del(&db_filter->data_node);
        list_del(&db_filter->priv_node);
        kfree(db_filter);
}

static int rio_mport_remove_db_filter(struct mport_cdev_priv *priv,
                                      void __user *arg)
{
        struct rio_mport_db_filter *db_filter;
        struct rio_doorbell_filter filter;
        unsigned long flags;
        int ret = -EINVAL;

        if (copy_from_user(&filter, arg, sizeof(filter)))
                return -EFAULT;

        if (filter.low > filter.high)
                return -EINVAL;

        spin_lock_irqsave(&priv->md->db_lock, flags);
        list_for_each_entry(db_filter, &priv->db_filters, priv_node) {
                if (db_filter->filter.rioid == filter.rioid &&
                    db_filter->filter.low == filter.low &&
                    db_filter->filter.high == filter.high) {
                        rio_mport_delete_db_filter(db_filter);
                        ret = 0;
                        break;
                }
        }
        spin_unlock_irqrestore(&priv->md->db_lock, flags);

        if (!ret)
                rio_release_inb_dbell(priv->md->mport, filter.low, filter.high);

        return ret;
}

static int rio_mport_match_pw(union rio_pw_msg *msg,
                              struct rio_pw_filter *filter)
{
        if ((msg->em.comptag & filter->mask) < filter->low ||
            (msg->em.comptag & filter->mask) > filter->high)
                return 0;
        return 1;
}

static int rio_mport_pw_handler(struct rio_mport *mport, void *context,
                                union rio_pw_msg *msg, int step)
{
        struct mport_dev *md = context;
        struct mport_cdev_priv *priv;
        struct rio_mport_pw_filter *pw_filter;
        struct rio_event event;
        int handled;

        event.header = RIO_PORTWRITE;
        memcpy(event.u.portwrite.payload, msg->raw, RIO_PW_MSG_SIZE);

        handled = 0;
        spin_lock(&md->pw_lock);
        list_for_each_entry(pw_filter, &md->portwrites, md_node) {
                if (rio_mport_match_pw(msg, &pw_filter->filter)) {
                        priv = pw_filter->priv;
                        rio_mport_add_event(priv, &event);
                        handled = 1;
                }
        }
        spin_unlock(&md->pw_lock);

        if (!handled) {
                printk_ratelimited(KERN_WARNING DRV_NAME
                        ": mport%d received spurious PW from 0x%08x\n",
                        mport->id, msg->em.comptag);
        }

        return 0;
}

static int rio_mport_add_pw_filter(struct mport_cdev_priv *priv,
                                   void __user *arg)
{
        struct mport_dev *md = priv->md;
        struct rio_mport_pw_filter *pw_filter;
        struct rio_pw_filter filter;
        unsigned long flags;
        int hadd = 0;

        if (copy_from_user(&filter, arg, sizeof(filter)))
                return -EFAULT;

        pw_filter = kzalloc(sizeof(*pw_filter), GFP_KERNEL);
        if (pw_filter == NULL)
                return -ENOMEM;

        pw_filter->filter = filter;
        pw_filter->priv = priv;
        spin_lock_irqsave(&md->pw_lock, flags);
        if (list_empty(&md->portwrites))
                hadd = 1;
        list_add_tail(&pw_filter->priv_node, &priv->pw_filters);
        list_add_tail(&pw_filter->md_node, &md->portwrites);
        spin_unlock_irqrestore(&md->pw_lock, flags);

        if (hadd) {
                int ret;

                ret = rio_add_mport_pw_handler(md->mport, md,
                                               rio_mport_pw_handler);
                if (ret) {
                        dev_err(&md->dev,
                                "%s: failed to add IB_PW handler, err=%d\n",
                                __func__, ret);
                        return ret;
                }
                rio_pw_enable(md->mport, 1);
        }

        return 0;
}

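/*
 * Illustrative user-space sketch, assuming the RIO_ENABLE_PORTWRITE_RANGE
 * ioctl code and struct rio_pw_filter from <linux/rio_mport_cdev.h>; a
 * port-write matches when (comptag & mask) falls within [low, high], as
 * implemented by rio_mport_match_pw() above:
 *
 *      struct rio_pw_filter f = {
 *              .mask = 0xffffffff, .low = comptag, .high = comptag,
 *      };
 *      ioctl(fd, RIO_ENABLE_PORTWRITE_RANGE, &f);
 */
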
1642 | static void rio_mport_delete_pw_filter(struct rio_mport_pw_filter *pw_filter) | |
1643 | { | |
1644 | list_del(&pw_filter->md_node); | |
1645 | list_del(&pw_filter->priv_node); | |
1646 | kfree(pw_filter); | |
1647 | } | |
1648 | ||
1649 | static int rio_mport_match_pw_filter(struct rio_pw_filter *a, | |
1650 | struct rio_pw_filter *b) | |
1651 | { | |
1652 | if ((a->mask == b->mask) && (a->low == b->low) && (a->high == b->high)) | |
1653 | return 1; | |
1654 | return 0; | |
1655 | } | |
1656 | ||
1657 | static int rio_mport_remove_pw_filter(struct mport_cdev_priv *priv, | |
1658 | void __user *arg) | |
1659 | { | |
1660 | struct mport_dev *md = priv->md; | |
1661 | struct rio_mport_pw_filter *pw_filter; | |
1662 | struct rio_pw_filter filter; | |
1663 | unsigned long flags; | |
1664 | int ret = -EINVAL; | |
1665 | int hdel = 0; | |
1666 | ||
1667 | if (copy_from_user(&filter, arg, sizeof(filter))) | |
1668 | return -EFAULT; | |
1669 | ||
1670 | spin_lock_irqsave(&md->pw_lock, flags); | |
1671 | list_for_each_entry(pw_filter, &priv->pw_filters, priv_node) { | |
1672 | if (rio_mport_match_pw_filter(&pw_filter->filter, &filter)) { | |
1673 | rio_mport_delete_pw_filter(pw_filter); | |
1674 | ret = 0; | |
1675 | break; | |
1676 | } | |
1677 | } | |
1678 | ||
1679 | if (list_empty(&md->portwrites)) | |
1680 | hdel = 1; | |
1681 | spin_unlock_irqrestore(&md->pw_lock, flags); | |
1682 | ||
1683 | if (hdel) { | |
1684 | rio_del_mport_pw_handler(md->mport, md, | 
1685 | rio_mport_pw_handler); | 
1686 | rio_pw_enable(md->mport, 0); | |
1687 | } | |
1688 | ||
1689 | return ret; | |
1690 | } | |
1691 | ||
1692 | /* | |
1693 | * rio_release_dev - release routine for kernel RIO device object | |
1694 | * @dev: kernel device object associated with a RIO device structure | |
1695 | * | |
1696 | * Frees the RIO device structure associated with the given kernel | 
1697 | * device object. | 
1698 | */ | |
1699 | static void rio_release_dev(struct device *dev) | |
1700 | { | |
1701 | struct rio_dev *rdev; | |
1702 | ||
1703 | rdev = to_rio_dev(dev); | |
1704 | pr_info(DRV_PREFIX "%s: %s\n", __func__, rio_name(rdev)); | |
1705 | kfree(rdev); | |
1706 | } | |
1707 | ||
1708 | ||
1709 | static void rio_release_net(struct device *dev) | |
1710 | { | |
1711 | struct rio_net *net; | |
1712 | ||
1713 | net = to_rio_net(dev); | |
1714 | rmcd_debug(RDEV, "net_%d", net->id); | |
1715 | kfree(net); | |
1716 | } | |
1717 | ||
1718 | ||
1719 | /* | |
1720 | * rio_mport_add_riodev - creates a kernel RIO device object | |
1721 | * | |
1722 | * Allocates a RIO device data structure and initializes required fields based | |
1723 | * on device's configuration space contents. | |
1724 | * If the device has switch capabilities, then a switch-specific portion is | 
1725 | * allocated and configured. | |
1726 | */ | |
1727 | static int rio_mport_add_riodev(struct mport_cdev_priv *priv, | |
1728 | void __user *arg) | |
1729 | { | |
1730 | struct mport_dev *md = priv->md; | |
1731 | struct rio_rdev_info dev_info; | |
1732 | struct rio_dev *rdev; | |
1733 | struct rio_switch *rswitch = NULL; | |
1734 | struct rio_mport *mport; | |
1735 | size_t size; | |
1736 | u32 rval; | |
1737 | u32 swpinfo = 0; | |
1738 | u16 destid; | |
1739 | u8 hopcount; | |
1740 | int err; | |
1741 | ||
1742 | if (copy_from_user(&dev_info, arg, sizeof(dev_info))) | 
1743 | return -EFAULT; | 
/* The name comes from user space and may not be NUL-terminated */
dev_info.name[sizeof(dev_info.name) - 1] = '\0';
1744 | ||
1745 | rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name, | |
1746 | dev_info.comptag, dev_info.destid, dev_info.hopcount); | |
1747 | ||
1748 | if (bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name)) { | |
1749 | rmcd_debug(RDEV, "device %s already exists", dev_info.name); | |
1750 | return -EEXIST; | |
1751 | } | |
1752 | ||
4e1016da | 1753 | size = sizeof(*rdev); |
e8de3701 | 1754 | mport = md->mport; |
4e1016da AB |
1755 | destid = dev_info.destid; |
1756 | hopcount = dev_info.hopcount; | |
e8de3701 AB |
1757 | |
1758 | if (rio_mport_read_config_32(mport, destid, hopcount, | |
1759 | RIO_PEF_CAR, &rval)) | |
1760 | return -EIO; | |
1761 | ||
1762 | if (rval & RIO_PEF_SWITCH) { | |
1763 | rio_mport_read_config_32(mport, destid, hopcount, | |
1764 | RIO_SWP_INFO_CAR, &swpinfo); | |
1765 | size += (RIO_GET_TOTAL_PORTS(swpinfo) * | |
1766 | sizeof(rswitch->nextdev[0])) + sizeof(*rswitch); | |
1767 | } | |
1768 | ||
1769 | rdev = kzalloc(size, GFP_KERNEL); | |
1770 | if (rdev == NULL) | |
1771 | return -ENOMEM; | |
1772 | ||
1773 | if (mport->net == NULL) { | |
1774 | struct rio_net *net; | |
1775 | ||
1776 | net = rio_alloc_net(mport); | |
1777 | if (!net) { | |
1778 | err = -ENOMEM; | |
1779 | rmcd_debug(RDEV, "failed to allocate net object"); | |
1780 | goto cleanup; | |
1781 | } | |
1782 | ||
1783 | net->id = mport->id; | |
1784 | net->hport = mport; | |
1785 | dev_set_name(&net->dev, "rnet_%d", net->id); | |
1786 | net->dev.parent = &mport->dev; | |
1787 | net->dev.release = rio_release_net; | |
1788 | err = rio_add_net(net); | |
1789 | if (err) { | |
1790 | rmcd_debug(RDEV, "failed to register net, err=%d", err); | |
1791 | kfree(net); | |
1792 | goto cleanup; | |
1793 | } | |
1794 | } | |
1795 | ||
1796 | rdev->net = mport->net; | |
1797 | rdev->pef = rval; | |
1798 | rdev->swpinfo = swpinfo; | |
1799 | rio_mport_read_config_32(mport, destid, hopcount, | |
1800 | RIO_DEV_ID_CAR, &rval); | |
1801 | rdev->did = rval >> 16; | |
1802 | rdev->vid = rval & 0xffff; | |
1803 | rio_mport_read_config_32(mport, destid, hopcount, RIO_DEV_INFO_CAR, | |
1804 | &rdev->device_rev); | |
1805 | rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_ID_CAR, | |
1806 | &rval); | |
1807 | rdev->asm_did = rval >> 16; | |
1808 | rdev->asm_vid = rval & 0xffff; | |
1809 | rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_INFO_CAR, | |
1810 | &rval); | |
1811 | rdev->asm_rev = rval >> 16; | |
1812 | ||
1813 | if (rdev->pef & RIO_PEF_EXT_FEATURES) { | |
1814 | rdev->efptr = rval & 0xffff; | |
1815 | rdev->phys_efptr = rio_mport_get_physefb(mport, 0, destid, | |
1816 | hopcount); | |
1817 | ||
1818 | rdev->em_efptr = rio_mport_get_feature(mport, 0, destid, | |
1819 | hopcount, RIO_EFB_ERR_MGMNT); | |
1820 | } | |
1821 | ||
1822 | rio_mport_read_config_32(mport, destid, hopcount, RIO_SRC_OPS_CAR, | |
1823 | &rdev->src_ops); | |
1824 | rio_mport_read_config_32(mport, destid, hopcount, RIO_DST_OPS_CAR, | |
1825 | &rdev->dst_ops); | |
1826 | ||
1827 | rdev->comp_tag = dev_info.comptag; | |
1828 | rdev->destid = destid; | |
1829 | /* hopcount is stored as specified by the caller, regardless of EP or SW */ | 
1830 | rdev->hopcount = hopcount; | |
1831 | ||
1832 | if (rdev->pef & RIO_PEF_SWITCH) { | |
1833 | rswitch = rdev->rswitch; | |
1834 | rswitch->route_table = NULL; | |
1835 | } | |
1836 | ||
1837 | if (strlen(dev_info.name)) | |
1838 | dev_set_name(&rdev->dev, "%s", dev_info.name); | |
1839 | else if (rdev->pef & RIO_PEF_SWITCH) | |
1840 | dev_set_name(&rdev->dev, "%02x:s:%04x", mport->id, | |
1841 | rdev->comp_tag & RIO_CTAG_UDEVID); | |
1842 | else | |
1843 | dev_set_name(&rdev->dev, "%02x:e:%04x", mport->id, | |
1844 | rdev->comp_tag & RIO_CTAG_UDEVID); | |
1845 | ||
1846 | INIT_LIST_HEAD(&rdev->net_list); | |
1847 | rdev->dev.parent = &mport->net->dev; | |
1848 | rio_attach_device(rdev); | |
1849 | rdev->dev.release = rio_release_dev; | |
1850 | ||
1851 | if (rdev->dst_ops & RIO_DST_OPS_DOORBELL) | |
1852 | rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE], | |
1853 | 0, 0xffff); | |
1854 | err = rio_add_device(rdev); | |
1855 | if (err) | |
1856 | goto cleanup; | |
1857 | rio_dev_get(rdev); | |
1858 | ||
1859 | return 0; | |
1860 | cleanup: | |
1861 | kfree(rdev); | |
1862 | return err; | |
1863 | } | |
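/*
 * Illustrative user-space sketch (not part of the driver): create a
 * kernel object for a device found by a user-space enumerator. The
 * comptag/destid/hopcount values are placeholders; an empty name lets
 * the function above generate the "xx:e:xxxx"/"xx:s:xxxx" form.
 *
 *	struct rio_rdev_info info = { 0 };
 *
 *	info.comptag  = 0x10001;
 *	info.destid   = 0x0001;
 *	info.hopcount = 0xff;
 *	if (ioctl(fd, RIO_DEV_ADD, &info) < 0)
 *		perror("RIO_DEV_ADD");
 */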
1864 | ||
1865 | static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg) | |
1866 | { | |
1867 | struct rio_rdev_info dev_info; | |
1868 | struct rio_dev *rdev = NULL; | |
1869 | struct device *dev; | |
1870 | struct rio_mport *mport; | |
1871 | struct rio_net *net; | |
1872 | ||
1873 | if (copy_from_user(&dev_info, arg, sizeof(dev_info))) | 
1874 | return -EFAULT; | 
/* The name comes from user space and may not be NUL-terminated */
dev_info.name[sizeof(dev_info.name) - 1] = '\0';
1875 | ||
1876 | mport = priv->md->mport; | |
1877 | ||
1878 | /* If device name is specified, removal by name has priority */ | |
1879 | if (strlen(dev_info.name)) { | |
1880 | dev = bus_find_device_by_name(&rio_bus_type, NULL, | |
1881 | dev_info.name); | |
1882 | if (dev) | |
1883 | rdev = to_rio_dev(dev); | |
1884 | } else { | |
1885 | do { | |
1886 | rdev = rio_get_comptag(dev_info.comptag, rdev); | |
1887 | if (rdev && rdev->dev.parent == &mport->net->dev && | |
4e1016da AB |
1888 | rdev->destid == dev_info.destid && |
1889 | rdev->hopcount == dev_info.hopcount) | |
e8de3701 AB |
1890 | break; |
1891 | } while (rdev); | |
1892 | } | |
1893 | ||
1894 | if (!rdev) { | |
1895 | rmcd_debug(RDEV, | |
1896 | "device name:%s ct:0x%x did:0x%x hc:0x%x not found", | |
1897 | dev_info.name, dev_info.comptag, dev_info.destid, | |
1898 | dev_info.hopcount); | |
1899 | return -ENODEV; | |
1900 | } | |
1901 | ||
1902 | net = rdev->net; | |
1903 | rio_dev_put(rdev); | |
1904 | rio_del_device(rdev, RIO_DEVICE_SHUTDOWN); | |
1905 | ||
1906 | if (list_empty(&net->devices)) { | |
1907 | rio_free_net(net); | |
1908 | mport->net = NULL; | |
1909 | } | |
1910 | ||
1911 | return 0; | |
1912 | } | |
1913 | ||
1914 | /* | |
1915 | * Mport cdev management | |
1916 | */ | |
1917 | ||
1918 | /* | |
1919 | * mport_cdev_open() - Open character device (mport) | |
1920 | */ | |
1921 | static int mport_cdev_open(struct inode *inode, struct file *filp) | |
1922 | { | |
1923 | int ret; | |
1924 | int minor = iminor(inode); | |
1925 | struct mport_dev *chdev; | |
1926 | struct mport_cdev_priv *priv; | |
1927 | ||
1928 | /* Test for valid device */ | |
1929 | if (minor >= RIO_MAX_MPORTS) { | |
1930 | rmcd_error("Invalid minor device number"); | |
1931 | return -EINVAL; | |
1932 | } | |
1933 | ||
1934 | chdev = container_of(inode->i_cdev, struct mport_dev, cdev); | |
1935 | ||
1936 | rmcd_debug(INIT, "%s filp=%p", dev_name(&chdev->dev), filp); | |
1937 | ||
1938 | if (atomic_read(&chdev->active) == 0) | |
1939 | return -ENODEV; | |
1940 | ||
1941 | get_device(&chdev->dev); | |
1942 | ||
1943 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | |
1944 | if (!priv) { | |
1945 | put_device(&chdev->dev); | |
1946 | return -ENOMEM; | |
1947 | } | |
1948 | ||
1949 | priv->md = chdev; | |
1950 | ||
1951 | mutex_lock(&chdev->file_mutex); | |
1952 | list_add_tail(&priv->list, &chdev->file_list); | |
1953 | mutex_unlock(&chdev->file_mutex); | |
1954 | ||
1955 | INIT_LIST_HEAD(&priv->db_filters); | |
1956 | INIT_LIST_HEAD(&priv->pw_filters); | |
1957 | spin_lock_init(&priv->fifo_lock); | |
1958 | init_waitqueue_head(&priv->event_rx_wait); | |
1959 | ret = kfifo_alloc(&priv->event_fifo, | |
1960 | sizeof(struct rio_event) * MPORT_EVENT_DEPTH, | |
1961 | GFP_KERNEL); | |
1962 | if (ret < 0) { | |
1963 | dev_err(&chdev->dev, DRV_NAME ": kfifo_alloc failed\n"); | |
1964 | ret = -ENOMEM; | |
1965 | goto err_fifo; | |
1966 | } | |
1967 | ||
1968 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE | |
1969 | INIT_LIST_HEAD(&priv->async_list); | |
1970 | INIT_LIST_HEAD(&priv->pend_list); | |
1971 | spin_lock_init(&priv->req_lock); | |
1972 | mutex_init(&priv->dma_lock); | |
1973 | #endif | |
1974 | ||
1975 | filp->private_data = priv; | |
1976 | goto out; | |
1977 | err_fifo: | |
1978 | kfree(priv); | |
1979 | out: | |
1980 | return ret; | |
1981 | } | |
1982 | ||
1983 | static int mport_cdev_fasync(int fd, struct file *filp, int mode) | |
1984 | { | |
1985 | struct mport_cdev_priv *priv = filp->private_data; | |
1986 | ||
1987 | return fasync_helper(fd, filp, mode, &priv->async_queue); | |
1988 | } | |
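/*
 * Illustrative user-space sketch (not part of the driver): request
 * SIGIO delivery through the standard fcntl() interface, which lands
 * in mport_cdev_fasync() above:
 *
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC);
 */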
1989 | ||
1990 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE | |
1991 | static void mport_cdev_release_dma(struct file *filp) | |
1992 | { | |
1993 | struct mport_cdev_priv *priv = filp->private_data; | |
1994 | struct mport_dev *md; | |
1995 | struct mport_dma_req *req, *req_next; | |
1996 | unsigned long tmo = msecs_to_jiffies(dma_timeout); | |
1997 | long wret; | |
1998 | LIST_HEAD(list); | |
1999 | ||
2000 | rmcd_debug(EXIT, "from filp=%p %s(%d)", | |
2001 | filp, current->comm, task_pid_nr(current)); | |
2002 | ||
2003 | if (!priv->dmach) { | |
2004 | rmcd_debug(EXIT, "No DMA channel for filp=%p", filp); | |
2005 | return; | |
2006 | } | |
2007 | ||
2008 | md = priv->md; | |
2009 | ||
2010 | flush_workqueue(dma_wq); | |
2011 | ||
2012 | spin_lock(&priv->req_lock); | |
2013 | if (!list_empty(&priv->async_list)) { | |
2014 | rmcd_debug(EXIT, "async list not empty filp=%p %s(%d)", | |
2015 | filp, current->comm, task_pid_nr(current)); | |
2016 | list_splice_init(&priv->async_list, &list); | |
2017 | } | |
2018 | spin_unlock(&priv->req_lock); | |
2019 | ||
2020 | if (!list_empty(&list)) { | |
2021 | rmcd_debug(EXIT, "temp list not empty"); | |
2022 | list_for_each_entry_safe(req, req_next, &list, node) { | |
2023 | rmcd_debug(EXIT, "free req->filp=%p cookie=%d compl=%s", | |
2024 | req->filp, req->cookie, | |
2025 | completion_done(&req->req_comp) ? "yes" : "no"); | 
2026 | list_del(&req->node); | |
2027 | dma_req_free(req); | |
2028 | } | |
2029 | } | |
2030 | ||
2031 | if (!list_empty(&priv->pend_list)) { | |
2032 | rmcd_debug(EXIT, "Free pending DMA requests for filp=%p %s(%d)", | |
2033 | filp, current->comm, task_pid_nr(current)); | |
2034 | list_for_each_entry_safe(req, | |
2035 | req_next, &priv->pend_list, node) { | |
2036 | rmcd_debug(EXIT, "free req->filp=%p cookie=%d compl=%s", | |
2037 | req->filp, req->cookie, | |
2038 | completion_done(&req->req_comp) ? "yes" : "no"); | 
2039 | list_del(&req->node); | |
2040 | dma_req_free(req); | |
2041 | } | |
2042 | } | |
2043 | ||
2044 | put_dma_channel(priv); | |
2045 | wret = wait_for_completion_interruptible_timeout(&priv->comp, tmo); | |
2046 | ||
2047 | if (wret <= 0) { | |
2048 | rmcd_error("%s(%d) failed waiting for DMA release err=%ld", | |
2049 | current->comm, task_pid_nr(current), wret); | |
2050 | } | |
2051 | ||
2052 | spin_lock(&priv->req_lock); | |
2053 | ||
2054 | if (!list_empty(&priv->pend_list)) { | |
2055 | rmcd_debug(EXIT, "ATTN: pending DMA requests, filp=%p %s(%d)", | |
2056 | filp, current->comm, task_pid_nr(current)); | |
2057 | } | |
2058 | ||
2059 | spin_unlock(&priv->req_lock); | |
2060 | ||
2061 | if (priv->dmach != priv->md->dma_chan) { | |
2062 | rmcd_debug(EXIT, "Release DMA channel for filp=%p %s(%d)", | |
2063 | filp, current->comm, task_pid_nr(current)); | |
2064 | rio_release_dma(priv->dmach); | |
2065 | } else { | |
2066 | rmcd_debug(EXIT, "Adjust default DMA channel refcount"); | |
2067 | kref_put(&md->dma_ref, mport_release_def_dma); | |
2068 | } | |
2069 | ||
2070 | priv->dmach = NULL; | |
2071 | } | |
2072 | #else | |
2073 | #define mport_cdev_release_dma(filp) do {} while (0) | 
2074 | #endif | |
2075 | ||
2076 | /* | |
2077 | * mport_cdev_release() - Release character device | |
2078 | */ | |
2079 | static int mport_cdev_release(struct inode *inode, struct file *filp) | |
2080 | { | |
2081 | struct mport_cdev_priv *priv = filp->private_data; | |
2082 | struct mport_dev *chdev; | |
2083 | struct rio_mport_pw_filter *pw_filter, *pw_filter_next; | |
2084 | struct rio_mport_db_filter *db_filter, *db_filter_next; | |
2085 | struct rio_mport_mapping *map, *_map; | |
2086 | unsigned long flags; | |
2087 | ||
2088 | rmcd_debug(EXIT, "%s filp=%p", dev_name(&priv->md->dev), filp); | |
2089 | ||
2090 | chdev = priv->md; | |
2091 | mport_cdev_release_dma(filp); | |
2092 | ||
2093 | priv->event_mask = 0; | |
2094 | ||
2095 | spin_lock_irqsave(&chdev->pw_lock, flags); | |
2096 | if (!list_empty(&priv->pw_filters)) { | |
2097 | list_for_each_entry_safe(pw_filter, pw_filter_next, | |
2098 | &priv->pw_filters, priv_node) | |
2099 | rio_mport_delete_pw_filter(pw_filter); | |
2100 | } | |
2101 | spin_unlock_irqrestore(&chdev->pw_lock, flags); | |
2102 | ||
2103 | spin_lock_irqsave(&chdev->db_lock, flags); | |
2104 | list_for_each_entry_safe(db_filter, db_filter_next, | |
2105 | &priv->db_filters, priv_node) { | |
2106 | rio_mport_delete_db_filter(db_filter); | |
2107 | } | |
2108 | spin_unlock_irqrestore(&chdev->db_lock, flags); | |
2109 | ||
2110 | kfifo_free(&priv->event_fifo); | |
2111 | ||
2112 | mutex_lock(&chdev->buf_mutex); | |
2113 | list_for_each_entry_safe(map, _map, &chdev->mappings, node) { | |
2114 | if (map->filp == filp) { | |
2115 | rmcd_debug(EXIT, "release mapping %p filp=%p", | |
2116 | map->virt_addr, filp); | |
2117 | kref_put(&map->ref, mport_release_mapping); | |
2118 | } | |
2119 | } | |
2120 | mutex_unlock(&chdev->buf_mutex); | |
2121 | ||
2122 | mport_cdev_fasync(-1, filp, 0); | |
2123 | filp->private_data = NULL; | |
2124 | mutex_lock(&chdev->file_mutex); | |
2125 | list_del(&priv->list); | |
2126 | mutex_unlock(&chdev->file_mutex); | |
2127 | put_device(&chdev->dev); | |
2128 | kfree(priv); | |
2129 | return 0; | |
2130 | } | |
2131 | ||
2132 | /* | |
2133 | * mport_cdev_ioctl() - IOCTLs for character device | |
2134 | */ | |
2135 | static long mport_cdev_ioctl(struct file *filp, | |
2136 | unsigned int cmd, unsigned long arg) | |
2137 | { | |
2138 | int err = -EINVAL; | |
2139 | struct mport_cdev_priv *data = filp->private_data; | |
2140 | struct mport_dev *md = data->md; | |
2141 | ||
2142 | if (atomic_read(&md->active) == 0) | |
2143 | return -ENODEV; | |
2144 | ||
2145 | switch (cmd) { | |
2146 | case RIO_MPORT_MAINT_READ_LOCAL: | |
2147 | return rio_mport_maint_rd(data, (void __user *)arg, 1); | |
2148 | case RIO_MPORT_MAINT_WRITE_LOCAL: | |
2149 | return rio_mport_maint_wr(data, (void __user *)arg, 1); | |
2150 | case RIO_MPORT_MAINT_READ_REMOTE: | |
2151 | return rio_mport_maint_rd(data, (void __user *)arg, 0); | |
2152 | case RIO_MPORT_MAINT_WRITE_REMOTE: | |
2153 | return rio_mport_maint_wr(data, (void __user *)arg, 0); | |
2154 | case RIO_MPORT_MAINT_HDID_SET: | |
2155 | return maint_hdid_set(data, (void __user *)arg); | |
2156 | case RIO_MPORT_MAINT_COMPTAG_SET: | |
2157 | return maint_comptag_set(data, (void __user *)arg); | |
2158 | case RIO_MPORT_MAINT_PORT_IDX_GET: | |
2159 | return maint_port_idx_get(data, (void __user *)arg); | |
2160 | case RIO_MPORT_GET_PROPERTIES: | |
2161 | md->properties.hdid = md->mport->host_deviceid; | |
4e1016da AB |
2162 | if (copy_to_user((void __user *)arg, &(md->properties), |
2163 | sizeof(md->properties))) | |
e8de3701 AB |
2164 | return -EFAULT; |
2165 | return 0; | |
2166 | case RIO_ENABLE_DOORBELL_RANGE: | |
2167 | return rio_mport_add_db_filter(data, (void __user *)arg); | |
2168 | case RIO_DISABLE_DOORBELL_RANGE: | |
2169 | return rio_mport_remove_db_filter(data, (void __user *)arg); | |
2170 | case RIO_ENABLE_PORTWRITE_RANGE: | |
2171 | return rio_mport_add_pw_filter(data, (void __user *)arg); | |
2172 | case RIO_DISABLE_PORTWRITE_RANGE: | |
2173 | return rio_mport_remove_pw_filter(data, (void __user *)arg); | |
2174 | case RIO_SET_EVENT_MASK: | |
4e1016da | 2175 | data->event_mask = (u32)arg; |
e8de3701 AB |
2176 | return 0; |
2177 | case RIO_GET_EVENT_MASK: | |
2178 | if (copy_to_user((void __user *)arg, &data->event_mask, | |
4e1016da | 2179 | sizeof(u32))) |
e8de3701 AB |
2180 | return -EFAULT; |
2181 | return 0; | |
2182 | case RIO_MAP_OUTBOUND: | |
2183 | return rio_mport_obw_map(filp, (void __user *)arg); | |
2184 | case RIO_MAP_INBOUND: | |
2185 | return rio_mport_map_inbound(filp, (void __user *)arg); | |
2186 | case RIO_UNMAP_OUTBOUND: | |
2187 | return rio_mport_obw_free(filp, (void __user *)arg); | |
2188 | case RIO_UNMAP_INBOUND: | |
2189 | return rio_mport_inbound_free(filp, (void __user *)arg); | |
2190 | case RIO_ALLOC_DMA: | |
2191 | return rio_mport_alloc_dma(filp, (void __user *)arg); | |
2192 | case RIO_FREE_DMA: | |
2193 | return rio_mport_free_dma(filp, (void __user *)arg); | |
2194 | case RIO_WAIT_FOR_ASYNC: | |
2195 | return rio_mport_wait_for_async_dma(filp, (void __user *)arg); | |
2196 | case RIO_TRANSFER: | |
2197 | return rio_mport_transfer_ioctl(filp, (void __user *)arg); | |
2198 | case RIO_DEV_ADD: | |
2199 | return rio_mport_add_riodev(data, (void __user *)arg); | |
2200 | case RIO_DEV_DEL: | |
2201 | return rio_mport_del_riodev(data, (void __user *)arg); | |
2202 | default: | |
2203 | break; | |
2204 | } | |
2205 | ||
2206 | return err; | |
2207 | } | |
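/*
 * Illustrative user-space sketch (not part of the driver): open an
 * mport device node and issue a few of the ioctls dispatched above.
 * The "/dev/rio_mport0" node name and struct rio_mport_properties are
 * assumed to come from udev and <linux/rio_mport_cdev.h>; note that
 * RIO_SET_EVENT_MASK takes the mask by value, as the handler shows.
 *
 *	int fd = open("/dev/rio_mport0", O_RDWR);
 *	struct rio_mport_properties prop;
 *
 *	if (fd < 0)
 *		return -1;
 *	if (ioctl(fd, RIO_MPORT_GET_PROPERTIES, &prop) == 0)
 *		printf("hdid=%d id=%d\n", (int)prop.hdid, (int)prop.id);
 *	ioctl(fd, RIO_SET_EVENT_MASK, RIO_DOORBELL | RIO_PORTWRITE);
 */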
2208 | ||
2209 | /* | |
2210 | * mport_release_mapping - free mapping resources and info structure | |
2211 | * @ref: a pointer to the kref within struct rio_mport_mapping | |
2212 | * | |
2213 | * NOTE: Shall be called while holding buf_mutex. | |
2214 | */ | |
2215 | static void mport_release_mapping(struct kref *ref) | |
2216 | { | |
2217 | struct rio_mport_mapping *map = | |
2218 | container_of(ref, struct rio_mport_mapping, ref); | |
2219 | struct rio_mport *mport = map->md->mport; | |
2220 | ||
2221 | rmcd_debug(MMAP, "type %d mapping @ %p (phys = %pad) for %s", | |
2222 | map->dir, map->virt_addr, | |
2223 | &map->phys_addr, mport->name); | |
2224 | ||
2225 | list_del(&map->node); | |
2226 | ||
2227 | switch (map->dir) { | |
2228 | case MAP_INBOUND: | |
2229 | rio_unmap_inb_region(mport, map->phys_addr); | |
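/* fall through - inbound mappings are backed by a coherent
 * DMA buffer that must be freed below as well
 */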
2230 | case MAP_DMA: | |
2231 | dma_free_coherent(mport->dev.parent, map->size, | |
2232 | map->virt_addr, map->phys_addr); | |
2233 | break; | |
2234 | case MAP_OUTBOUND: | |
2235 | rio_unmap_outb_region(mport, map->rioid, map->rio_addr); | |
2236 | break; | |
2237 | } | |
2238 | kfree(map); | |
2239 | } | |
2240 | ||
2241 | static void mport_mm_open(struct vm_area_struct *vma) | |
2242 | { | |
2243 | struct rio_mport_mapping *map = vma->vm_private_data; | |
2244 | ||
2245 | rmcd_debug(MMAP, "0x%pad", &map->phys_addr); | |
2246 | kref_get(&map->ref); | |
2247 | } | |
2248 | ||
2249 | static void mport_mm_close(struct vm_area_struct *vma) | |
2250 | { | |
2251 | struct rio_mport_mapping *map = vma->vm_private_data; | |
2252 | ||
2253 | rmcd_debug(MMAP, "0x%pad", &map->phys_addr); | |
2254 | mutex_lock(&map->md->buf_mutex); | |
2255 | kref_put(&map->ref, mport_release_mapping); | |
2256 | mutex_unlock(&map->md->buf_mutex); | |
2257 | } | |
2258 | ||
2259 | static const struct vm_operations_struct vm_ops = { | |
2260 | .open = mport_mm_open, | |
2261 | .close = mport_mm_close, | |
2262 | }; | |
2263 | ||
2264 | static int mport_cdev_mmap(struct file *filp, struct vm_area_struct *vma) | |
2265 | { | |
2266 | struct mport_cdev_priv *priv = filp->private_data; | |
2267 | struct mport_dev *md; | |
2268 | size_t size = vma->vm_end - vma->vm_start; | |
2269 | dma_addr_t baddr; | |
2270 | unsigned long offset; | |
2271 | int found = 0, ret; | |
2272 | struct rio_mport_mapping *map; | |
2273 | ||
2274 | rmcd_debug(MMAP, "0x%x bytes at offset 0x%lx", | |
2275 | (unsigned int)size, vma->vm_pgoff); | |
2276 | ||
2277 | md = priv->md; | |
2278 | baddr = ((dma_addr_t)vma->vm_pgoff << PAGE_SHIFT); | |
2279 | ||
2280 | mutex_lock(&md->buf_mutex); | |
2281 | list_for_each_entry(map, &md->mappings, node) { | |
2282 | if (baddr >= map->phys_addr && | |
2283 | baddr < (map->phys_addr + map->size)) { | |
2284 | found = 1; | |
2285 | break; | |
2286 | } | |
2287 | } | |
2288 | mutex_unlock(&md->buf_mutex); | |
2289 | ||
2290 | if (!found) | |
2291 | return -ENOMEM; | |
2292 | ||
2293 | offset = baddr - map->phys_addr; | |
2294 | ||
2295 | if (size + offset > map->size) | |
2296 | return -EINVAL; | |
2297 | ||
2298 | vma->vm_pgoff = offset >> PAGE_SHIFT; | |
2299 | rmcd_debug(MMAP, "MMAP adjusted offset = 0x%lx", vma->vm_pgoff); | |
2300 | ||
2301 | if (map->dir == MAP_INBOUND || map->dir == MAP_DMA) | |
2302 | ret = dma_mmap_coherent(md->mport->dev.parent, vma, | |
2303 | map->virt_addr, map->phys_addr, map->size); | |
2304 | else if (map->dir == MAP_OUTBOUND) { | |
2305 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | |
2306 | ret = vm_iomap_memory(vma, map->phys_addr, map->size); | |
2307 | } else { | |
2308 | rmcd_error("Attempt to mmap unsupported mapping type"); | |
2309 | ret = -EIO; | |
2310 | } | |
2311 | ||
2312 | if (!ret) { | |
2313 | vma->vm_private_data = map; | |
2314 | vma->vm_ops = &vm_ops; | |
2315 | mport_mm_open(vma); | |
2316 | } else { | |
2317 | rmcd_error("MMAP exit with err=%d", ret); | |
2318 | } | |
2319 | ||
2320 | return ret; | |
2321 | } | |
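/*
 * Illustrative user-space sketch (not part of the driver): map a DMA
 * buffer previously obtained with the RIO_ALLOC_DMA ioctl. The mmap
 * offset is the physical/DMA address of the mapping, which the lookup
 * above matches against the device mapping list; "dma_handle" is
 * assumed to have been returned by that ioctl.
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       fd, (off_t)dma_handle);
 *
 *	if (p == MAP_FAILED)
 *		perror("mmap");
 */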
2322 | ||
2323 | static unsigned int mport_cdev_poll(struct file *filp, poll_table *wait) | |
2324 | { | |
2325 | struct mport_cdev_priv *priv = filp->private_data; | |
2326 | ||
2327 | poll_wait(filp, &priv->event_rx_wait, wait); | |
2328 | if (kfifo_len(&priv->event_fifo)) | |
2329 | return POLLIN | POLLRDNORM; | |
2330 | ||
2331 | return 0; | |
2332 | } | |
2333 | ||
2334 | static ssize_t mport_read(struct file *filp, char __user *buf, size_t count, | |
2335 | loff_t *ppos) | |
2336 | { | |
2337 | struct mport_cdev_priv *priv = filp->private_data; | |
2338 | int copied; | |
2339 | ssize_t ret; | |
2340 | ||
2341 | if (!count) | |
2342 | return 0; | |
2343 | ||
2344 | if (kfifo_is_empty(&priv->event_fifo) && | |
2345 | (filp->f_flags & O_NONBLOCK)) | |
2346 | return -EAGAIN; | |
2347 | ||
2348 | if (count % sizeof(struct rio_event)) | |
2349 | return -EINVAL; | |
2350 | ||
2351 | ret = wait_event_interruptible(priv->event_rx_wait, | |
2352 | kfifo_len(&priv->event_fifo) != 0); | |
2353 | if (ret) | |
2354 | return ret; | |
2355 | ||
2356 | while (ret < count) { | 
2357 | if (kfifo_to_user(&priv->event_fifo, buf, | 
2358 | sizeof(struct rio_event), &copied)) | 
2359 | return -EFAULT; | 
/* The FIFO may drain before count bytes are copied; return
 * a short read instead of busy-looping on an empty FIFO.
 */
if (!copied)
break;
2360 | ret += copied; | 
2361 | buf += copied; | 
2362 | } | 
2363 | ||
2364 | return ret; | |
2365 | } | |
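/*
 * Illustrative user-space sketch (not part of the driver): wait for
 * and read queued events; reads must be a multiple of
 * sizeof(struct rio_event), as enforced above. handle_doorbell() is
 * a hypothetical consumer.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	struct rio_event ev;
 *
 *	if (poll(&pfd, 1, -1) > 0 &&
 *	    read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
 *		if (ev.header == RIO_DOORBELL)
 *			handle_doorbell(ev.u.doorbell.rioid,
 *					ev.u.doorbell.payload);
 *	}
 */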
2366 | ||
2367 | static ssize_t mport_write(struct file *filp, const char __user *buf, | |
2368 | size_t count, loff_t *ppos) | |
2369 | { | |
2370 | struct mport_cdev_priv *priv = filp->private_data; | |
2371 | struct rio_mport *mport = priv->md->mport; | |
2372 | struct rio_event event; | |
2373 | int len, ret; | |
2374 | ||
2375 | if (!count) | |
2376 | return 0; | |
2377 | ||
2378 | if (count % sizeof(event)) | |
2379 | return -EINVAL; | |
2380 | ||
2381 | len = 0; | |
2382 | while ((count - len) >= (int)sizeof(event)) { | |
2383 | if (copy_from_user(&event, buf, sizeof(event))) | |
2384 | return -EFAULT; | |
2385 | ||
2386 | if (event.header != RIO_DOORBELL) | |
2387 | return -EINVAL; | |
2388 | ||
2389 | ret = rio_mport_send_doorbell(mport, | |
4e1016da | 2390 | event.u.doorbell.rioid, |
e8de3701 AB |
2391 | event.u.doorbell.payload); |
2392 | if (ret < 0) | |
2393 | return ret; | |
2394 | ||
2395 | len += sizeof(event); | |
2396 | buf += sizeof(event); | |
2397 | } | |
2398 | ||
2399 | return len; | |
2400 | } | |
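/*
 * Illustrative user-space sketch (not part of the driver): send a
 * doorbell by writing a RIO_DOORBELL event, matching the validation
 * above; the destid and payload values are placeholders.
 *
 *	struct rio_event ev = { 0 };
 *
 *	ev.header = RIO_DOORBELL;
 *	ev.u.doorbell.rioid = 0x0001;
 *	ev.u.doorbell.payload = 0x5a5a;
 *	if (write(fd, &ev, sizeof(ev)) != sizeof(ev))
 *		perror("doorbell write");
 */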
2401 | ||
2402 | static const struct file_operations mport_fops = { | |
2403 | .owner = THIS_MODULE, | |
2404 | .open = mport_cdev_open, | |
2405 | .release = mport_cdev_release, | |
2406 | .poll = mport_cdev_poll, | |
2407 | .read = mport_read, | |
2408 | .write = mport_write, | |
2409 | .mmap = mport_cdev_mmap, | |
2410 | .fasync = mport_cdev_fasync, | |
2411 | .unlocked_ioctl = mport_cdev_ioctl | |
2412 | }; | |
2413 | ||
2414 | /* | |
2415 | * Character device management | |
2416 | */ | |
2417 | ||
2418 | static void mport_device_release(struct device *dev) | |
2419 | { | |
2420 | struct mport_dev *md; | |
2421 | ||
2422 | rmcd_debug(EXIT, "%s", dev_name(dev)); | |
2423 | md = container_of(dev, struct mport_dev, dev); | |
2424 | kfree(md); | |
2425 | } | |
2426 | ||
2427 | /* | |
2428 | * mport_cdev_add() - Create mport_dev from rio_mport | |
2429 | * @mport: RapidIO master port | |
2430 | */ | |
2431 | static struct mport_dev *mport_cdev_add(struct rio_mport *mport) | |
2432 | { | |
2433 | int ret = 0; | |
2434 | struct mport_dev *md; | |
2435 | struct rio_mport_attr attr; | |
2436 | ||
4e1016da | 2437 | md = kzalloc(sizeof(*md), GFP_KERNEL); |
e8de3701 AB |
2438 | if (!md) { |
2439 | rmcd_error("Unable allocate a device object"); | |
2440 | return NULL; | |
2441 | } | |
2442 | ||
2443 | md->mport = mport; | |
2444 | mutex_init(&md->buf_mutex); | |
2445 | mutex_init(&md->file_mutex); | |
2446 | INIT_LIST_HEAD(&md->file_list); | |
2447 | cdev_init(&md->cdev, &mport_fops); | |
2448 | md->cdev.owner = THIS_MODULE; | |
2449 | ret = cdev_add(&md->cdev, MKDEV(MAJOR(dev_number), mport->id), 1); | |
2450 | if (ret < 0) { | |
2451 | kfree(md); | |
2452 | rmcd_error("Unable to register a device, err=%d", ret); | |
2453 | return NULL; | |
2454 | } | |
2455 | ||
2456 | md->dev.devt = md->cdev.dev; | |
2457 | md->dev.class = dev_class; | |
2458 | md->dev.parent = &mport->dev; | |
2459 | md->dev.release = mport_device_release; | |
2460 | dev_set_name(&md->dev, DEV_NAME "%d", mport->id); | |
2461 | atomic_set(&md->active, 1); | |
2462 | ||
2463 | ret = device_register(&md->dev); | |
2464 | if (ret) { | |
2465 | rmcd_error("Failed to register mport %d (err=%d)", | |
2466 | mport->id, ret); | |
2467 | goto err_cdev; | |
2468 | } | |
2469 | ||
2470 | get_device(&md->dev); | |
2471 | ||
2472 | INIT_LIST_HEAD(&md->doorbells); | |
2473 | spin_lock_init(&md->db_lock); | |
2474 | INIT_LIST_HEAD(&md->portwrites); | |
2475 | spin_lock_init(&md->pw_lock); | |
2476 | INIT_LIST_HEAD(&md->mappings); | |
2477 | ||
2478 | md->properties.id = mport->id; | |
2479 | md->properties.sys_size = mport->sys_size; | |
2480 | md->properties.hdid = mport->host_deviceid; | |
2481 | md->properties.index = mport->index; | |
2482 | ||
2483 | /* The transfer_mode property is returned through the mport query | 
2484 | * interface | 
2485 | */ | 
4e1016da | 2486 | #ifdef CONFIG_FSL_RIO /* for now: only on Freescale's SoCs */ |
e8de3701 AB |
2487 | md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED; |
2488 | #else | |
2489 | md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER; | |
2490 | #endif | |
2491 | ret = rio_query_mport(mport, &attr); | |
2492 | if (!ret) { | |
2493 | md->properties.flags = attr.flags; | |
2494 | md->properties.link_speed = attr.link_speed; | |
2495 | md->properties.link_width = attr.link_width; | |
2496 | md->properties.dma_max_sge = attr.dma_max_sge; | |
2497 | md->properties.dma_max_size = attr.dma_max_size; | |
2498 | md->properties.dma_align = attr.dma_align; | |
2499 | md->properties.cap_sys_size = 0; | |
2500 | md->properties.cap_transfer_mode = 0; | |
2501 | md->properties.cap_addr_size = 0; | |
2502 | } else { | 
2503 | pr_info(DRV_PREFIX "Failed to obtain info for %s cdev(%d:%d)\n", | 
2504 | mport->name, MAJOR(dev_number), mport->id); | 
}
2505 | ||
2506 | mutex_lock(&mport_devs_lock); | |
2507 | list_add_tail(&md->node, &mport_devs); | |
2508 | mutex_unlock(&mport_devs_lock); | |
2509 | ||
2510 | pr_info(DRV_PREFIX "Added %s cdev(%d:%d)\n", | |
2511 | mport->name, MAJOR(dev_number), mport->id); | |
2512 | ||
2513 | return md; | |
2514 | ||
2515 | err_cdev: | |
2516 | cdev_del(&md->cdev); | |
2517 | kfree(md); | |
2518 | return NULL; | |
2519 | } | |
2520 | ||
2521 | /* | |
2522 | * mport_cdev_terminate_dma() - Stop all active DMA data transfers and release | |
2523 | * associated DMA channels. | |
2524 | */ | |
2525 | static void mport_cdev_terminate_dma(struct mport_dev *md) | |
2526 | { | |
2527 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE | |
2528 | struct mport_cdev_priv *client; | |
2529 | ||
2530 | rmcd_debug(DMA, "%s", dev_name(&md->dev)); | |
2531 | ||
2532 | mutex_lock(&md->file_mutex); | |
2533 | list_for_each_entry(client, &md->file_list, list) { | |
2534 | if (client->dmach) { | |
2535 | dmaengine_terminate_all(client->dmach); | |
2536 | rio_release_dma(client->dmach); | |
2537 | } | |
2538 | } | |
2539 | mutex_unlock(&md->file_mutex); | |
2540 | ||
2541 | if (md->dma_chan) { | |
2542 | dmaengine_terminate_all(md->dma_chan); | |
2543 | rio_release_dma(md->dma_chan); | |
2544 | md->dma_chan = NULL; | |
2545 | } | |
2546 | #endif | |
2547 | } | |
2548 | ||
2549 | ||
2550 | /* | |
2551 | * mport_cdev_kill_fasync() - Send SIGIO signal to all processes with open | |
2552 | * mport_cdev files. | |
2553 | */ | |
2554 | static int mport_cdev_kill_fasync(struct mport_dev *md) | |
2555 | { | |
2556 | unsigned int files = 0; | |
2557 | struct mport_cdev_priv *client; | |
2558 | ||
2559 | mutex_lock(&md->file_mutex); | |
2560 | list_for_each_entry(client, &md->file_list, list) { | |
2561 | if (client->async_queue) | |
2562 | kill_fasync(&client->async_queue, SIGIO, POLL_HUP); | |
2563 | files++; | |
2564 | } | |
2565 | mutex_unlock(&md->file_mutex); | |
2566 | return files; | |
2567 | } | |
2568 | ||
2569 | /* | |
2570 | * mport_cdev_remove() - Remove mport character device | |
2571 | * @md: mport device to remove | 
2572 | */ | |
2573 | static void mport_cdev_remove(struct mport_dev *md) | |
2574 | { | |
2575 | struct rio_mport_mapping *map, *_map; | |
2576 | ||
2577 | rmcd_debug(EXIT, "Remove %s cdev", md->mport->name); | |
2578 | atomic_set(&md->active, 0); | |
2579 | mport_cdev_terminate_dma(md); | |
2580 | rio_del_mport_pw_handler(md->mport, md, rio_mport_pw_handler); | |
2581 | cdev_del(&(md->cdev)); | |
2582 | mport_cdev_kill_fasync(md); | |
2583 | ||
2584 | flush_workqueue(dma_wq); | |
2585 | ||
2586 | /* TODO: do we need to give clients some time to close file | |
2587 | * descriptors? Simple wait for XX, or kref? | |
2588 | */ | |
2589 | ||
2590 | /* | |
2591 | * Release DMA buffers allocated for the mport device. | |
2592 | * Disable associated inbound Rapidio requests mapping if applicable. | |
2593 | */ | |
2594 | mutex_lock(&md->buf_mutex); | |
2595 | list_for_each_entry_safe(map, _map, &md->mappings, node) { | |
2596 | kref_put(&map->ref, mport_release_mapping); | |
2597 | } | |
2598 | mutex_unlock(&md->buf_mutex); | |
2599 | ||
2600 | if (!list_empty(&md->mappings)) | |
2601 | rmcd_warn("WARNING: %s pending mappings on removal", | |
2602 | md->mport->name); | |
2603 | ||
2604 | rio_release_inb_dbell(md->mport, 0, 0x0fff); | |
2605 | ||
2606 | device_unregister(&md->dev); | |
2607 | put_device(&md->dev); | |
2608 | } | |
2609 | ||
2610 | /* | |
2611 | * RIO rio_mport_interface driver | |
2612 | */ | |
2613 | ||
2614 | /* | |
2615 | * mport_add_mport() - Add rio_mport from LDM device struct | |
2616 | * @dev: Linux device model struct | |
2617 | * @class_intf: Linux class_interface | |
2618 | */ | |
2619 | static int mport_add_mport(struct device *dev, | |
2620 | struct class_interface *class_intf) | |
2621 | { | |
2622 | struct rio_mport *mport = NULL; | |
2623 | struct mport_dev *chdev = NULL; | |
2624 | ||
2625 | mport = to_rio_mport(dev); | |
2626 | if (!mport) | |
2627 | return -ENODEV; | |
2628 | ||
2629 | chdev = mport_cdev_add(mport); | |
2630 | if (!chdev) | |
2631 | return -ENODEV; | |
2632 | ||
2633 | return 0; | |
2634 | } | |
2635 | ||
2636 | /* | |
2637 | * mport_remove_mport() - Remove rio_mport from global list | |
2638 | * TODO remove device from global mport_dev list | |
2639 | */ | |
2640 | static void mport_remove_mport(struct device *dev, | |
2641 | struct class_interface *class_intf) | |
2642 | { | |
2643 | struct rio_mport *mport = NULL; | |
2644 | struct mport_dev *chdev; | |
2645 | int found = 0; | |
2646 | ||
2647 | mport = to_rio_mport(dev); | |
2648 | rmcd_debug(EXIT, "Remove %s", mport->name); | |
2649 | ||
2650 | mutex_lock(&mport_devs_lock); | |
2651 | list_for_each_entry(chdev, &mport_devs, node) { | |
2652 | if (chdev->mport->id == mport->id) { | |
2653 | atomic_set(&chdev->active, 0); | |
2654 | list_del(&chdev->node); | |
2655 | found = 1; | |
2656 | break; | |
2657 | } | |
2658 | } | |
2659 | mutex_unlock(&mport_devs_lock); | |
2660 | ||
2661 | if (found) | |
2662 | mport_cdev_remove(chdev); | |
2663 | } | |
2664 | ||
2665 | /* the rio_mport_interface is used to handle local mport devices */ | |
2666 | static struct class_interface rio_mport_interface __refdata = { | |
2667 | .class = &rio_mport_class, | |
2668 | .add_dev = mport_add_mport, | |
2669 | .remove_dev = mport_remove_mport, | |
2670 | }; | |
2671 | ||
2672 | /* | |
2673 | * Linux kernel module | |
2674 | */ | |
2675 | ||
2676 | /* | |
2677 | * mport_init - Driver module loading | |
2678 | */ | |
2679 | static int __init mport_init(void) | |
2680 | { | |
2681 | int ret; | |
2682 | ||
2683 | /* Create device class needed by udev */ | |
2684 | dev_class = class_create(THIS_MODULE, DRV_NAME); | |
99f23c2c | 2685 | if (IS_ERR(dev_class)) { |
e8de3701 | 2686 | rmcd_error("Unable to create " DRV_NAME " class"); |
99f23c2c | 2687 | return PTR_ERR(dev_class); |
e8de3701 AB |
2688 | } |
2689 | ||
2690 | ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME); | |
2691 | if (ret < 0) | |
2692 | goto err_chr; | |
2693 | ||
2694 | rmcd_debug(INIT, "Registered class with major=%d", MAJOR(dev_number)); | |
2695 | ||
2696 | /* Register to rio_mport_interface */ | |
2697 | ret = class_interface_register(&rio_mport_interface); | |
2698 | if (ret) { | |
2699 | rmcd_error("class_interface_register() failed, err=%d", ret); | |
2700 | goto err_cli; | |
2701 | } | |
2702 | ||
2703 | dma_wq = create_singlethread_workqueue("dma_wq"); | |
2704 | if (!dma_wq) { | |
2705 | rmcd_error("failed to create DMA work queue"); | |
2706 | ret = -ENOMEM; | |
2707 | goto err_wq; | |
2708 | } | |
2709 | ||
2710 | return 0; | |
2711 | ||
2712 | err_wq: | |
2713 | class_interface_unregister(&rio_mport_interface); | |
2714 | err_cli: | |
2715 | unregister_chrdev_region(dev_number, RIO_MAX_MPORTS); | |
2716 | err_chr: | |
2717 | class_destroy(dev_class); | |
2718 | return ret; | |
2719 | } | |
2720 | ||
2721 | /** | |
2722 | * mport_exit - Driver module unloading | |
2723 | */ | |
2724 | static void __exit mport_exit(void) | |
2725 | { | |
2726 | class_interface_unregister(&rio_mport_interface); | |
2727 | class_destroy(dev_class); | |
2728 | unregister_chrdev_region(dev_number, RIO_MAX_MPORTS); | |
2729 | destroy_workqueue(dma_wq); | |
2730 | } | |
2731 | ||
2732 | module_init(mport_init); | |
2733 | module_exit(mport_exit); |