Commit | Line | Data |
---|---|---|
48bae050 EB |
1 | /* |
2 | * linux/drivers/misc/xillybus_core.c | |
3 | * | |
4 | * Copyright 2011 Xillybus Ltd, http://xillybus.com | |
5 | * | |
6 | * Driver for the Xillybus FPGA/host framework. | |
7 | * | |
8 | * This driver interfaces with a special IP core in an FPGA, setting up | |
9 | * a pipe between a hardware FIFO in the programmable logic and a device | |
10 | * file in the host. The number of such pipes and their attributes are | |
11 | * set up on the logic. This driver detects these automatically and | |
12 | * creates the device files accordingly. | |
13 | * | |
14 | * This program is free software; you can redistribute it and/or modify | |
15 | * it under the terms of the GNU General Public License as published by | |
16 | * the Free Software Foundation; version 2 of the License. | |
17 | */ | |
18 | ||
19 | #include <linux/list.h> | |
20 | #include <linux/device.h> | |
21 | #include <linux/module.h> | |
22 | #include <linux/io.h> | |
23 | #include <linux/dma-mapping.h> | |
24 | #include <linux/interrupt.h> | |
25 | #include <linux/sched.h> | |
26 | #include <linux/fs.h> | |
27 | #include <linux/cdev.h> | |
28 | #include <linux/spinlock.h> | |
29 | #include <linux/mutex.h> | |
48bae050 EB |
30 | #include <linux/crc32.h> |
31 | #include <linux/poll.h> | |
32 | #include <linux/delay.h> | |
48bae050 EB |
33 | #include <linux/slab.h> |
34 | #include <linux/workqueue.h> | |
35 | #include "xillybus.h" | |
36 | ||
37 | MODULE_DESCRIPTION("Xillybus core functions"); | |
38 | MODULE_AUTHOR("Eli Billauer, Xillybus Ltd."); | |
39 | MODULE_VERSION("1.07"); | |
40 | MODULE_ALIAS("xillybus_core"); | |
41 | MODULE_LICENSE("GPL v2"); | |
42 | ||
43 | /* General timeout is 100 ms, rx timeout is 10 ms */ | |
44 | #define XILLY_RX_TIMEOUT (10*HZ/1000) | |
45 | #define XILLY_TIMEOUT (100*HZ/1000) | |
46 | ||
539889ee EB |
47 | #define fpga_msg_ctrl_reg 0x0008 |
48 | #define fpga_dma_control_reg 0x0020 | |
49 | #define fpga_dma_bufno_reg 0x0024 | |
50 | #define fpga_dma_bufaddr_lowaddr_reg 0x0028 | |
51 | #define fpga_dma_bufaddr_highaddr_reg 0x002c | |
52 | #define fpga_buf_ctrl_reg 0x0030 | |
53 | #define fpga_buf_offset_reg 0x0034 | |
54 | #define fpga_endian_reg 0x0040 | |
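/*
 * Commands written to fpga_buf_ctrl_reg in this driver are laid out as
 * bit 0 = direction (1 on FPGA-to-host channels), bits 11:1 = channel
 * number, bits 21:12 = buffer index, bit 23 = synchronous flag (on open)
 * and bits 31:24 = opcode: 2 = submit buffer / offset limit, 3 = flush /
 * send IDT, 4 = open channel, 5 = close channel, 6 = set address.
 */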
48bae050 EB |
55 | |
56 | #define XILLYMSG_OPCODE_RELEASEBUF 1 | |
57 | #define XILLYMSG_OPCODE_QUIESCEACK 2 | |
58 | #define XILLYMSG_OPCODE_FIFOEOF 3 | |
59 | #define XILLYMSG_OPCODE_FATAL_ERROR 4 | |
60 | #define XILLYMSG_OPCODE_NONEMPTY 5 | |
61 | ||
e71042f2 EB |
62 | static const char xillyname[] = "xillybus"; |
63 | ||
48bae050 EB |
64 | static struct class *xillybus_class; |
65 | ||
66 | /* | |
67 | * ep_list_lock is the last lock to be taken; No other lock requests are | |
68 | * allowed while holding it. It merely protects list_of_endpoints, and not | |
69 | * the endpoints listed in it. | |
70 | */ | |
71 | ||
72 | static LIST_HEAD(list_of_endpoints); | |
73 | static struct mutex ep_list_lock; | |
7ee9ded2 | 74 | static struct workqueue_struct *xillybus_wq; |
48bae050 EB |
75 | |
76 | /* | |
77 | * Locking scheme: Mutexes protect invocations of character device methods. | |
78 | * If both locks are taken, wr_mutex is taken first, rd_mutex second. | |
79 | * | |
80 | * wr_spinlock protects wr_*_buf_idx, wr_empty, wr_sleepy, wr_ready and the | |
81 | * buffers' end_offset fields against changes made by IRQ handler (and in | |
82 | * theory, other file request handlers, but the mutex handles that). Nothing | |
83 | * else. | |
84 | * They are held for short direct memory manipulations. Needless to say, | |
85 | * no mutex locking is allowed when a spinlock is held. | |
86 | * | |
87 | * rd_spinlock does the same with rd_*_buf_idx, rd_empty and end_offset. | |
88 | * | |
89 | * register_mutex is endpoint-specific, and is held when non-atomic | |
90 | * register operations are performed. wr_mutex and rd_mutex may be | |
91 | * held when register_mutex is taken, but none of the spinlocks. Note that | |
92 | * register_mutex doesn't protect against sporadic buf_ctrl_reg writes | |
93 | * which are unrelated to buf_offset_reg, since they are harmless. | |
94 | * | |
95 | * Blocking on the wait queues is allowed with mutexes held, but not with | |
96 | * spinlocks. | |
97 | * | |
98 | * Only interruptible blocking is allowed on mutexes and wait queues. | |
99 | * | |
100 | * All in all, the locking order goes (with skips allowed, of course): | |
101 | * wr_mutex -> rd_mutex -> register_mutex -> wr_spinlock -> rd_spinlock | |
102 | */ | |
103 | ||
35fcf7e3 | 104 | static void malformed_message(struct xilly_endpoint *endpoint, u32 *buf) |
48bae050 EB |
105 | { |
106 | int opcode; | |
107 | int msg_channel, msg_bufno, msg_data, msg_dir; | |
108 | ||
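/*
 * Interrupt messages arrive as pairs of 32-bit words; the fields below
 * are decoded from the first word (bit 0 = direction, bits 11:1 =
 * channel, bits 21:12 = buffer number, bits 31:24 = opcode) and the
 * second word (bits 27:0 = data).
 */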
109 | opcode = (buf[0] >> 24) & 0xff; | |
110 | msg_dir = buf[0] & 1; | |
111 | msg_channel = (buf[0] >> 1) & 0x7ff; | |
112 | msg_bufno = (buf[0] >> 12) & 0x3ff; | |
113 | msg_data = buf[1] & 0xfffffff; | |
114 | ||
35fcf7e3 EB |
115 | dev_warn(endpoint->dev, |
116 | "Malformed message (skipping): opcode=%d, channel=%03x, dir=%d, bufno=%03x, data=%07x\n", | |
117 | opcode, msg_channel, msg_dir, msg_bufno, msg_data); | |
48bae050 EB |
118 | } |
119 | ||
120 | /* | |
121 | * xillybus_isr assumes the interrupt is allocated exclusively to it, | |
122 | * which is the natural case with MSI and several other hardware-oriented | |
123 | * interrupts. Sharing is not allowed. | |
124 | */ | |
125 | ||
126 | irqreturn_t xillybus_isr(int irq, void *data) | |
127 | { | |
128 | struct xilly_endpoint *ep = data; | |
129 | u32 *buf; | |
130 | unsigned int buf_size; | |
131 | int i; | |
132 | int opcode; | |
133 | unsigned int msg_channel, msg_bufno, msg_data, msg_dir; | |
134 | struct xilly_channel *channel; | |
135 | ||
48bae050 EB |
136 | buf = ep->msgbuf_addr; |
137 | buf_size = ep->msg_buf_size/sizeof(u32); | |
138 | ||
7ee9ded2 | 139 | ep->ephw->hw_sync_sgl_for_cpu(ep, |
48bae050 EB |
140 | ep->msgbuf_dma_addr, |
141 | ep->msg_buf_size, | |
142 | DMA_FROM_DEVICE); | |
143 | ||
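/*
 * Scan the message buffer: each message occupies two 32-bit words.
 * Bits 31:28 of the second word carry a rolling counter that must
 * match ep->msg_counter, and bit 22 of the first word marks the last
 * message in the buffer.
 */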
c14cc622 | 144 | for (i = 0; i < buf_size; i += 2) { |
48bae050 | 145 | if (((buf[i+1] >> 28) & 0xf) != ep->msg_counter) { |
35fcf7e3 EB |
146 | malformed_message(ep, &buf[i]); |
147 | dev_warn(ep->dev, | |
148 | "Sending a NACK on counter %x (instead of %x) on entry %d\n", | |
91a2dea8 EB |
149 | ((buf[i+1] >> 28) & 0xf), |
150 | ep->msg_counter, | |
151 | i/2); | |
48bae050 | 152 | |
84590b1a | 153 | if (++ep->failed_messages > 10) { |
35fcf7e3 EB |
154 | dev_err(ep->dev, |
155 | "Lost sync with interrupt messages. Stopping.\n"); | |
84590b1a | 156 | } else { |
7ee9ded2 | 157 | ep->ephw->hw_sync_sgl_for_device( |
48bae050 EB |
158 | ep, |
159 | ep->msgbuf_dma_addr, | |
160 | ep->msg_buf_size, | |
161 | DMA_FROM_DEVICE); | |
162 | ||
163 | iowrite32(0x01, /* Message NACK */ | |
539889ee | 164 | ep->registers + fpga_msg_ctrl_reg); |
48bae050 EB |
165 | } |
166 | return IRQ_HANDLED; | |
167 | } else if (buf[i] & (1 << 22)) /* Last message */ | |
168 | break; | |
c14cc622 | 169 | } |
48bae050 EB |
170 | |
171 | if (i >= buf_size) { | |
35fcf7e3 | 172 | dev_err(ep->dev, "Bad interrupt message. Stopping.\n"); |
48bae050 EB |
173 | return IRQ_HANDLED; |
174 | } | |
175 | ||
1af1ea6b | 176 | buf_size = i + 2; |
48bae050 | 177 | |
1af1ea6b | 178 | for (i = 0; i < buf_size; i += 2) { /* Scan through messages */ |
48bae050 EB |
179 | opcode = (buf[i] >> 24) & 0xff; |
180 | ||
181 | msg_dir = buf[i] & 1; | |
182 | msg_channel = (buf[i] >> 1) & 0x7ff; | |
183 | msg_bufno = (buf[i] >> 12) & 0x3ff; | |
184 | msg_data = buf[i+1] & 0xfffffff; | |
185 | ||
186 | switch (opcode) { | |
187 | case XILLYMSG_OPCODE_RELEASEBUF: | |
48bae050 EB |
188 | if ((msg_channel > ep->num_channels) || |
189 | (msg_channel == 0)) { | |
35fcf7e3 | 190 | malformed_message(ep, &buf[i]); |
48bae050 EB |
191 | break; |
192 | } | |
193 | ||
194 | channel = ep->channels[msg_channel]; | |
195 | ||
196 | if (msg_dir) { /* Write channel */ | |
197 | if (msg_bufno >= channel->num_wr_buffers) { | |
35fcf7e3 | 198 | malformed_message(ep, &buf[i]); |
48bae050 EB |
199 | break; |
200 | } | |
201 | spin_lock(&channel->wr_spinlock); | |
202 | channel->wr_buffers[msg_bufno]->end_offset = | |
203 | msg_data; | |
204 | channel->wr_fpga_buf_idx = msg_bufno; | |
205 | channel->wr_empty = 0; | |
206 | channel->wr_sleepy = 0; | |
207 | spin_unlock(&channel->wr_spinlock); | |
208 | ||
209 | wake_up_interruptible(&channel->wr_wait); | |
210 | ||
211 | } else { | |
212 | /* Read channel */ | |
213 | ||
214 | if (msg_bufno >= channel->num_rd_buffers) { | |
35fcf7e3 | 215 | malformed_message(ep, &buf[i]); |
48bae050 EB |
216 | break; |
217 | } | |
218 | ||
219 | spin_lock(&channel->rd_spinlock); | |
220 | channel->rd_fpga_buf_idx = msg_bufno; | |
221 | channel->rd_full = 0; | |
222 | spin_unlock(&channel->rd_spinlock); | |
223 | ||
224 | wake_up_interruptible(&channel->rd_wait); | |
225 | if (!channel->rd_synchronous) | |
226 | queue_delayed_work( | |
227 | xillybus_wq, | |
228 | &channel->rd_workitem, | |
229 | XILLY_RX_TIMEOUT); | |
230 | } | |
231 | ||
232 | break; | |
233 | case XILLYMSG_OPCODE_NONEMPTY: | |
234 | if ((msg_channel > ep->num_channels) || | |
235 | (msg_channel == 0) || (!msg_dir) || | |
236 | !ep->channels[msg_channel]->wr_supports_nonempty) { | |
35fcf7e3 | 237 | malformed_message(ep, &buf[i]); |
48bae050 EB |
238 | break; |
239 | } | |
240 | ||
241 | channel = ep->channels[msg_channel]; | |
242 | ||
243 | if (msg_bufno >= channel->num_wr_buffers) { | |
35fcf7e3 | 244 | malformed_message(ep, &buf[i]); |
48bae050 EB |
245 | break; |
246 | } | |
247 | spin_lock(&channel->wr_spinlock); | |
248 | if (msg_bufno == channel->wr_host_buf_idx) | |
249 | channel->wr_ready = 1; | |
250 | spin_unlock(&channel->wr_spinlock); | |
251 | ||
252 | wake_up_interruptible(&channel->wr_ready_wait); | |
253 | ||
254 | break; | |
255 | case XILLYMSG_OPCODE_QUIESCEACK: | |
256 | ep->idtlen = msg_data; | |
257 | wake_up_interruptible(&ep->ep_wait); | |
258 | ||
259 | break; | |
260 | case XILLYMSG_OPCODE_FIFOEOF: | |
cc6289fa EB |
261 | if ((msg_channel > ep->num_channels) || |
262 | (msg_channel == 0) || (!msg_dir) || | |
263 | !ep->channels[msg_channel]->num_wr_buffers) { | |
264 | malformed_message(ep, &buf[i]); | |
265 | break; | |
266 | } | |
48bae050 EB |
267 | channel = ep->channels[msg_channel]; |
268 | spin_lock(&channel->wr_spinlock); | |
269 | channel->wr_eof = msg_bufno; | |
270 | channel->wr_sleepy = 0; | |
271 | ||
272 | channel->wr_hangup = channel->wr_empty && | |
273 | (channel->wr_host_buf_idx == msg_bufno); | |
274 | ||
275 | spin_unlock(&channel->wr_spinlock); | |
276 | ||
277 | wake_up_interruptible(&channel->wr_wait); | |
278 | ||
279 | break; | |
280 | case XILLYMSG_OPCODE_FATAL_ERROR: | |
281 | ep->fatal_error = 1; | |
282 | wake_up_interruptible(&ep->ep_wait); /* For select() */ | |
35fcf7e3 EB |
283 | dev_err(ep->dev, |
284 | "FPGA reported a fatal error. This means that the low-level communication with the device has failed. This hardware problem is most likely unrelated to Xillybus (neither kernel module nor FPGA core), but reports are still welcome. All I/O is aborted.\n"); | |
48bae050 EB |
285 | break; |
286 | default: | |
35fcf7e3 | 287 | malformed_message(ep, &buf[i]); |
48bae050 EB |
288 | break; |
289 | } | |
290 | } | |
291 | ||
7ee9ded2 | 292 | ep->ephw->hw_sync_sgl_for_device(ep, |
48bae050 EB |
293 | ep->msgbuf_dma_addr, |
294 | ep->msg_buf_size, | |
295 | DMA_FROM_DEVICE); | |
296 | ||
297 | ep->msg_counter = (ep->msg_counter + 1) & 0xf; | |
298 | ep->failed_messages = 0; | |
539889ee | 299 | iowrite32(0x03, ep->registers + fpga_msg_ctrl_reg); /* Message ACK */ |
48bae050 EB |
300 | |
301 | return IRQ_HANDLED; | |
302 | } | |
303 | EXPORT_SYMBOL(xillybus_isr); | |
304 | ||
305 | /* | |
306 | * A few trivial memory management functions. | |
307 | * NOTE: These functions are used only on probe and remove, and therefore | |
308 | * no locks are applied! | |
309 | */ | |
310 | ||
48bae050 EB |
311 | static void xillybus_autoflush(struct work_struct *work); |
312 | ||
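/*
 * DMA buffers are carved out of larger page allocations ("salami"
 * slicing): salami points at the unused remainder of the current
 * allocation, and left_of_salami counts the bytes left in it.
 */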
049c1fb4 EB |
313 | struct xilly_alloc_state { |
314 | void *salami; | |
315 | int left_of_salami; | |
316 | int nbuffer; | |
317 | enum dma_data_direction direction; | |
318 | u32 regdirection; | |
319 | }; | |
320 | ||
321 | static int xilly_get_dma_buffers(struct xilly_endpoint *ep, | |
322 | struct xilly_alloc_state *s, | |
323 | struct xilly_buffer **buffers, | |
324 | int bufnum, int bytebufsize) | |
325 | { | |
326 | int i, rc; | |
327 | dma_addr_t dma_addr; | |
328 | struct device *dev = ep->dev; | |
329 | struct xilly_buffer *this_buffer = NULL; /* Init to silence warning */ | |
330 | ||
331 | if (buffers) { /* Not the message buffer */ | |
5899005f EB |
332 | this_buffer = devm_kcalloc(dev, bufnum, |
333 | sizeof(struct xilly_buffer), | |
334 | GFP_KERNEL); | |
049c1fb4 EB |
335 | if (!this_buffer) |
336 | return -ENOMEM; | |
337 | } | |
338 | ||
339 | for (i = 0; i < bufnum; i++) { | |
340 | /* | |
341 | * Buffers are expected in descending size order, so there | |
342 | * is either enough space for this buffer or none at all. | |
343 | */ | |
344 | ||
345 | if ((s->left_of_salami < bytebufsize) && | |
346 | (s->left_of_salami > 0)) { | |
347 | dev_err(ep->dev, | |
348 | "Corrupt buffer allocation in IDT. Aborting.\n"); | |
349 | return -ENODEV; | |
350 | } | |
351 | ||
352 | if (s->left_of_salami == 0) { | |
353 | int allocorder, allocsize; | |
354 | ||
355 | allocsize = PAGE_SIZE; | |
356 | allocorder = 0; | |
357 | while (bytebufsize > allocsize) { | |
358 | allocsize *= 2; | |
359 | allocorder++; | |
360 | } | |
361 | ||
362 | s->salami = (void *) devm_get_free_pages( | |
363 | dev, | |
364 | GFP_KERNEL | __GFP_DMA32 | __GFP_ZERO, | |
365 | allocorder); | |
049c1fb4 EB |
366 | if (!s->salami) |
367 | return -ENOMEM; | |
64fa2b1b | 368 | |
049c1fb4 EB |
369 | s->left_of_salami = allocsize; |
370 | } | |
371 | ||
372 | rc = ep->ephw->map_single(ep, s->salami, | |
373 | bytebufsize, s->direction, | |
374 | &dma_addr); | |
049c1fb4 EB |
375 | if (rc) |
376 | return rc; | |
377 | ||
378 | iowrite32((u32) (dma_addr & 0xffffffff), | |
539889ee | 379 | ep->registers + fpga_dma_bufaddr_lowaddr_reg); |
049c1fb4 | 380 | iowrite32(((u32) ((((u64) dma_addr) >> 32) & 0xffffffff)), |
539889ee | 381 | ep->registers + fpga_dma_bufaddr_highaddr_reg); |
049c1fb4 EB |
382 | |
383 | if (buffers) { /* Not the message buffer */ | |
384 | this_buffer->addr = s->salami; | |
385 | this_buffer->dma_addr = dma_addr; | |
386 | buffers[i] = this_buffer++; | |
387 | ||
388 | iowrite32(s->regdirection | s->nbuffer++, | |
539889ee | 389 | ep->registers + fpga_dma_bufno_reg); |
049c1fb4 EB |
390 | } else { |
391 | ep->msgbuf_addr = s->salami; | |
392 | ep->msgbuf_dma_addr = dma_addr; | |
393 | ep->msg_buf_size = bytebufsize; | |
394 | ||
395 | iowrite32(s->regdirection, | |
539889ee | 396 | ep->registers + fpga_dma_bufno_reg); |
049c1fb4 EB |
397 | } |
398 | ||
399 | s->left_of_salami -= bytebufsize; | |
400 | s->salami += bytebufsize; | |
401 | } | |
21c3184c | 402 | return 0; |
049c1fb4 EB |
403 | } |
404 | ||
48bae050 | 405 | static int xilly_setupchannels(struct xilly_endpoint *ep, |
48bae050 | 406 | unsigned char *chandesc, |
79ae92c4 | 407 | int entries) |
48bae050 | 408 | { |
525be905 | 409 | struct device *dev = ep->dev; |
049c1fb4 | 410 | int i, entry, rc; |
48bae050 EB |
411 | struct xilly_channel *channel; |
412 | int channelnum, bufnum, bufsize, format, is_writebuf; | |
413 | int bytebufsize; | |
414 | int synchronous, allowpartial, exclusive_open, seekable; | |
415 | int supports_nonempty; | |
48bae050 EB |
416 | int msg_buf_done = 0; |
417 | ||
049c1fb4 EB |
418 | struct xilly_alloc_state rd_alloc = { |
419 | .salami = NULL, | |
420 | .left_of_salami = 0, | |
421 | .nbuffer = 1, | |
422 | .direction = DMA_TO_DEVICE, | |
423 | .regdirection = 0, | |
424 | }; | |
425 | ||
426 | struct xilly_alloc_state wr_alloc = { | |
427 | .salami = NULL, | |
428 | .left_of_salami = 0, | |
429 | .nbuffer = 1, | |
430 | .direction = DMA_FROM_DEVICE, | |
431 | .regdirection = 0x80000000, | |
432 | }; | |
48bae050 | 433 | |
5899005f | 434 | channel = devm_kcalloc(dev, ep->num_channels, |
525be905 | 435 | sizeof(struct xilly_channel), GFP_KERNEL); |
48bae050 | 436 | if (!channel) |
31ca128d | 437 | return -ENOMEM; |
48bae050 | 438 | |
5899005f | 439 | ep->channels = devm_kcalloc(dev, ep->num_channels + 1, |
525be905 EB |
440 | sizeof(struct xilly_channel *), |
441 | GFP_KERNEL); | |
48bae050 | 442 | if (!ep->channels) |
31ca128d | 443 | return -ENOMEM; |
48bae050 EB |
444 | |
445 | ep->channels[0] = NULL; /* Channel 0 is message buf. */ | |
446 | ||
447 | /* Initialize all channels with defaults */ | |
448 | ||
449 | for (i = 1; i <= ep->num_channels; i++) { | |
450 | channel->wr_buffers = NULL; | |
451 | channel->rd_buffers = NULL; | |
452 | channel->num_wr_buffers = 0; | |
453 | channel->num_rd_buffers = 0; | |
454 | channel->wr_fpga_buf_idx = -1; | |
455 | channel->wr_host_buf_idx = 0; | |
456 | channel->wr_host_buf_pos = 0; | |
457 | channel->wr_empty = 1; | |
458 | channel->wr_ready = 0; | |
459 | channel->wr_sleepy = 1; | |
460 | channel->rd_fpga_buf_idx = 0; | |
461 | channel->rd_host_buf_idx = 0; | |
462 | channel->rd_host_buf_pos = 0; | |
463 | channel->rd_full = 0; | |
464 | channel->wr_ref_count = 0; | |
465 | channel->rd_ref_count = 0; | |
466 | ||
467 | spin_lock_init(&channel->wr_spinlock); | |
468 | spin_lock_init(&channel->rd_spinlock); | |
469 | mutex_init(&channel->wr_mutex); | |
470 | mutex_init(&channel->rd_mutex); | |
471 | init_waitqueue_head(&channel->rd_wait); | |
472 | init_waitqueue_head(&channel->wr_wait); | |
473 | init_waitqueue_head(&channel->wr_ready_wait); | |
474 | ||
475 | INIT_DELAYED_WORK(&channel->rd_workitem, xillybus_autoflush); | |
476 | ||
477 | channel->endpoint = ep; | |
478 | channel->chan_num = i; | |
479 | ||
480 | channel->log2_element_size = 0; | |
481 | ||
482 | ep->channels[i] = channel++; | |
483 | } | |
484 | ||
48bae050 | 485 | for (entry = 0; entry < entries; entry++, chandesc += 4) { |
049c1fb4 EB |
486 | struct xilly_buffer **buffers = NULL; |
487 | ||
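/*
 * Decode one 4-byte channel descriptor: byte 0 bit 0 is the write-buffer
 * flag, the channel number takes byte 0 bits 7:1 plus byte 1 bits 3:0 as
 * its upper bits, byte 1 carries format (bits 5:4), allow-partial (bit 6)
 * and synchronous (bit 7), byte 2 carries log2(buffer size) in bits 4:0
 * plus the nonempty/seekable/exclusive-open flags in bits 5-7, and
 * byte 3 bits 3:0 carry log2(number of buffers).
 */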
48bae050 EB |
488 | is_writebuf = chandesc[0] & 0x01; |
489 | channelnum = (chandesc[0] >> 1) | ((chandesc[1] & 0x0f) << 7); | |
490 | format = (chandesc[1] >> 4) & 0x03; | |
491 | allowpartial = (chandesc[1] >> 6) & 0x01; | |
492 | synchronous = (chandesc[1] >> 7) & 0x01; | |
493 | bufsize = 1 << (chandesc[2] & 0x1f); | |
494 | bufnum = 1 << (chandesc[3] & 0x0f); | |
495 | exclusive_open = (chandesc[2] >> 7) & 0x01; | |
496 | seekable = (chandesc[2] >> 6) & 0x01; | |
497 | supports_nonempty = (chandesc[2] >> 5) & 0x01; | |
498 | ||
499 | if ((channelnum > ep->num_channels) || | |
500 | ((channelnum == 0) && !is_writebuf)) { | |
35fcf7e3 EB |
501 | dev_err(ep->dev, |
502 | "IDT requests channel out of range. Aborting.\n"); | |
48bae050 EB |
503 | return -ENODEV; |
504 | } | |
505 | ||
506 | channel = ep->channels[channelnum]; /* NULL for msg channel */ | |
507 | ||
049c1fb4 | 508 | if (!is_writebuf || channelnum > 0) { |
48bae050 EB |
509 | channel->log2_element_size = ((format > 2) ? |
510 | 2 : format); | |
049c1fb4 | 511 | |
ba327173 | 512 | bytebufsize = bufsize * |
48bae050 | 513 | (1 << channel->log2_element_size); |
48bae050 | 514 | |
5899005f EB |
515 | buffers = devm_kcalloc(dev, bufnum, |
516 | sizeof(struct xilly_buffer *), | |
517 | GFP_KERNEL); | |
049c1fb4 | 518 | if (!buffers) |
31ca128d | 519 | return -ENOMEM; |
84590b1a | 520 | } else { |
049c1fb4 | 521 | bytebufsize = bufsize << 2; |
84590b1a | 522 | } |
48bae050 | 523 | |
049c1fb4 EB |
524 | if (!is_writebuf) { |
525 | channel->num_rd_buffers = bufnum; | |
ba327173 | 526 | channel->rd_buf_size = bytebufsize; |
049c1fb4 EB |
527 | channel->rd_allow_partial = allowpartial; |
528 | channel->rd_synchronous = synchronous; | |
529 | channel->rd_exclusive_open = exclusive_open; | |
530 | channel->seekable = seekable; | |
48bae050 | 531 | |
049c1fb4 EB |
532 | channel->rd_buffers = buffers; |
533 | rc = xilly_get_dma_buffers(ep, &rd_alloc, buffers, | |
534 | bufnum, bytebufsize); | |
91d42194 | 535 | } else if (channelnum > 0) { |
48bae050 | 536 | channel->num_wr_buffers = bufnum; |
ba327173 | 537 | channel->wr_buf_size = bytebufsize; |
48bae050 EB |
538 | |
539 | channel->seekable = seekable; | |
540 | channel->wr_supports_nonempty = supports_nonempty; | |
541 | ||
542 | channel->wr_allow_partial = allowpartial; | |
543 | channel->wr_synchronous = synchronous; | |
544 | channel->wr_exclusive_open = exclusive_open; | |
545 | ||
049c1fb4 EB |
546 | channel->wr_buffers = buffers; |
547 | rc = xilly_get_dma_buffers(ep, &wr_alloc, buffers, | |
548 | bufnum, bytebufsize); | |
549 | } else { | |
550 | rc = xilly_get_dma_buffers(ep, &wr_alloc, NULL, | |
551 | bufnum, bytebufsize); | |
552 | msg_buf_done++; | |
48bae050 EB |
553 | } |
554 | ||
049c1fb4 | 555 | if (rc) |
31ca128d | 556 | return -ENOMEM; |
48bae050 EB |
557 | } |
558 | ||
559 | if (!msg_buf_done) { | |
35fcf7e3 EB |
560 | dev_err(ep->dev, |
561 | "Corrupt IDT: No message buffer. Aborting.\n"); | |
48bae050 EB |
562 | return -ENODEV; |
563 | } | |
48bae050 | 564 | return 0; |
48bae050 EB |
565 | } |
566 | ||
9ac77ec6 EB |
567 | static int xilly_scan_idt(struct xilly_endpoint *endpoint, |
568 | struct xilly_idt_handle *idt_handle) | |
48bae050 EB |
569 | { |
570 | int count = 0; | |
571 | unsigned char *idt = endpoint->channels[1]->wr_buffers[0]->addr; | |
572 | unsigned char *end_of_idt = idt + endpoint->idtlen - 4; | |
573 | unsigned char *scan; | |
574 | int len; | |
575 | ||
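/*
 * The IDT consists of a version byte, a list of null-terminated device
 * name strings (one per channel) ending with an extra zero byte, and
 * then the 4-byte channel descriptors parsed by xilly_setupchannels().
 */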
576 | scan = idt; | |
577 | idt_handle->idt = idt; | |
578 | ||
579 | scan++; /* Skip version number */ | |
580 | ||
581 | while ((scan <= end_of_idt) && *scan) { | |
582 | while ((scan <= end_of_idt) && *scan++) | |
583 | /* Do nothing, just scan thru string */; | |
584 | count++; | |
585 | } | |
586 | ||
587 | scan++; | |
588 | ||
589 | if (scan > end_of_idt) { | |
35fcf7e3 EB |
590 | dev_err(endpoint->dev, |
591 | "IDT device name list overflow. Aborting.\n"); | |
9ac77ec6 | 592 | return -ENODEV; |
91d42194 VB |
593 | } |
594 | idt_handle->chandesc = scan; | |
48bae050 EB |
595 | |
596 | len = endpoint->idtlen - (3 + ((int) (scan - idt))); | |
597 | ||
598 | if (len & 0x03) { | |
35fcf7e3 EB |
599 | dev_err(endpoint->dev, |
600 | "Corrupt IDT device name list. Aborting.\n"); | |
9ac77ec6 | 601 | return -ENODEV; |
48bae050 EB |
602 | } |
603 | ||
604 | idt_handle->entries = len >> 2; | |
48bae050 | 605 | endpoint->num_channels = count; |
9ac77ec6 EB |
606 | |
607 | return 0; | |
48bae050 EB |
608 | } |
609 | ||
610 | static int xilly_obtain_idt(struct xilly_endpoint *endpoint) | |
611 | { | |
48bae050 EB |
612 | struct xilly_channel *channel; |
613 | unsigned char *version; | |
ae870e5d | 614 | long t; |
48bae050 EB |
615 | |
616 | channel = endpoint->channels[1]; /* This should be generated ad-hoc */ | |
617 | ||
618 | channel->wr_sleepy = 1; | |
48bae050 EB |
619 | |
620 | iowrite32(1 | | |
91a2dea8 EB |
621 | (3 << 24), /* Opcode 3 for channel 0 = Send IDT */ |
622 | endpoint->registers + fpga_buf_ctrl_reg); | |
48bae050 | 623 | |
ae870e5d EB |
624 | t = wait_event_interruptible_timeout(channel->wr_wait, |
625 | (!channel->wr_sleepy), | |
626 | XILLY_TIMEOUT); | |
48bae050 | 627 | |
ae870e5d | 628 | if (t <= 0) { |
35fcf7e3 | 629 | dev_err(endpoint->dev, "Failed to obtain IDT. Aborting.\n"); |
48bae050 EB |
630 | |
631 | if (endpoint->fatal_error) | |
632 | return -EIO; | |
633 | ||
40931bbb | 634 | return -ENODEV; |
48bae050 EB |
635 | } |
636 | ||
7ee9ded2 | 637 | endpoint->ephw->hw_sync_sgl_for_cpu( |
48bae050 EB |
638 | channel->endpoint, |
639 | channel->wr_buffers[0]->dma_addr, | |
640 | channel->wr_buf_size, | |
641 | DMA_FROM_DEVICE); | |
642 | ||
643 | if (channel->wr_buffers[0]->end_offset != endpoint->idtlen) { | |
35fcf7e3 EB |
644 | dev_err(endpoint->dev, |
645 | "IDT length mismatch (%d != %d). Aborting.\n", | |
91a2dea8 | 646 | channel->wr_buffers[0]->end_offset, endpoint->idtlen); |
40931bbb | 647 | return -ENODEV; |
48bae050 EB |
648 | } |
649 | ||
650 | if (crc32_le(~0, channel->wr_buffers[0]->addr, | |
651 | endpoint->idtlen+1) != 0) { | |
35fcf7e3 | 652 | dev_err(endpoint->dev, "IDT failed CRC check. Aborting.\n"); |
40931bbb | 653 | return -ENODEV; |
48bae050 EB |
654 | } |
655 | ||
656 | version = channel->wr_buffers[0]->addr; | |
657 | ||
658 | /* Check version number. Accept anything below 0x82 for now. */ | |
659 | if (*version > 0x82) { | |
35fcf7e3 EB |
660 | dev_err(endpoint->dev, |
661 | "No support for IDT version 0x%02x. Maybe the xillybus driver needs an upgarde. Aborting.\n", | |
a5199461 | 662 | *version); |
40931bbb | 663 | return -ENODEV; |
48bae050 EB |
664 | } |
665 | ||
21c3184c | 666 | return 0; |
48bae050 EB |
667 | } |
668 | ||
7ee9ded2 EB |
669 | static ssize_t xillybus_read(struct file *filp, char __user *userbuf, |
670 | size_t count, loff_t *f_pos) | |
48bae050 EB |
671 | { |
672 | ssize_t rc; | |
673 | unsigned long flags; | |
674 | int bytes_done = 0; | |
675 | int no_time_left = 0; | |
676 | long deadline, left_to_sleep; | |
677 | struct xilly_channel *channel = filp->private_data; | |
678 | ||
679 | int empty, reached_eof, exhausted, ready; | |
680 | /* Initializations are there only to silence warnings */ | |
681 | ||
682 | int howmany = 0, bufpos = 0, bufidx = 0, bufferdone = 0; | |
683 | int waiting_bufidx; | |
684 | ||
685 | if (channel->endpoint->fatal_error) | |
686 | return -EIO; | |
687 | ||
688 | deadline = jiffies + 1 + XILLY_RX_TIMEOUT; | |
689 | ||
690 | rc = mutex_lock_interruptible(&channel->wr_mutex); | |
48bae050 EB |
691 | if (rc) |
692 | return rc; | |
693 | ||
48bae050 EB |
694 | while (1) { /* Note that we may drop mutex within this loop */ |
695 | int bytes_to_do = count - bytes_done; | |
91d42194 | 696 | |
48bae050 EB |
697 | spin_lock_irqsave(&channel->wr_spinlock, flags); |
698 | ||
699 | empty = channel->wr_empty; | |
700 | ready = !empty || channel->wr_ready; | |
701 | ||
702 | if (!empty) { | |
703 | bufidx = channel->wr_host_buf_idx; | |
704 | bufpos = channel->wr_host_buf_pos; | |
705 | howmany = ((channel->wr_buffers[bufidx]->end_offset | |
706 | + 1) << channel->log2_element_size) | |
707 | - bufpos; | |
708 | ||
709 | /* Update wr_host_* to its post-operation state */ | |
710 | if (howmany > bytes_to_do) { | |
711 | bufferdone = 0; | |
712 | ||
713 | howmany = bytes_to_do; | |
714 | channel->wr_host_buf_pos += howmany; | |
715 | } else { | |
716 | bufferdone = 1; | |
717 | ||
718 | channel->wr_host_buf_pos = 0; | |
719 | ||
720 | if (bufidx == channel->wr_fpga_buf_idx) { | |
721 | channel->wr_empty = 1; | |
722 | channel->wr_sleepy = 1; | |
723 | channel->wr_ready = 0; | |
724 | } | |
725 | ||
726 | if (bufidx >= (channel->num_wr_buffers - 1)) | |
727 | channel->wr_host_buf_idx = 0; | |
728 | else | |
729 | channel->wr_host_buf_idx++; | |
730 | } | |
731 | } | |
732 | ||
733 | /* | |
734 | * Marking our situation after the possible changes above, | |
735 | * for use after releasing the spinlock. | |
736 | * | |
737 | * empty = empty before change | |
738 | * exhausted = empty after possible change | |
739 | */ | |
740 | ||
741 | reached_eof = channel->wr_empty && | |
742 | (channel->wr_host_buf_idx == channel->wr_eof); | |
743 | channel->wr_hangup = reached_eof; | |
744 | exhausted = channel->wr_empty; | |
745 | waiting_bufidx = channel->wr_host_buf_idx; | |
746 | ||
747 | spin_unlock_irqrestore(&channel->wr_spinlock, flags); | |
748 | ||
749 | if (!empty) { /* Go on, now without the spinlock */ | |
750 | ||
751 | if (bufpos == 0) /* Position zero means it's virgin */ | |
7ee9ded2 | 752 | channel->endpoint->ephw->hw_sync_sgl_for_cpu( |
48bae050 EB |
753 | channel->endpoint, |
754 | channel->wr_buffers[bufidx]->dma_addr, | |
755 | channel->wr_buf_size, | |
756 | DMA_FROM_DEVICE); | |
757 | ||
758 | if (copy_to_user( | |
759 | userbuf, | |
760 | channel->wr_buffers[bufidx]->addr | |
761 | + bufpos, howmany)) | |
762 | rc = -EFAULT; | |
763 | ||
764 | userbuf += howmany; | |
765 | bytes_done += howmany; | |
766 | ||
767 | if (bufferdone) { | |
d3274f20 EB |
768 | channel->endpoint->ephw->hw_sync_sgl_for_device( |
769 | channel->endpoint, | |
770 | channel->wr_buffers[bufidx]->dma_addr, | |
771 | channel->wr_buf_size, | |
772 | DMA_FROM_DEVICE); | |
48bae050 EB |
773 | |
774 | /* | |
775 | * Tell FPGA the buffer is done with. It's an | |
776 | * atomic operation to the FPGA, so what | |
777 | * happens with other channels doesn't matter, | |
778 | * and this particular channel is protected by | |
779 | * the channel-specific mutex. | |
780 | */ | |
781 | ||
79ae92c4 EB |
782 | iowrite32(1 | (channel->chan_num << 1) | |
783 | (bufidx << 12), | |
91a2dea8 EB |
784 | channel->endpoint->registers + |
785 | fpga_buf_ctrl_reg); | |
48bae050 EB |
786 | } |
787 | ||
788 | if (rc) { | |
789 | mutex_unlock(&channel->wr_mutex); | |
790 | return rc; | |
791 | } | |
792 | } | |
793 | ||
794 | /* This includes a zero-count return = EOF */ | |
795 | if ((bytes_done >= count) || reached_eof) | |
796 | break; | |
797 | ||
798 | if (!exhausted) | |
799 | continue; /* More in RAM buffer(s)? Just go on. */ | |
800 | ||
801 | if ((bytes_done > 0) && | |
802 | (no_time_left || | |
803 | (channel->wr_synchronous && channel->wr_allow_partial))) | |
804 | break; | |
805 | ||
806 | /* | |
807 | * Nonblocking read: The "ready" flag tells us that the FPGA | |
808 | * has data to send. In non-blocking mode, if it isn't on, | |
809 | * just return. But if there is, we jump directly to the point | |
810 | * where we ask for the FPGA to send all it has, and wait | |
811 | * until that data arrives. So in a sense, we *do* block in | |
812 | * nonblocking mode, but only for a very short time. | |
813 | */ | |
814 | ||
815 | if (!no_time_left && (filp->f_flags & O_NONBLOCK)) { | |
816 | if (bytes_done > 0) | |
817 | break; | |
818 | ||
819 | if (ready) | |
820 | goto desperate; | |
821 | ||
06bda66b | 822 | rc = -EAGAIN; |
48bae050 EB |
823 | break; |
824 | } | |
825 | ||
826 | if (!no_time_left || (bytes_done > 0)) { | |
827 | /* | |
828 | * Note that in case of an element-misaligned read | |
829 | * request, offsetlimit will include the last element, | |
830 | * which will be partially read from. | |
831 | */ | |
832 | int offsetlimit = ((count - bytes_done) - 1) >> | |
833 | channel->log2_element_size; | |
834 | int buf_elements = channel->wr_buf_size >> | |
835 | channel->log2_element_size; | |
836 | ||
837 | /* | |
838 | * In synchronous mode, always send an offset limit. | |
839 | * Just don't send a value too big. | |
840 | */ | |
841 | ||
842 | if (channel->wr_synchronous) { | |
843 | /* Don't request more than one buffer */ | |
844 | if (channel->wr_allow_partial && | |
845 | (offsetlimit >= buf_elements)) | |
846 | offsetlimit = buf_elements - 1; | |
847 | ||
848 | /* Don't request more than all buffers */ | |
849 | if (!channel->wr_allow_partial && | |
850 | (offsetlimit >= | |
851 | (buf_elements * channel->num_wr_buffers))) | |
852 | offsetlimit = buf_elements * | |
853 | channel->num_wr_buffers - 1; | |
854 | } | |
855 | ||
856 | /* | |
857 | * In asynchronous mode, force early flush of a buffer | |
858 | * only if that will allow returning a full count. The | |
859 | * "offsetlimit < ( ... )" rather than "<=" excludes | |
860 | * requesting a full buffer, which would obviously | |
861 | * cause a buffer transmission anyhow | |
862 | */ | |
863 | ||
864 | if (channel->wr_synchronous || | |
865 | (offsetlimit < (buf_elements - 1))) { | |
48bae050 EB |
866 | mutex_lock(&channel->endpoint->register_mutex); |
867 | ||
868 | iowrite32(offsetlimit, | |
539889ee EB |
869 | channel->endpoint->registers + |
870 | fpga_buf_offset_reg); | |
48bae050 EB |
871 | |
872 | iowrite32(1 | (channel->chan_num << 1) | | |
91a2dea8 EB |
873 | (2 << 24) | /* 2 = offset limit */ |
874 | (waiting_bufidx << 12), | |
875 | channel->endpoint->registers + | |
876 | fpga_buf_ctrl_reg); | |
48bae050 | 877 | |
48bae050 EB |
878 | mutex_unlock(&channel->endpoint-> |
879 | register_mutex); | |
880 | } | |
48bae050 EB |
881 | } |
882 | ||
883 | /* | |
884 | * If partial completion is disallowed, there is no point in | |
885 | * timeout sleeping. Neither if no_time_left is set and | |
886 | * there's no data. | |
887 | */ | |
888 | ||
889 | if (!channel->wr_allow_partial || | |
890 | (no_time_left && (bytes_done == 0))) { | |
48bae050 EB |
891 | /* |
892 | * This do-loop will run more than once if another | |
893 | * thread reasserted wr_sleepy before we got the mutex | |
894 | * back, so we try again. | |
895 | */ | |
896 | ||
897 | do { | |
898 | mutex_unlock(&channel->wr_mutex); | |
899 | ||
900 | if (wait_event_interruptible( | |
901 | channel->wr_wait, | |
902 | (!channel->wr_sleepy))) | |
903 | goto interrupted; | |
904 | ||
905 | if (mutex_lock_interruptible( | |
906 | &channel->wr_mutex)) | |
907 | goto interrupted; | |
908 | } while (channel->wr_sleepy); | |
909 | ||
910 | continue; | |
911 | ||
912 | interrupted: /* Mutex is not held if got here */ | |
913 | if (channel->endpoint->fatal_error) | |
914 | return -EIO; | |
915 | if (bytes_done) | |
916 | return bytes_done; | |
917 | if (filp->f_flags & O_NONBLOCK) | |
918 | return -EAGAIN; /* Don't admit snoozing */ | |
919 | return -EINTR; | |
920 | } | |
921 | ||
922 | left_to_sleep = deadline - ((long) jiffies); | |
923 | ||
924 | /* | |
925 | * If our time is out, skip the waiting. We may miss wr_sleepy | |
926 | * being deasserted but hey, almost missing the train is like | |
927 | * missing it. | |
928 | */ | |
929 | ||
930 | if (left_to_sleep > 0) { | |
931 | left_to_sleep = | |
932 | wait_event_interruptible_timeout( | |
933 | channel->wr_wait, | |
934 | (!channel->wr_sleepy), | |
935 | left_to_sleep); | |
936 | ||
ae870e5d | 937 | if (left_to_sleep > 0) /* wr_sleepy deasserted */ |
48bae050 EB |
938 | continue; |
939 | ||
940 | if (left_to_sleep < 0) { /* Interrupt */ | |
941 | mutex_unlock(&channel->wr_mutex); | |
942 | if (channel->endpoint->fatal_error) | |
943 | return -EIO; | |
944 | if (bytes_done) | |
945 | return bytes_done; | |
946 | return -EINTR; | |
947 | } | |
948 | } | |
949 | ||
950 | desperate: | |
951 | no_time_left = 1; /* We're out of sleeping time. Desperate! */ | |
952 | ||
953 | if (bytes_done == 0) { | |
954 | /* | |
955 | * Reaching here means that we allow partial return, | |
956 | * that we've run out of time, and that we have | |
957 | * nothing to return. | |
958 | * So tell the FPGA to send anything it has or gets. | |
959 | */ | |
960 | ||
961 | iowrite32(1 | (channel->chan_num << 1) | | |
91a2dea8 EB |
962 | (3 << 24) | /* Opcode 3, flush it all! */ |
963 | (waiting_bufidx << 12), | |
964 | channel->endpoint->registers + | |
965 | fpga_buf_ctrl_reg); | |
48bae050 EB |
966 | } |
967 | ||
968 | /* | |
21c3184c EB |
969 | * Reaching here means that we *do* have data in the buffer, |
970 | * but the "partial" flag disallows returning less than | |
971 | * required. And we don't have as much. So loop again, | |
972 | * which is likely to end up blocking indefinitely until | |
973 | * enough data has arrived. | |
48bae050 EB |
974 | */ |
975 | } | |
976 | ||
977 | mutex_unlock(&channel->wr_mutex); | |
978 | ||
979 | if (channel->endpoint->fatal_error) | |
980 | return -EIO; | |
981 | ||
06bda66b EB |
982 | if (rc) |
983 | return rc; | |
984 | ||
48bae050 EB |
985 | return bytes_done; |
986 | } | |
987 | ||
988 | /* | |
989 | * The timeout argument takes values as follows: | |
990 | * >0 : Flush with timeout | |
991 | ==0 : Flush, and wait indefinitely for the flush to complete | |
992 | * <0 : Autoflush: Flush only if there's a single buffer occupied | |
993 | */ | |
994 | ||
995 | static int xillybus_myflush(struct xilly_channel *channel, long timeout) | |
996 | { | |
40931bbb | 997 | int rc; |
48bae050 EB |
998 | unsigned long flags; |
999 | ||
1000 | int end_offset_plus1; | |
1001 | int bufidx, bufidx_minus1; | |
1002 | int i; | |
1003 | int empty; | |
1004 | int new_rd_host_buf_pos; | |
1005 | ||
1006 | if (channel->endpoint->fatal_error) | |
1007 | return -EIO; | |
1008 | rc = mutex_lock_interruptible(&channel->rd_mutex); | |
48bae050 EB |
1009 | if (rc) |
1010 | return rc; | |
1011 | ||
1012 | /* | |
1013 | * Don't flush a closed channel. This can happen when the queued | |
1014 | * autoflush work item fires off after the file has closed. This is not | |
1015 | * an error, just something to dismiss. | |
1016 | */ | |
1017 | ||
1018 | if (!channel->rd_ref_count) | |
1019 | goto done; | |
1020 | ||
1021 | bufidx = channel->rd_host_buf_idx; | |
1022 | ||
d3274f20 EB |
1023 | bufidx_minus1 = (bufidx == 0) ? |
1024 | channel->num_rd_buffers - 1 : | |
1025 | bufidx - 1; | |
48bae050 EB |
1026 | |
1027 | end_offset_plus1 = channel->rd_host_buf_pos >> | |
1028 | channel->log2_element_size; | |
1029 | ||
1030 | new_rd_host_buf_pos = channel->rd_host_buf_pos - | |
1031 | (end_offset_plus1 << channel->log2_element_size); | |
1032 | ||
1033 | /* Submit the current buffer if it's nonempty */ | |
1034 | if (end_offset_plus1) { | |
1035 | unsigned char *tail = channel->rd_buffers[bufidx]->addr + | |
1036 | (end_offset_plus1 << channel->log2_element_size); | |
1037 | ||
1038 | /* Copy unflushed data, so we can put it in next buffer */ | |
1039 | for (i = 0; i < new_rd_host_buf_pos; i++) | |
1040 | channel->rd_leftovers[i] = *tail++; | |
1041 | ||
1042 | spin_lock_irqsave(&channel->rd_spinlock, flags); | |
1043 | ||
1044 | /* Autoflush only if a single buffer is occupied */ | |
1045 | ||
1046 | if ((timeout < 0) && | |
1047 | (channel->rd_full || | |
1048 | (bufidx_minus1 != channel->rd_fpga_buf_idx))) { | |
1049 | spin_unlock_irqrestore(&channel->rd_spinlock, flags); | |
1050 | /* | |
1051 | * A new work item may be queued by the ISR exactly | |
1052 | * now, since the execution of a work item allows the | |
1053 | * queuing of a new one while it's running. | |
1054 | */ | |
1055 | goto done; | |
1056 | } | |
1057 | ||
1058 | /* The 4th element is never needed for data, so it's a flag */ | |
1059 | channel->rd_leftovers[3] = (new_rd_host_buf_pos != 0); | |
1060 | ||
1061 | /* Set up rd_full to reflect a certain moment's state */ | |
1062 | ||
1063 | if (bufidx == channel->rd_fpga_buf_idx) | |
1064 | channel->rd_full = 1; | |
1065 | spin_unlock_irqrestore(&channel->rd_spinlock, flags); | |
1066 | ||
1067 | if (bufidx >= (channel->num_rd_buffers - 1)) | |
1068 | channel->rd_host_buf_idx = 0; | |
1069 | else | |
1070 | channel->rd_host_buf_idx++; | |
1071 | ||
7ee9ded2 | 1072 | channel->endpoint->ephw->hw_sync_sgl_for_device( |
48bae050 EB |
1073 | channel->endpoint, |
1074 | channel->rd_buffers[bufidx]->dma_addr, | |
1075 | channel->rd_buf_size, | |
1076 | DMA_TO_DEVICE); | |
1077 | ||
1078 | mutex_lock(&channel->endpoint->register_mutex); | |
1079 | ||
1080 | iowrite32(end_offset_plus1 - 1, | |
539889ee | 1081 | channel->endpoint->registers + fpga_buf_offset_reg); |
48bae050 EB |
1082 | |
1083 | iowrite32((channel->chan_num << 1) | /* Channel ID */ | |
91a2dea8 EB |
1084 | (2 << 24) | /* Opcode 2, submit buffer */ |
1085 | (bufidx << 12), | |
1086 | channel->endpoint->registers + fpga_buf_ctrl_reg); | |
48bae050 EB |
1087 | |
1088 | mutex_unlock(&channel->endpoint->register_mutex); | |
c14cc622 | 1089 | } else if (bufidx == 0) { |
48bae050 | 1090 | bufidx = channel->num_rd_buffers - 1; |
c14cc622 | 1091 | } else { |
48bae050 | 1092 | bufidx--; |
c14cc622 | 1093 | } |
48bae050 EB |
1094 | |
1095 | channel->rd_host_buf_pos = new_rd_host_buf_pos; | |
1096 | ||
1097 | if (timeout < 0) | |
1098 | goto done; /* Autoflush */ | |
1099 | ||
48bae050 EB |
1100 | /* |
1101 | * bufidx is now the last buffer written to (or equal to | |
1102 | * rd_fpga_buf_idx if buffer was never written to), and | |
1103 | * channel->rd_host_buf_idx the one after it. | |
1104 | * | |
1105 | * If bufidx == channel->rd_fpga_buf_idx we're either empty or full. | |
1106 | */ | |
1107 | ||
48bae050 EB |
1108 | while (1) { /* Loop waiting for draining of buffers */ |
1109 | spin_lock_irqsave(&channel->rd_spinlock, flags); | |
1110 | ||
1111 | if (bufidx != channel->rd_fpga_buf_idx) | |
1112 | channel->rd_full = 1; /* | |
1113 | * Not really full, | |
1114 | * but needs waiting. | |
1115 | */ | |
1116 | ||
1117 | empty = !channel->rd_full; | |
1118 | ||
1119 | spin_unlock_irqrestore(&channel->rd_spinlock, flags); | |
1120 | ||
1121 | if (empty) | |
1122 | break; | |
1123 | ||
1124 | /* | |
1125 | * Indefinite sleep with mutex taken. With data waiting for | |
1126 | * flushing, user should not be surprised if open() for write | |
1127 | * sleeps. | |
1128 | */ | |
1129 | if (timeout == 0) | |
1130 | wait_event_interruptible(channel->rd_wait, | |
1131 | (!channel->rd_full)); | |
1132 | ||
1133 | else if (wait_event_interruptible_timeout( | |
1134 | channel->rd_wait, | |
1135 | (!channel->rd_full), | |
1136 | timeout) == 0) { | |
35fcf7e3 | 1137 | dev_warn(channel->endpoint->dev, |
91a2dea8 | 1138 | "Timed out while flushing. Output data may be lost.\n"); |
48bae050 EB |
1139 | |
1140 | rc = -ETIMEDOUT; | |
1141 | break; | |
1142 | } | |
1143 | ||
1144 | if (channel->rd_full) { | |
1145 | rc = -EINTR; | |
1146 | break; | |
1147 | } | |
1148 | } | |
1149 | ||
1150 | done: | |
1151 | mutex_unlock(&channel->rd_mutex); | |
1152 | ||
1153 | if (channel->endpoint->fatal_error) | |
1154 | return -EIO; | |
1155 | ||
1156 | return rc; | |
1157 | } | |
1158 | ||
1159 | static int xillybus_flush(struct file *filp, fl_owner_t id) | |
1160 | { | |
1161 | if (!(filp->f_mode & FMODE_WRITE)) | |
1162 | return 0; | |
1163 | ||
1164 | return xillybus_myflush(filp->private_data, HZ); /* 1 second timeout */ | |
1165 | } | |
1166 | ||
1167 | static void xillybus_autoflush(struct work_struct *work) | |
1168 | { | |
1169 | struct delayed_work *workitem = container_of( | |
1170 | work, struct delayed_work, work); | |
1171 | struct xilly_channel *channel = container_of( | |
1172 | workitem, struct xilly_channel, rd_workitem); | |
1173 | int rc; | |
1174 | ||
1175 | rc = xillybus_myflush(channel, -1); | |
48bae050 | 1176 | if (rc == -EINTR) |
35fcf7e3 EB |
1177 | dev_warn(channel->endpoint->dev, |
1178 | "Autoflush failed because work queue thread got a signal.\n"); | |
48bae050 | 1179 | else if (rc) |
35fcf7e3 EB |
1180 | dev_err(channel->endpoint->dev, |
1181 | "Autoflush failed under weird circumstances.\n"); | |
48bae050 EB |
1182 | } |
1183 | ||
7ee9ded2 | 1184 | static ssize_t xillybus_write(struct file *filp, const char __user *userbuf, |
48bae050 EB |
1185 | size_t count, loff_t *f_pos) |
1186 | { | |
1187 | ssize_t rc; | |
1188 | unsigned long flags; | |
1189 | int bytes_done = 0; | |
1190 | struct xilly_channel *channel = filp->private_data; | |
1191 | ||
1192 | int full, exhausted; | |
1193 | /* Initializations are there only to silence warnings */ | |
1194 | ||
1195 | int howmany = 0, bufpos = 0, bufidx = 0, bufferdone = 0; | |
1196 | int end_offset_plus1 = 0; | |
1197 | ||
1198 | if (channel->endpoint->fatal_error) | |
1199 | return -EIO; | |
1200 | ||
1201 | rc = mutex_lock_interruptible(&channel->rd_mutex); | |
48bae050 EB |
1202 | if (rc) |
1203 | return rc; | |
1204 | ||
48bae050 EB |
1205 | while (1) { |
1206 | int bytes_to_do = count - bytes_done; | |
1207 | ||
1208 | spin_lock_irqsave(&channel->rd_spinlock, flags); | |
1209 | ||
1210 | full = channel->rd_full; | |
1211 | ||
1212 | if (!full) { | |
1213 | bufidx = channel->rd_host_buf_idx; | |
1214 | bufpos = channel->rd_host_buf_pos; | |
1215 | howmany = channel->rd_buf_size - bufpos; | |
1216 | ||
1217 | /* | |
1218 | * Update rd_host_* to its state after this operation. | |
1219 | * count=0 means committing the buffer immediately, | |
1220 | * which is like flushing, but without necessarily blocking. | |
1221 | */ | |
1222 | ||
1223 | if ((howmany > bytes_to_do) && | |
1224 | (count || | |
1225 | ((bufpos >> channel->log2_element_size) == 0))) { | |
1226 | bufferdone = 0; | |
1227 | ||
1228 | howmany = bytes_to_do; | |
1229 | channel->rd_host_buf_pos += howmany; | |
1230 | } else { | |
1231 | bufferdone = 1; | |
1232 | ||
1233 | if (count) { | |
1234 | end_offset_plus1 = | |
1235 | channel->rd_buf_size >> | |
1236 | channel->log2_element_size; | |
1237 | channel->rd_host_buf_pos = 0; | |
1238 | } else { | |
1239 | unsigned char *tail; | |
1240 | int i; | |
1241 | ||
127af882 EB |
1242 | howmany = 0; |
1243 | ||
48bae050 EB |
1244 | end_offset_plus1 = bufpos >> |
1245 | channel->log2_element_size; | |
1246 | ||
1247 | channel->rd_host_buf_pos -= | |
1248 | end_offset_plus1 << | |
1249 | channel->log2_element_size; | |
1250 | ||
1251 | tail = channel-> | |
1252 | rd_buffers[bufidx]->addr + | |
1253 | (end_offset_plus1 << | |
1254 | channel->log2_element_size); | |
1255 | ||
1256 | for (i = 0; | |
1257 | i < channel->rd_host_buf_pos; | |
1258 | i++) | |
1259 | channel->rd_leftovers[i] = | |
1260 | *tail++; | |
1261 | } | |
1262 | ||
1263 | if (bufidx == channel->rd_fpga_buf_idx) | |
1264 | channel->rd_full = 1; | |
1265 | ||
1266 | if (bufidx >= (channel->num_rd_buffers - 1)) | |
1267 | channel->rd_host_buf_idx = 0; | |
1268 | else | |
1269 | channel->rd_host_buf_idx++; | |
1270 | } | |
1271 | } | |
1272 | ||
1273 | /* | |
1274 | * Marking our situation after the possible changes above, | |
1275 | * for use after releasing the spinlock. | |
1276 | * | |
1277 | * full = full before change | |
1278 | * exhausted = full after possible change | |
1279 | */ | |
1280 | ||
1281 | exhausted = channel->rd_full; | |
1282 | ||
1283 | spin_unlock_irqrestore(&channel->rd_spinlock, flags); | |
1284 | ||
1285 | if (!full) { /* Go on, now without the spinlock */ | |
1286 | unsigned char *head = | |
1287 | channel->rd_buffers[bufidx]->addr; | |
1288 | int i; | |
1289 | ||
1290 | if ((bufpos == 0) || /* Zero means it's virgin */ | |
1291 | (channel->rd_leftovers[3] != 0)) { | |
7ee9ded2 | 1292 | channel->endpoint->ephw->hw_sync_sgl_for_cpu( |
48bae050 EB |
1293 | channel->endpoint, |
1294 | channel->rd_buffers[bufidx]->dma_addr, | |
1295 | channel->rd_buf_size, | |
1296 | DMA_TO_DEVICE); | |
1297 | ||
1298 | /* Virgin, but leftovers are due */ | |
1299 | for (i = 0; i < bufpos; i++) | |
1300 | *head++ = channel->rd_leftovers[i]; | |
1301 | ||
1302 | channel->rd_leftovers[3] = 0; /* Clear flag */ | |
1303 | } | |
1304 | ||
1305 | if (copy_from_user( | |
1306 | channel->rd_buffers[bufidx]->addr + bufpos, | |
1307 | userbuf, howmany)) | |
1308 | rc = -EFAULT; | |
1309 | ||
1310 | userbuf += howmany; | |
1311 | bytes_done += howmany; | |
1312 | ||
1313 | if (bufferdone) { | |
d3274f20 EB |
1314 | channel->endpoint->ephw->hw_sync_sgl_for_device( |
1315 | channel->endpoint, | |
1316 | channel->rd_buffers[bufidx]->dma_addr, | |
1317 | channel->rd_buf_size, | |
1318 | DMA_TO_DEVICE); | |
48bae050 EB |
1319 | |
1320 | mutex_lock(&channel->endpoint->register_mutex); | |
1321 | ||
1322 | iowrite32(end_offset_plus1 - 1, | |
539889ee EB |
1323 | channel->endpoint->registers + |
1324 | fpga_buf_offset_reg); | |
9fdde366 | 1325 | |
48bae050 | 1326 | iowrite32((channel->chan_num << 1) | |
91a2dea8 EB |
1327 | (2 << 24) | /* 2 = submit buffer */ |
1328 | (bufidx << 12), | |
1329 | channel->endpoint->registers + | |
1330 | fpga_buf_ctrl_reg); | |
48bae050 EB |
1331 | |
1332 | mutex_unlock(&channel->endpoint-> | |
1333 | register_mutex); | |
1334 | ||
1335 | channel->rd_leftovers[3] = | |
1336 | (channel->rd_host_buf_pos != 0); | |
1337 | } | |
1338 | ||
1339 | if (rc) { | |
1340 | mutex_unlock(&channel->rd_mutex); | |
1341 | ||
1342 | if (channel->endpoint->fatal_error) | |
1343 | return -EIO; | |
1344 | ||
1345 | if (!channel->rd_synchronous) | |
1346 | queue_delayed_work( | |
1347 | xillybus_wq, | |
1348 | &channel->rd_workitem, | |
1349 | XILLY_RX_TIMEOUT); | |
1350 | ||
1351 | return rc; | |
1352 | } | |
1353 | } | |
1354 | ||
1355 | if (bytes_done >= count) | |
1356 | break; | |
1357 | ||
1358 | if (!exhausted) | |
1359 | continue; /* If there's more space, just go on */ | |
1360 | ||
1361 | if ((bytes_done > 0) && channel->rd_allow_partial) | |
1362 | break; | |
1363 | ||
1364 | /* | |
1365 | * Indefinite sleep with mutex taken. With data waiting for | |
1366 | * flushing, user should not be surprised if open() for write | |
1367 | * sleeps. | |
1368 | */ | |
1369 | ||
1370 | if (filp->f_flags & O_NONBLOCK) { | |
06bda66b | 1371 | rc = -EAGAIN; |
48bae050 EB |
1372 | break; |
1373 | } | |
1374 | ||
ae870e5d EB |
1375 | if (wait_event_interruptible(channel->rd_wait, |
1376 | (!channel->rd_full))) { | |
48bae050 EB |
1377 | mutex_unlock(&channel->rd_mutex); |
1378 | ||
1379 | if (channel->endpoint->fatal_error) | |
1380 | return -EIO; | |
1381 | ||
1382 | if (bytes_done) | |
1383 | return bytes_done; | |
1384 | return -EINTR; | |
1385 | } | |
1386 | } | |
1387 | ||
1388 | mutex_unlock(&channel->rd_mutex); | |
1389 | ||
1390 | if (!channel->rd_synchronous) | |
1391 | queue_delayed_work(xillybus_wq, | |
1392 | &channel->rd_workitem, | |
1393 | XILLY_RX_TIMEOUT); | |
1394 | ||
06bda66b EB |
1395 | if (channel->endpoint->fatal_error) |
1396 | return -EIO; | |
1397 | ||
1398 | if (rc) | |
1399 | return rc; | |
1400 | ||
48bae050 EB |
1401 | if ((channel->rd_synchronous) && (bytes_done > 0)) { |
1402 | rc = xillybus_myflush(filp->private_data, 0); /* No timeout */ | |
1403 | ||
1404 | if (rc && (rc != -EINTR)) | |
1405 | return rc; | |
1406 | } | |
1407 | ||
48bae050 EB |
1408 | return bytes_done; |
1409 | } | |
1410 | ||
1411 | static int xillybus_open(struct inode *inode, struct file *filp) | |
1412 | { | |
1413 | int rc = 0; | |
1414 | unsigned long flags; | |
1415 | int minor = iminor(inode); | |
1416 | int major = imajor(inode); | |
1417 | struct xilly_endpoint *ep_iter, *endpoint = NULL; | |
1418 | struct xilly_channel *channel; | |
1419 | ||
1420 | mutex_lock(&ep_list_lock); | |
1421 | ||
1422 | list_for_each_entry(ep_iter, &list_of_endpoints, ep_list) { | |
1423 | if ((ep_iter->major == major) && | |
1424 | (minor >= ep_iter->lowest_minor) && | |
1425 | (minor < (ep_iter->lowest_minor + | |
1426 | ep_iter->num_channels))) { | |
1427 | endpoint = ep_iter; | |
1428 | break; | |
1429 | } | |
1430 | } | |
1431 | mutex_unlock(&ep_list_lock); | |
1432 | ||
1433 | if (!endpoint) { | |
ae1dd9bc EA |
1434 | pr_err("xillybus: open() failed to find a device for major=%d and minor=%d\n", |
1435 | major, minor); | |
48bae050 EB |
1436 | return -ENODEV; |
1437 | } | |
1438 | ||
1439 | if (endpoint->fatal_error) | |
1440 | return -EIO; | |
1441 | ||
1442 | channel = endpoint->channels[1 + minor - endpoint->lowest_minor]; | |
1443 | filp->private_data = channel; | |
1444 | ||
48bae050 EB |
1445 | /* |
1446 | * It gets complicated because: | |
1447 | * 1. We don't want to take a mutex we don't have to | |
1448 | * 2. We don't want to open one direction if the other will fail. | |
1449 | */ | |
1450 | ||
1451 | if ((filp->f_mode & FMODE_READ) && (!channel->num_wr_buffers)) | |
1452 | return -ENODEV; | |
1453 | ||
1454 | if ((filp->f_mode & FMODE_WRITE) && (!channel->num_rd_buffers)) | |
1455 | return -ENODEV; | |
1456 | ||
1457 | if ((filp->f_mode & FMODE_READ) && (filp->f_flags & O_NONBLOCK) && | |
1458 | (channel->wr_synchronous || !channel->wr_allow_partial || | |
1459 | !channel->wr_supports_nonempty)) { | |
35fcf7e3 EB |
1460 | dev_err(endpoint->dev, |
1461 | "open() failed: O_NONBLOCK not allowed for read on this device\n"); | |
48bae050 EB |
1462 | return -ENODEV; |
1463 | } | |
1464 | ||
1465 | if ((filp->f_mode & FMODE_WRITE) && (filp->f_flags & O_NONBLOCK) && | |
1466 | (channel->rd_synchronous || !channel->rd_allow_partial)) { | |
35fcf7e3 EB |
1467 | dev_err(endpoint->dev, |
1468 | "open() failed: O_NONBLOCK not allowed for write on this device\n"); | |
48bae050 EB |
1469 | return -ENODEV; |
1470 | } | |
1471 | ||
1472 | /* | |
1473 | * Note: open() may block on getting mutexes despite O_NONBLOCK. | |
1474 | * This shouldn't occur normally, since multiple open of the same | |
1475 | * file descriptor is almost always prohibited anyhow | |
1476 | * (*_exclusive_open is normally set in real-life systems). | |
1477 | */ | |
1478 | ||
1479 | if (filp->f_mode & FMODE_READ) { | |
1480 | rc = mutex_lock_interruptible(&channel->wr_mutex); | |
1481 | if (rc) | |
1482 | return rc; | |
1483 | } | |
1484 | ||
1485 | if (filp->f_mode & FMODE_WRITE) { | |
1486 | rc = mutex_lock_interruptible(&channel->rd_mutex); | |
1487 | if (rc) | |
1488 | goto unlock_wr; | |
1489 | } | |
1490 | ||
1491 | if ((filp->f_mode & FMODE_READ) && | |
1492 | (channel->wr_ref_count != 0) && | |
1493 | (channel->wr_exclusive_open)) { | |
1494 | rc = -EBUSY; | |
1495 | goto unlock; | |
1496 | } | |
1497 | ||
1498 | if ((filp->f_mode & FMODE_WRITE) && | |
1499 | (channel->rd_ref_count != 0) && | |
1500 | (channel->rd_exclusive_open)) { | |
1501 | rc = -EBUSY; | |
1502 | goto unlock; | |
1503 | } | |
1504 | ||
48bae050 EB |
1505 | if (filp->f_mode & FMODE_READ) { |
1506 | if (channel->wr_ref_count == 0) { /* First open of file */ | |
1507 | /* Move the host to first buffer */ | |
1508 | spin_lock_irqsave(&channel->wr_spinlock, flags); | |
1509 | channel->wr_host_buf_idx = 0; | |
1510 | channel->wr_host_buf_pos = 0; | |
1511 | channel->wr_fpga_buf_idx = -1; | |
1512 | channel->wr_empty = 1; | |
1513 | channel->wr_ready = 0; | |
1514 | channel->wr_sleepy = 1; | |
1515 | channel->wr_eof = -1; | |
1516 | channel->wr_hangup = 0; | |
1517 | ||
1518 | spin_unlock_irqrestore(&channel->wr_spinlock, flags); | |
1519 | ||
1520 | iowrite32(1 | (channel->chan_num << 1) | | |
1521 | (4 << 24) | /* Opcode 4, open channel */ | |
1522 | ((channel->wr_synchronous & 1) << 23), | |
539889ee EB |
1523 | channel->endpoint->registers + |
1524 | fpga_buf_ctrl_reg); | |
48bae050 EB |
1525 | } |
1526 | ||
1527 | channel->wr_ref_count++; | |
1528 | } | |
1529 | ||
1530 | if (filp->f_mode & FMODE_WRITE) { | |
1531 | if (channel->rd_ref_count == 0) { /* First open of file */ | |
1532 | /* Move the host to first buffer */ | |
1533 | spin_lock_irqsave(&channel->rd_spinlock, flags); | |
1534 | channel->rd_host_buf_idx = 0; | |
1535 | channel->rd_host_buf_pos = 0; | |
1536 | channel->rd_leftovers[3] = 0; /* No leftovers. */ | |
1537 | channel->rd_fpga_buf_idx = channel->num_rd_buffers - 1; | |
1538 | channel->rd_full = 0; | |
1539 | ||
1540 | spin_unlock_irqrestore(&channel->rd_spinlock, flags); | |
1541 | ||
1542 | iowrite32((channel->chan_num << 1) | | |
1543 | (4 << 24), /* Opcode 4, open channel */ | |
539889ee EB |
1544 | channel->endpoint->registers + |
1545 | fpga_buf_ctrl_reg); | |
48bae050 EB |
1546 | } |
1547 | ||
1548 | channel->rd_ref_count++; | |
1549 | } | |
1550 | ||
1551 | unlock: | |
1552 | if (filp->f_mode & FMODE_WRITE) | |
1553 | mutex_unlock(&channel->rd_mutex); | |
1554 | unlock_wr: | |
1555 | if (filp->f_mode & FMODE_READ) | |
1556 | mutex_unlock(&channel->wr_mutex); | |
1557 | ||
1558 | if (!rc && (!channel->seekable)) | |
1559 | return nonseekable_open(inode, filp); | |
1560 | ||
1561 | return rc; | |
1562 | } | |
1563 | ||
1564 | static int xillybus_release(struct inode *inode, struct file *filp) | |
1565 | { | |
48bae050 EB |
1566 | unsigned long flags; |
1567 | struct xilly_channel *channel = filp->private_data; | |
1568 | ||
1569 | int buf_idx; | |
1570 | int eof; | |
1571 | ||
1572 | if (channel->endpoint->fatal_error) | |
1573 | return -EIO; | |
1574 | ||
1575 | if (filp->f_mode & FMODE_WRITE) { | |
a983dd5d | 1576 | mutex_lock(&channel->rd_mutex); |
48bae050 EB |
1577 | |
1578 | channel->rd_ref_count--; | |
1579 | ||
1580 | if (channel->rd_ref_count == 0) { | |
48bae050 EB |
1581 | /* |
1582 | * We rely on the kernel calling flush() | |
1583 | * before we get here. | |
1584 | */ | |
1585 | ||
1586 | iowrite32((channel->chan_num << 1) | /* Channel ID */ | |
1587 | (5 << 24), /* Opcode 5, close channel */ | |
539889ee EB |
1588 | channel->endpoint->registers + |
1589 | fpga_buf_ctrl_reg); | |
48bae050 EB |
1590 | } |
1591 | mutex_unlock(&channel->rd_mutex); | |
1592 | } | |
1593 | ||
1594 | if (filp->f_mode & FMODE_READ) { | |
a983dd5d | 1595 | mutex_lock(&channel->wr_mutex); |
48bae050 EB |
1596 | |
1597 | channel->wr_ref_count--; | |
1598 | ||
1599 | if (channel->wr_ref_count == 0) { | |
48bae050 | 1600 | iowrite32(1 | (channel->chan_num << 1) | |
91a2dea8 EB |
1601 | (5 << 24), /* Opcode 5, close channel */ |
1602 | channel->endpoint->registers + | |
1603 | fpga_buf_ctrl_reg); | |
48bae050 EB |
1604 | |
1605 | /* | |
1606 | * This is crazily cautious: We make sure not | |
1607 | * only that we got an EOF (be it because we closed | |
1608 | * the channel or because of a user's EOF), but also | |
1609 | * that it's one beyond the last buffer that arrived, so | |
1610 | * we have no leftover buffers pending before wrapping | |
1611 | * up (which can only happen in asynchronous channels, | |
1612 | * BTW) | |
1613 | */ | |
1614 | ||
1615 | while (1) { | |
1616 | spin_lock_irqsave(&channel->wr_spinlock, | |
1617 | flags); | |
1618 | buf_idx = channel->wr_fpga_buf_idx; | |
1619 | eof = channel->wr_eof; | |
1620 | channel->wr_sleepy = 1; | |
1621 | spin_unlock_irqrestore(&channel->wr_spinlock, | |
1622 | flags); | |
1623 | ||
1624 | /* | |
1625 | * Check if eof points at the buffer after | |
1626 | * the last one the FPGA submitted. Note that a | |
1627 | * negative eof value means no EOF has arrived. | |
1628 | */ | |
1629 | ||
1630 | buf_idx++; | |
1631 | if (buf_idx == channel->num_wr_buffers) | |
1632 | buf_idx = 0; | |
1633 | ||
1634 | if (buf_idx == eof) | |
1635 | break; | |
1636 | ||
1637 | /* | |
1638 | * Steal an extra 100 ms if woken by a signal. | |
1639 | * This is a simple workaround for a signal | |
1640 | * already pending when entering, which would | |
1641 | * otherwise result in declaring the hardware | |
1642 | * non-responsive. | |
1643 | */ | |
1644 | ||
1645 | if (wait_event_interruptible( | |
1646 | channel->wr_wait, | |
1647 | (!channel->wr_sleepy))) | |
1648 | msleep(100); | |
1649 | ||
1650 | if (channel->wr_sleepy) { | |
1651 | mutex_unlock(&channel->wr_mutex); | |
35fcf7e3 EB |
1652 | dev_warn(channel->endpoint->dev, |
1653 | "Hardware failed to respond to close command, therefore left in a messy state.\n"); | |
48bae050 EB |
1654 | return -EINTR; |
1655 | } | |
1656 | } | |
1657 | } | |
1658 | ||
1659 | mutex_unlock(&channel->wr_mutex); | |
1660 | } | |
1661 | ||
1662 | return 0; | |
1663 | } | |
64fa2b1b | 1664 | |
7ee9ded2 | 1665 | static loff_t xillybus_llseek(struct file *filp, loff_t offset, int whence) |
48bae050 EB |
1666 | { |
1667 | struct xilly_channel *channel = filp->private_data; | |
1668 | loff_t pos = filp->f_pos; | |
1669 | int rc = 0; | |
1670 | ||
1671 | /* | |
1672 | * Take both mutexes without allowing interrupts, since it seems like | |
1673 | * common applications don't expect an -EINTR here. Besides, multiple | |
9379fd54 | 1674 | * access to a single file descriptor on seekable devices is a mess |
48bae050 EB |
1675 | * anyhow. |
1676 | */ | |
1677 | ||
1678 | if (channel->endpoint->fatal_error) | |
1679 | return -EIO; | |
1680 | ||
1681 | mutex_lock(&channel->wr_mutex); | |
1682 | mutex_lock(&channel->rd_mutex); | |
1683 | ||
1684 | switch (whence) { | |
3cbc7479 | 1685 | case SEEK_SET: |
48bae050 EB |
1686 | pos = offset; |
1687 | break; | |
3cbc7479 | 1688 | case SEEK_CUR: |
48bae050 EB |
1689 | pos += offset; |
1690 | break; | |
3cbc7479 | 1691 | case SEEK_END: |
48bae050 EB |
1692 | pos = offset; /* Going to the end => to the beginning */ |
1693 | break; | |
1694 | default: | |
1695 | rc = -EINVAL; | |
1696 | goto end; | |
1697 | } | |
1698 | ||
1699 | /* In any case, we must finish on an element boundary */ | |
1700 | if (pos & ((1 << channel->log2_element_size) - 1)) { | |
1701 | rc = -EINVAL; | |
1702 | goto end; | |
1703 | } | |
1704 | ||
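/*
 * A worked example with hypothetical numbers, for illustration only: on
 * a channel whose elements are 32 bits wide, log2_element_size is 2, so
 * a seek to byte position 0x1000 passes the boundary check above, and
 * the element index 0x400 (= 0x1000 >> 2) is what reaches
 * fpga_buf_offset_reg below.
 */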
1705 | mutex_lock(&channel->endpoint->register_mutex); | |
1706 | ||
1707 | iowrite32(pos >> channel->log2_element_size, | |
539889ee | 1708 | channel->endpoint->registers + fpga_buf_offset_reg); |
9fdde366 | 1709 | |
48bae050 EB |
1710 | iowrite32((channel->chan_num << 1) | |
1711 | (6 << 24), /* Opcode 6, set address */ | |
539889ee | 1712 | channel->endpoint->registers + fpga_buf_ctrl_reg); |
48bae050 EB |
1713 | |
1714 | mutex_unlock(&channel->endpoint->register_mutex); | |
1715 | ||
1716 | end: | |
1717 | mutex_unlock(&channel->rd_mutex); | |
1718 | mutex_unlock(&channel->wr_mutex); | |
1719 | ||
1720 | if (rc) /* Return error after releasing mutexes */ | |
1721 | return rc; | |
1722 | ||
1723 | filp->f_pos = pos; | |
1724 | ||
1725 | /* | |
1726 | * Since seekable devices are allowed only when the channel is | |
1727 | * synchronous, we assume that there is no data pending in either | |
1728 | * direction (which holds true as long as no concurrent access to the | |
1729 | * file descriptor takes place). | |
1730 | * The only thing we may need to throw away is leftovers from a | |
1731 | * partial write() flush. | |
1732 | */ | |
1733 | ||
1734 | channel->rd_leftovers[3] = 0; | |
1735 | ||
1736 | return pos; | |
1737 | } | |
1738 | ||
1739 | static unsigned int xillybus_poll(struct file *filp, poll_table *wait) | |
1740 | { | |
1741 | struct xilly_channel *channel = filp->private_data; | |
1742 | unsigned int mask = 0; | |
1743 | unsigned long flags; | |
1744 | ||
1745 | poll_wait(filp, &channel->endpoint->ep_wait, wait); | |
1746 | ||
1747 | /* | |
1748 | * poll() won't play ball on read() channels unless they are both | |
1749 | * asynchronous and support the nonempty message. Allowing it | |
1750 | * otherwise would create situations where data has arrived at | |
1751 | * the FPGA while users expect select() to wake up, which it may | |
1752 | * not. | |
1753 | */ | |
1754 | ||
1755 | if (!channel->wr_synchronous && channel->wr_supports_nonempty) { | |
1756 | poll_wait(filp, &channel->wr_wait, wait); | |
1757 | poll_wait(filp, &channel->wr_ready_wait, wait); | |
1758 | ||
1759 | spin_lock_irqsave(&channel->wr_spinlock, flags); | |
1760 | if (!channel->wr_empty || channel->wr_ready) | |
1761 | mask |= POLLIN | POLLRDNORM; | |
1762 | ||
1763 | if (channel->wr_hangup) | |
1764 | /* | |
1765 | * Not POLLHUP, because its behavior is poorly | |
1766 | * defined, and POLLIN does what we want: Wake up | |
1767 | * the read file descriptor so it sees EOF. | |
1768 | */ | |
1769 | mask |= POLLIN | POLLRDNORM; | |
1770 | spin_unlock_irqrestore(&channel->wr_spinlock, flags); | |
1771 | } | |
1772 | ||
1773 | /* | |
1774 | * If partial data write is disallowed on a write() channel, | |
1775 | * it's pointless to ever signal OK to write, because it could | |
1776 | * block despite some space being available. | |
1777 | */ | |
1778 | ||
1779 | if (channel->rd_allow_partial) { | |
1780 | poll_wait(filp, &channel->rd_wait, wait); | |
1781 | ||
1782 | spin_lock_irqsave(&channel->rd_spinlock, flags); | |
1783 | if (!channel->rd_full) | |
1784 | mask |= POLLOUT | POLLWRNORM; | |
1785 | spin_unlock_irqrestore(&channel->rd_spinlock, flags); | |
1786 | } | |
1787 | ||
1788 | if (channel->endpoint->fatal_error) | |
1789 | mask |= POLLERR; | |
1790 | ||
1791 | return mask; | |
1792 | } | |
1793 | ||
1794 | static const struct file_operations xillybus_fops = { | |
1795 | .owner = THIS_MODULE, | |
1796 | .read = xillybus_read, | |
1797 | .write = xillybus_write, | |
1798 | .open = xillybus_open, | |
1799 | .flush = xillybus_flush, | |
1800 | .release = xillybus_release, | |
1801 | .llseek = xillybus_llseek, | |
1802 | .poll = xillybus_poll, | |
1803 | }; | |
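To make the semantics of the file operations above concrete, here is a minimal
user-space sketch. The device name /dev/xillybus_mem, the read/write access and
the 8-bit element size are assumptions made for illustration only; the actual
device files and their attributes depend entirely on the IDT presented by the
FPGA.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char val;
	int fd = open("/dev/xillybus_mem", O_RDWR); /* hypothetical name */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Seek to address 5; must be a multiple of the element size */
	if (lseek(fd, 5, SEEK_SET) < 0) {
		perror("lseek");
		return 1;
	}

	/* Read one element at that address (a write() would store one) */
	if (read(fd, &val, 1) != 1) {
		perror("read");
		return 1;
	}

	printf("Value at address 5: %u\n", (unsigned int)val);
	close(fd);
	return 0;
}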
1804 | ||
1805 | static int xillybus_init_chrdev(struct xilly_endpoint *endpoint, | |
1806 | const unsigned char *idt) | |
1807 | { | |
1808 | int rc; | |
1809 | dev_t dev; | |
1810 | int devnum, i, minor, major; | |
1811 | char devname[48]; | |
1812 | struct device *device; | |
1813 | ||
1814 | rc = alloc_chrdev_region(&dev, 0, /* minor start */ | |
1815 | endpoint->num_channels, | |
1816 | xillyname); | |
48bae050 | 1817 | if (rc) { |
35fcf7e3 | 1818 | dev_warn(endpoint->dev, "Failed to obtain major/minors\n"); | |
5b09fc96 | 1819 | return rc; |
48bae050 EB |
1820 | } |
1821 | ||
1822 | endpoint->major = major = MAJOR(dev); | |
1823 | endpoint->lowest_minor = minor = MINOR(dev); | |
1824 | ||
1825 | cdev_init(&endpoint->cdev, &xillybus_fops); | |
1826 | endpoint->cdev.owner = endpoint->ephw->owner; | |
1827 | rc = cdev_add(&endpoint->cdev, MKDEV(major, minor), | |
1828 | endpoint->num_channels); | |
1829 | if (rc) { | |
35fcf7e3 | 1830 | dev_warn(endpoint->dev, "Failed to add cdev. Aborting.\n"); |
5b09fc96 | 1831 | goto unregister_chrdev; |
48bae050 EB |
1832 | } |
1833 | ||
1834 | idt++; | |
1835 | ||
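/*
 * Past the byte just skipped, the IDT holds one NUL-terminated channel
 * name after another; each iteration below copies the current name into
 * devname, and the inner while loop advances idt to the start of the
 * next name.
 */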
1836 | for (i = minor, devnum = 0; | |
1837 | devnum < endpoint->num_channels; | |
1838 | devnum++, i++) { | |
1839 | snprintf(devname, sizeof(devname)-1, "xillybus_%s", idt); | |
1840 | ||
1841 | devname[sizeof(devname)-1] = 0; /* Should never matter */ | |
1842 | ||
1843 | while (*idt++) | |
1844 | /* Skip to next */; | |
1845 | ||
1846 | device = device_create(xillybus_class, | |
1847 | NULL, | |
1848 | MKDEV(major, i), | |
1849 | NULL, | |
e72b9da0 | 1850 | "%s", devname); |
48bae050 EB |
1851 | |
1852 | if (IS_ERR(device)) { | |
35fcf7e3 EB |
1853 | dev_warn(endpoint->dev, |
1854 | "Failed to create %s device. Aborting.\n", | |
1855 | devname); | |
5b09fc96 EB |
1856 | rc = -ENODEV; |
1857 | goto unroll_device_create; | |
48bae050 EB |
1858 | } |
1859 | } | |
1860 | ||
35fcf7e3 EB |
1861 | dev_info(endpoint->dev, "Created %d device files.\n", |
1862 | endpoint->num_channels); | |
48bae050 EB |
1863 | return 0; /* succeed */ |
1864 | ||
5b09fc96 | 1865 | unroll_device_create: |
48bae050 EB |
1866 | devnum--; i--; |
1867 | for (; devnum >= 0; devnum--, i--) | |
1868 | device_destroy(xillybus_class, MKDEV(major, i)); | |
1869 | ||
1870 | cdev_del(&endpoint->cdev); | |
5b09fc96 | 1871 | unregister_chrdev: |
48bae050 | 1872 | unregister_chrdev_region(MKDEV(major, minor), endpoint->num_channels); |
48bae050 EB |
1873 | |
1874 | return rc; | |
1875 | } | |
1876 | ||
1877 | static void xillybus_cleanup_chrdev(struct xilly_endpoint *endpoint) | |
1878 | { | |
1879 | int minor; | |
1880 | ||
1881 | for (minor = endpoint->lowest_minor; | |
1882 | minor < (endpoint->lowest_minor + endpoint->num_channels); | |
1883 | minor++) | |
1884 | device_destroy(xillybus_class, MKDEV(endpoint->major, minor)); | |
1885 | cdev_del(&endpoint->cdev); | |
1886 | unregister_chrdev_region(MKDEV(endpoint->major, | |
1887 | endpoint->lowest_minor), | |
1888 | endpoint->num_channels); | |
1889 | ||
35fcf7e3 EB |
1890 | dev_info(endpoint->dev, "Removed %d device files.\n", |
1891 | endpoint->num_channels); | |
48bae050 EB |
1892 | } |
1893 | ||
48bae050 EB |
1894 | struct xilly_endpoint *xillybus_init_endpoint(struct pci_dev *pdev, |
1895 | struct device *dev, | |
1896 | struct xilly_endpoint_hardware | |
1897 | *ephw) | |
1898 | { | |
1899 | struct xilly_endpoint *endpoint; | |
1900 | ||
9267462e | 1901 | endpoint = devm_kzalloc(dev, sizeof(*endpoint), GFP_KERNEL); |
91d42194 | 1902 | if (!endpoint) |
48bae050 | 1903 | return NULL; |
48bae050 EB |
1904 | |
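/*
 * The endpoint structure is devm-allocated: it is released together
 * with the underlying device, so no exit or error path needs to (or
 * should) kfree() it explicitly.
 */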
1905 | endpoint->pdev = pdev; | |
1906 | endpoint->dev = dev; | |
1907 | endpoint->ephw = ephw; | |
48bae050 EB |
1908 | endpoint->msg_counter = 0x0b; |
1909 | endpoint->failed_messages = 0; | |
1910 | endpoint->fatal_error = 0; | |
1911 | ||
1912 | init_waitqueue_head(&endpoint->ep_wait); | |
1913 | mutex_init(&endpoint->register_mutex); | |
1914 | ||
1915 | return endpoint; | |
1916 | } | |
1917 | EXPORT_SYMBOL(xillybus_init_endpoint); | |
1918 | ||
1919 | static int xilly_quiesce(struct xilly_endpoint *endpoint) | |
1920 | { | |
ae870e5d EB |
1921 | long t; |
1922 | ||
48bae050 | 1923 | endpoint->idtlen = -1; |
21fc0b9f | 1924 | |
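/*
 * The write below carries only the DMA address-width bit; apparently,
 * leaving the 0x0002 enable bit (used when DMA is turned on during
 * discovery) cleared is what requests the quiescing. The interrupt
 * handler is expected to make idtlen non-negative when the FPGA
 * responds, which is what the wait further down checks for.
 */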
48bae050 | 1925 | iowrite32((u32) (endpoint->dma_using_dac & 0x0001), |
539889ee | 1926 | endpoint->registers + fpga_dma_control_reg); |
48bae050 | 1927 | |
ae870e5d EB |
1928 | t = wait_event_interruptible_timeout(endpoint->ep_wait, |
1929 | (endpoint->idtlen >= 0), | |
1930 | XILLY_TIMEOUT); | |
1931 | if (t <= 0) { | |
35fcf7e3 | 1932 | dev_err(endpoint->dev, |
525be905 | 1933 | "Failed to quiesce the device on exit.\n"); |
48bae050 EB |
1934 | return -ENODEV; |
1935 | } | |
21c3184c | 1936 | return 0; |
48bae050 EB |
1937 | } |
1938 | ||
1939 | int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint) | |
1940 | { | |
40931bbb | 1941 | int rc; |
ae870e5d | 1942 | long t; |
48bae050 | 1943 | |
525be905 | 1944 | void *bootstrap_resources; |
48bae050 | 1945 | int idtbuffersize = (1 << PAGE_SHIFT); |
525be905 | 1946 | struct device *dev = endpoint->dev; |
48bae050 EB |
1947 | |
1948 | /* | |
1949 | * The bogus IDT is used during bootstrap for allocating the initial | |
1950 | * message buffer, and later the message buffer plus space for the IDT | |
1951 | * itself. The initial message buffer is a single page in size, but | |
1952 | * it's soon replaced with a more modest one (and the memory is freed). | |
1953 | */ | |
1954 | ||
1955 | unsigned char bogus_idt[8] = { 1, 224, (PAGE_SHIFT)-2, 0, | |
1956 | 3, 192, PAGE_SHIFT, 0 }; | |
1957 | struct xilly_idt_handle idt_handle; | |
1958 | ||
48bae050 | 1959 | /* |
9379fd54 MI |
1960 | * Writing the value 0x00000001 to the Endianness register signals which | |
1961 | * endianness this processor is using, so the FPGA can swap words as | |
48bae050 EB |
1962 | * necessary. |
1963 | */ | |
1964 | ||
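/*
 * The detection mechanism is presumably that the constant 1 ends up in
 * a different byte lane on the bus depending on the host's byte order,
 * so the logic can tell from where the set bit lands whether swapping
 * is required.
 */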
539889ee | 1965 | iowrite32(1, endpoint->registers + fpga_endian_reg); |
48bae050 EB |
1966 | |
1967 | /* Bootstrap phase I: Allocate temporary message buffer */ | |
1968 | ||
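/*
 * Everything devm-allocated between devres_open_group() and
 * devres_close_group() below belongs to the bootstrap group, so the
 * temporary buffers can all be dropped with a single
 * devres_release_group() once the real, IDT-derived buffers are set up.
 */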
525be905 EB |
1969 | bootstrap_resources = devres_open_group(dev, NULL, GFP_KERNEL); |
1970 | if (!bootstrap_resources) | |
1971 | return -ENOMEM; | |
1972 | ||
48bae050 EB |
1973 | endpoint->num_channels = 0; |
1974 | ||
525be905 | 1975 | rc = xilly_setupchannels(endpoint, bogus_idt, 1); |
48bae050 | 1976 | if (rc) |
525be905 | 1977 | return rc; |
48bae050 EB |
1978 | |
1979 | /* Clear the message subsystem (and counter in particular) */ | |
539889ee | 1980 | iowrite32(0x04, endpoint->registers + fpga_msg_ctrl_reg); |
48bae050 EB |
1981 | |
1982 | endpoint->idtlen = -1; | |
1983 | ||
48bae050 EB |
1984 | /* |
1985 | * Set DMA 32/64 bit mode, quiesce the device (?!) and get IDT | |
1986 | * buffer size. | |
1987 | */ | |
1988 | iowrite32((u32) (endpoint->dma_using_dac & 0x0001), | |
91a2dea8 | 1989 | endpoint->registers + fpga_dma_control_reg); |
48bae050 | 1990 | |
ae870e5d EB |
1991 | t = wait_event_interruptible_timeout(endpoint->ep_wait, |
1992 | (endpoint->idtlen >= 0), | |
1993 | XILLY_TIMEOUT); | |
1994 | if (t <= 0) { | |
35fcf7e3 | 1995 | dev_err(endpoint->dev, "No response from FPGA. Aborting.\n"); |
525be905 | 1996 | return -ENODEV; |
48bae050 EB |
1997 | } |
1998 | ||
1999 | /* Enable DMA */ | |
2000 | iowrite32((u32) (0x0002 | (endpoint->dma_using_dac & 0x0001)), | |
91a2dea8 | 2001 | endpoint->registers + fpga_dma_control_reg); |
48bae050 EB |
2002 | |
2003 | /* Bootstrap phase II: Allocate buffer for IDT and obtain it */ | |
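/*
 * bogus_idt[6] starts at PAGE_SHIFT while idtbuffersize starts at one
 * page, and the loop below doubles/increments them in lockstep, so
 * bogus_idt[6] stays equal to log2(idtbuffersize), presumably the
 * buffer-size exponent that xilly_setupchannels() expects in that slot.
 */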
2004 | while (endpoint->idtlen >= idtbuffersize) { | |
2005 | idtbuffersize *= 2; | |
2006 | bogus_idt[6]++; | |
2007 | } | |
2008 | ||
2009 | endpoint->num_channels = 1; | |
2010 | ||
525be905 | 2011 | rc = xilly_setupchannels(endpoint, bogus_idt, 2); |
48bae050 EB |
2012 | if (rc) |
2013 | goto failed_idt; | |
2014 | ||
48bae050 | 2015 | rc = xilly_obtain_idt(endpoint); |
48bae050 EB |
2016 | if (rc) |
2017 | goto failed_idt; | |
2018 | ||
9ac77ec6 EB |
2019 | rc = xilly_scan_idt(endpoint, &idt_handle); |
2020 | if (rc) | |
48bae050 | 2021 | goto failed_idt; |
525be905 EB |
2022 | |
2023 | devres_close_group(dev, bootstrap_resources); | |
2024 | ||
48bae050 EB |
2025 | /* Bootstrap phase III: Allocate buffers according to IDT */ |
2026 | ||
2027 | rc = xilly_setupchannels(endpoint, | |
48bae050 EB |
2028 | idt_handle.chandesc, |
2029 | idt_handle.entries); | |
48bae050 EB |
2030 | if (rc) |
2031 | goto failed_idt; | |
2032 | ||
48bae050 EB |
2033 | /* |
2034 | * The endpoint is now completely configured. We put it on the list | |
2035 | * available to open() before registering the char device(s). | |
2036 | */ | |
2037 | ||
2038 | mutex_lock(&ep_list_lock); | |
2039 | list_add_tail(&endpoint->ep_list, &list_of_endpoints); | |
2040 | mutex_unlock(&ep_list_lock); | |
2041 | ||
2042 | rc = xillybus_init_chrdev(endpoint, idt_handle.idt); | |
48bae050 EB |
2043 | if (rc) |
2044 | goto failed_chrdevs; | |
2045 | ||
525be905 | 2046 | devres_release_group(dev, bootstrap_resources); |
48bae050 EB |
2047 | |
2048 | return 0; | |
2049 | ||
2050 | failed_chrdevs: | |
2051 | mutex_lock(&ep_list_lock); | |
2052 | list_del(&endpoint->ep_list); | |
2053 | mutex_unlock(&ep_list_lock); | |
2054 | ||
2055 | failed_idt: | |
525be905 | 2056 | xilly_quiesce(endpoint); |
48bae050 | 2057 | flush_workqueue(xillybus_wq); |
48bae050 EB |
2058 | |
2059 | return rc; | |
2060 | } | |
2061 | EXPORT_SYMBOL(xillybus_endpoint_discovery); | |
2062 | ||
2063 | void xillybus_endpoint_remove(struct xilly_endpoint *endpoint) | |
2064 | { | |
2065 | xillybus_cleanup_chrdev(endpoint); | |
2066 | ||
2067 | mutex_lock(&ep_list_lock); | |
2068 | list_del(&endpoint->ep_list); | |
2069 | mutex_unlock(&ep_list_lock); | |
2070 | ||
2071 | xilly_quiesce(endpoint); | |
2072 | ||
2073 | /* | |
2074 | * Flushing is done upon endpoint release to prevent access to memory | |
2075 | * just about to be released. This makes the quiesce complete. | |
2076 | */ | |
2077 | flush_workqueue(xillybus_wq); | |
2078 | } | |
2079 | EXPORT_SYMBOL(xillybus_endpoint_remove); | |
2080 | ||
2081 | static int __init xillybus_init(void) | |
2082 | { | |
48bae050 EB |
2083 | mutex_init(&ep_list_lock); |
2084 | ||
2085 | xillybus_class = class_create(THIS_MODULE, xillyname); | |
2531f6cc EB |
2086 | if (IS_ERR(xillybus_class)) |
2087 | return PTR_ERR(xillybus_class); | |
48bae050 EB |
2088 | |
2089 | xillybus_wq = alloc_workqueue(xillyname, 0, 0); | |
3e67dee2 RW |
2090 | if (!xillybus_wq) { |
2091 | class_destroy(xillybus_class); | |
40931bbb | 2092 | return -ENOMEM; |
3e67dee2 | 2093 | } |
48bae050 | 2094 | |
40931bbb | 2095 | return 0; |
48bae050 EB |
2096 | } |
2097 | ||
2098 | static void __exit xillybus_exit(void) | |
2099 | { | |
2100 | /* flush_workqueue() was called for each endpoint released */ | |
2101 | destroy_workqueue(xillybus_wq); | |
2102 | ||
2103 | class_destroy(xillybus_class); | |
2104 | } | |
2105 | ||
2106 | module_init(xillybus_init); | |
2107 | module_exit(xillybus_exit); |