firewire: Configure channel and speed at context creation time.
[deliverable/linux.git] drivers/firewire/fw-device-cdev.c
/* -*- c-basic-offset: 8 -*-
 *
 * fw-device-cdev.c - Char device for device raw access
 *
 * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/poll.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <asm/uaccess.h>
#include "fw-transaction.h"
#include "fw-topology.h"
#include "fw-device.h"
#include "fw-device-cdev.h"

/*
 * todo
 *
 * - bus resets send a new packet with new generation and node id
 *
 */

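/*
 * Userspace talks to this driver through the firewire character
 * devices: read() delivers queued events, mmap() exposes the iso
 * buffer, and the FW_CDEV_IOC_* ioctls below drive transactions and
 * isochronous I/O.  As a rough, illustrative sketch only (field names
 * taken from the request structs used in this file, device node name
 * and speed constant assumed, error handling omitted), creating an iso
 * context with its channel and speed fixed up front might look like:
 *
 *	struct fw_cdev_create_iso_context create = {
 *		.type		= FW_ISO_CONTEXT_RECEIVE,
 *		.channel	= 5,
 *		.speed		= SCODE_400,
 *		.header_size	= 4,
 *	};
 *	int fd = open("/dev/fw1", O_RDWR);
 *	ioctl(fd, FW_CDEV_IOC_CREATE_ISO_CONTEXT, &create);
 */
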
/* dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in the struct. */

struct event {
	struct { void *data; size_t size; } v[2];
	struct list_head link;
};

struct response {
	struct event event;
	struct fw_transaction transaction;
	struct client *client;
	struct fw_cdev_event_response response;
};

struct iso_interrupt {
	struct event event;
	struct fw_cdev_event_iso_interrupt interrupt;
};

struct client {
	struct fw_device *device;
	spinlock_t lock;
	struct list_head handler_list;
	struct list_head request_list;
	u32 request_serial;
	struct list_head event_list;
	struct semaphore event_list_sem;
	wait_queue_head_t wait;

	struct fw_iso_context *iso_context;
	struct fw_iso_buffer buffer;
	unsigned long vm_start;
};

static inline void __user *
u64_to_uptr(__u64 value)
{
	return (void __user *)(unsigned long)value;
}

static inline __u64
uptr_to_u64(void __user *ptr)
{
	return (__u64)(unsigned long)ptr;
}

static int fw_device_op_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct client *client;

	device = container_of(inode->i_cdev, struct fw_device, cdev);

	client = kzalloc(sizeof *client, GFP_KERNEL);
	if (client == NULL)
		return -ENOMEM;

	client->device = fw_device_get(device);
	INIT_LIST_HEAD(&client->event_list);
	sema_init(&client->event_list_sem, 0);
	INIT_LIST_HEAD(&client->handler_list);
	INIT_LIST_HEAD(&client->request_list);
	spin_lock_init(&client->lock);
	init_waitqueue_head(&client->wait);

	file->private_data = client;

	return 0;
}

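/*
 * queue_event() appends an event to the client's event list and wakes
 * any reader; event_list_sem counts queued events so dequeue_event()
 * can sleep until one is available.  The two (data, size) slots let an
 * event carry a fixed header plus an optional variable-length payload.
 */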
static void queue_event(struct client *client, struct event *event,
			void *data0, size_t size0, void *data1, size_t size1)
{
	unsigned long flags;

	event->v[0].data = data0;
	event->v[0].size = size0;
	event->v[1].data = data1;
	event->v[1].size = size1;

	spin_lock_irqsave(&client->lock, flags);

	list_add_tail(&event->link, &client->event_list);

	up(&client->event_list_sem);
	wake_up_interruptible(&client->wait);

	spin_unlock_irqrestore(&client->lock, flags);
}

static int dequeue_event(struct client *client, char __user *buffer, size_t count)
{
	unsigned long flags;
	struct event *event;
	size_t size, total;
	int i, retval = -EFAULT;

	if (down_interruptible(&client->event_list_sem) < 0)
		return -EINTR;

	spin_lock_irqsave(&client->lock, flags);

	event = container_of(client->event_list.next, struct event, link);
	list_del(&event->link);

	spin_unlock_irqrestore(&client->lock, flags);

	if (buffer == NULL)
		goto out;

	total = 0;
	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
		size = min(event->v[i].size, count - total);
		if (copy_to_user(buffer + total, event->v[i].data, size))
			goto out;
		total += size;
	}
	retval = total;

 out:
	kfree(event);

	return retval;
}

static ssize_t
fw_device_op_read(struct file *file,
		  char __user *buffer, size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return dequeue_event(client, buffer, count);
}

static int ioctl_config_rom(struct client *client, void __user *arg)
{
	struct fw_cdev_get_config_rom rom;

	rom.length = client->device->config_rom_length;
	memcpy(rom.data, client->device->config_rom, rom.length * 4);
	if (copy_to_user(arg, &rom,
			 (char *)&rom.data[rom.length] - (char *)&rom))
		return -EFAULT;

	return 0;
}

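/*
 * Completion callback for fw_send_request(): copy as much of the
 * response payload as fits in the preallocated response struct and
 * queue an FW_CDEV_EVENT_RESPONSE event for the client to read.
 */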
static void
complete_transaction(struct fw_card *card, int rcode,
		     void *payload, size_t length, void *data)
{
	struct response *response = data;
	struct client *client = response->client;

	if (length < response->response.length)
		response->response.length = length;
	if (rcode == RCODE_COMPLETE)
		memcpy(response->response.data, payload,
		       response->response.length);

	response->response.type = FW_CDEV_EVENT_RESPONSE;
	response->response.rcode = rcode;
	queue_event(client, &response->event,
		    &response->response, sizeof response->response,
		    response->response.data, response->response.length);
}

static ssize_t ioctl_send_request(struct client *client, void __user *arg)
{
	struct fw_device *device = client->device;
	struct fw_cdev_send_request request;
	struct response *response;

	if (copy_from_user(&request, arg, sizeof request))
		return -EFAULT;

	/* What is the biggest size we'll accept, really? */
	if (request.length > 4096)
		return -EINVAL;

	response = kmalloc(sizeof *response + request.length, GFP_KERNEL);
	if (response == NULL)
		return -ENOMEM;

	response->client = client;
	response->response.length = request.length;
	response->response.closure = request.closure;

	if (request.data &&
	    copy_from_user(response->response.data,
			   u64_to_uptr(request.data), request.length)) {
		kfree(response);
		return -EFAULT;
	}

	fw_send_request(device->card, &response->transaction,
			request.tcode,
			device->node->node_id,
			device->card->generation,
			device->node->max_speed,
			request.offset,
			response->response.data, request.length,
			complete_transaction, response);

	if (request.data)
		return sizeof request + request.length;
	else
		return sizeof request;
}

struct address_handler {
	struct fw_address_handler handler;
	__u64 closure;
	struct client *client;
	struct list_head link;
};

struct request {
	struct fw_request *request;
	void *data;
	size_t length;
	u32 serial;
	struct list_head link;
};

struct request_event {
	struct event event;
	struct fw_cdev_event_request request;
};

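/*
 * Address handler callback: remember the incoming request on the
 * client's request_list (keyed by a per-client serial number) and
 * queue an FW_CDEV_EVENT_REQUEST event.  Userspace later answers it
 * through FW_CDEV_IOC_SEND_RESPONSE; if allocation fails here, respond
 * with RCODE_CONFLICT_ERROR right away.
 */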
static void
handle_request(struct fw_card *card, struct fw_request *r,
	       int tcode, int destination, int source,
	       int generation, int speed,
	       unsigned long long offset,
	       void *payload, size_t length, void *callback_data)
{
	struct address_handler *handler = callback_data;
	struct request *request;
	struct request_event *e;
	unsigned long flags;
	struct client *client = handler->client;

	request = kmalloc(sizeof *request, GFP_ATOMIC);
	e = kmalloc(sizeof *e, GFP_ATOMIC);
	if (request == NULL || e == NULL) {
		kfree(request);
		kfree(e);
		fw_send_response(card, r, RCODE_CONFLICT_ERROR);
		return;
	}

	request->request = r;
	request->data = payload;
	request->length = length;

	spin_lock_irqsave(&client->lock, flags);
	request->serial = client->request_serial++;
	list_add_tail(&request->link, &client->request_list);
	spin_unlock_irqrestore(&client->lock, flags);

	e->request.type = FW_CDEV_EVENT_REQUEST;
	e->request.tcode = tcode;
	e->request.offset = offset;
	e->request.length = length;
	e->request.serial = request->serial;
	e->request.closure = handler->closure;

	queue_event(client, &e->event,
		    &e->request, sizeof e->request, payload, length);
}

static int ioctl_allocate(struct client *client, void __user *arg)
{
	struct fw_cdev_allocate request;
	struct address_handler *handler;
	unsigned long flags;
	struct fw_address_region region;

	if (copy_from_user(&request, arg, sizeof request))
		return -EFAULT;

	handler = kmalloc(sizeof *handler, GFP_KERNEL);
	if (handler == NULL)
		return -ENOMEM;

	region.start = request.offset;
	region.end = request.offset + request.length;
	handler->handler.length = request.length;
	handler->handler.address_callback = handle_request;
	handler->handler.callback_data = handler;
	handler->closure = request.closure;
	handler->client = client;

	if (fw_core_add_address_handler(&handler->handler, &region) < 0) {
		kfree(handler);
		return -EBUSY;
	}

	spin_lock_irqsave(&client->lock, flags);
	list_add_tail(&handler->link, &client->handler_list);
	spin_unlock_irqrestore(&client->lock, flags);

	return 0;
}

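/*
 * Userspace's answer to a previously queued FW_CDEV_EVENT_REQUEST:
 * look the request up by serial, copy in the (possibly truncated)
 * response payload and send the response on the bus.
 */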
static int ioctl_send_response(struct client *client, void __user *arg)
{
	struct fw_cdev_send_response request;
	struct request *r;
	unsigned long flags;

	if (copy_from_user(&request, arg, sizeof request))
		return -EFAULT;

	spin_lock_irqsave(&client->lock, flags);
	list_for_each_entry(r, &client->request_list, link) {
		if (r->serial == request.serial) {
			list_del(&r->link);
			break;
		}
	}
	spin_unlock_irqrestore(&client->lock, flags);

	if (&r->link == &client->request_list)
		return -EINVAL;

	if (request.length < r->length)
		r->length = request.length;
	if (copy_from_user(r->data, u64_to_uptr(request.data), r->length))
		return -EFAULT;

	fw_send_response(client->device->card, r->request, request.rcode);

	kfree(r);

	return 0;
}

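/*
 * Isochronous context callback, called from atomic context (hence the
 * GFP_ATOMIC allocation).  The received iso headers are appended to
 * the FW_CDEV_EVENT_ISO_INTERRUPT event that is queued for the client.
 */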
static void
iso_callback(struct fw_iso_context *context, u32 cycle,
	     size_t header_length, void *header, void *data)
{
	struct client *client = data;
	struct iso_interrupt *interrupt;

	interrupt = kzalloc(sizeof *interrupt + header_length, GFP_ATOMIC);
	if (interrupt == NULL)
		return;

	interrupt->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
	interrupt->interrupt.closure = 0;
	interrupt->interrupt.cycle = cycle;
	interrupt->interrupt.header_length = header_length;
	memcpy(interrupt->interrupt.header, header, header_length);
	queue_event(client, &interrupt->event,
		    &interrupt->interrupt,
		    sizeof interrupt->interrupt + header_length, NULL, 0);
}

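/*
 * Create the client's (single) isochronous context.  Channel and speed
 * are fixed here, at creation time, rather than being passed in later
 * when the context is started.
 */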
static int ioctl_create_iso_context(struct client *client, void __user *arg)
{
	struct fw_cdev_create_iso_context request;

	if (copy_from_user(&request, arg, sizeof request))
		return -EFAULT;

	if (request.type > FW_ISO_CONTEXT_RECEIVE)
		return -EINVAL;

	if (request.channel > 63)
		return -EINVAL;

	if (request.speed > SCODE_3200)
		return -EINVAL;

	client->iso_context = fw_iso_context_create(client->device->card,
						    request.type,
						    request.channel,
						    request.speed,
						    request.header_size,
						    iso_callback, client);
	if (IS_ERR(client->iso_context))
		return PTR_ERR(client->iso_context);

	return 0;
}

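/*
 * Queue a batch of iso packets.  The packet descriptors form a
 * variable-length array in user memory (each descriptor is followed by
 * header_length bytes of quadlets), and packet payloads are given as
 * offsets into the mmap()'ed iso buffer.  Returns the number of packets
 * queued and updates the request so userspace can retry any remainder.
 */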
static int ioctl_queue_iso(struct client *client, void __user *arg)
{
	struct fw_cdev_queue_iso request;
	struct fw_cdev_iso_packet __user *p, *end, *next;
	struct fw_iso_context *ctx = client->iso_context;
	unsigned long payload, payload_end, header_length;
	int count;
	struct {
		struct fw_iso_packet packet;
		u8 header[256];
	} u;

	if (ctx == NULL)
		return -EINVAL;
	if (copy_from_user(&request, arg, sizeof request))
		return -EFAULT;

	/* If the user passes a non-NULL data pointer, has mmap()'ed
	 * the iso buffer, and the pointer points inside the buffer,
	 * we set up the payload pointers accordingly.  Otherwise we
	 * set them both to 0, which will still let packets with
	 * payload_length == 0 through.  In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
	 * and the request.data pointer is ignored. */

	payload = (unsigned long)request.data - client->vm_start;
	payload_end = payload + (client->buffer.page_count << PAGE_SHIFT);
	if (request.data == 0 || client->buffer.pages == NULL ||
	    payload >= payload_end) {
		payload = 0;
		payload_end = 0;
	}

	if (!access_ok(VERIFY_READ, request.packets, request.size))
		return -EFAULT;

	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request.packets);
	end = (void __user *)p + request.size;
	count = 0;
	while (p < end) {
		if (__copy_from_user(&u.packet, p, sizeof *p))
			return -EFAULT;

		if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
			header_length = u.packet.header_length;
		} else {
			/* We require that header_length is a multiple of
			 * the fixed header size, ctx->header_size */
			if (ctx->header_size == 0) {
				if (u.packet.header_length > 0)
					return -EINVAL;
			} else if (u.packet.header_length % ctx->header_size != 0) {
				return -EINVAL;
			}
			header_length = 0;
		}

		next = (struct fw_cdev_iso_packet __user *)
			&p->header[header_length / 4];
		if (next > end)
			return -EINVAL;
		if (__copy_from_user
		    (u.packet.header, p->header, header_length))
			return -EFAULT;
		if (u.packet.skip &&
		    u.packet.header_length + u.packet.payload_length > 0)
			return -EINVAL;
		if (payload + u.packet.payload_length > payload_end)
			return -EINVAL;

		if (fw_iso_context_queue(ctx, &u.packet,
					 &client->buffer, payload))
			break;

		p = next;
		payload += u.packet.payload_length;
		count++;
	}

	request.size -= uptr_to_u64(p) - request.packets;
	request.packets = uptr_to_u64(p);
	request.data = client->vm_start + payload;

	if (copy_to_user(arg, &request, sizeof request))
		return -EFAULT;

	return count;
}

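/*
 * Start the iso context at the requested cycle.  The channel and speed
 * were already fixed when the context was created, so only the start
 * cycle is passed along here.
 */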
static int ioctl_start_iso(struct client *client, void __user *arg)
{
	struct fw_cdev_start_iso request;

	if (copy_from_user(&request, arg, sizeof request))
		return -EFAULT;

	return fw_iso_context_start(client->iso_context, request.cycle);
}

static int ioctl_stop_iso(struct client *client, void __user *arg)
{
	return fw_iso_context_stop(client->iso_context);
}

static int
dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
{
	switch (cmd) {
	case FW_CDEV_IOC_GET_CONFIG_ROM:
		return ioctl_config_rom(client, arg);
	case FW_CDEV_IOC_SEND_REQUEST:
		return ioctl_send_request(client, arg);
	case FW_CDEV_IOC_ALLOCATE:
		return ioctl_allocate(client, arg);
	case FW_CDEV_IOC_SEND_RESPONSE:
		return ioctl_send_response(client, arg);
	case FW_CDEV_IOC_CREATE_ISO_CONTEXT:
		return ioctl_create_iso_context(client, arg);
	case FW_CDEV_IOC_QUEUE_ISO:
		return ioctl_queue_iso(client, arg);
	case FW_CDEV_IOC_START_ISO:
		return ioctl_start_iso(client, arg);
	case FW_CDEV_IOC_STOP_ISO:
		return ioctl_stop_iso(client, arg);
	default:
		return -EINVAL;
	}
}

static long
fw_device_op_ioctl(struct file *file,
		   unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

	return dispatch_ioctl(client, cmd, (void __user *) arg);
}

#ifdef CONFIG_COMPAT
static long
fw_device_op_compat_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

	return dispatch_ioctl(client, cmd, compat_ptr(arg));
}
#endif

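/*
 * mmap() the client's single iso buffer.  The mapping must be
 * VM_SHARED and cover a whole number of pages; VM_WRITE selects the
 * DMA direction (a writable mapping is set up for transmit, a
 * read-only one for receive).
 */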
static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
	enum dma_data_direction direction;
	unsigned long size;
	int page_count, retval;

	/* FIXME: We could support multiple buffers, but we don't. */
	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	client->vm_start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	if (size & ~PAGE_MASK)
		return -EINVAL;

	if (vma->vm_flags & VM_WRITE)
		direction = DMA_TO_DEVICE;
	else
		direction = DMA_FROM_DEVICE;

	retval = fw_iso_buffer_init(&client->buffer, client->device->card,
				    page_count, direction);
	if (retval < 0)
		return retval;

	retval = fw_iso_buffer_map(&client->buffer, vma);
	if (retval < 0)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	return retval;
}

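/*
 * Tear down all per-client state: the iso buffer and context, any
 * address handlers (answering their still-pending requests with
 * RCODE_CONFLICT_ERROR), and whatever events remain queued.
 */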
static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct address_handler *h, *next;
	struct request *r, *next_r;

	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	list_for_each_entry_safe(h, next, &client->handler_list, link) {
		fw_core_remove_address_handler(&h->handler);
		kfree(h);
	}

	list_for_each_entry_safe(r, next_r, &client->request_list, link) {
		fw_send_response(client->device->card, r->request,
				 RCODE_CONFLICT_ERROR);
		kfree(r);
	}

	/* TODO: wait for all transactions to finish so
	 * complete_transaction doesn't try to queue up responses
	 * after we free client. */
	while (!list_empty(&client->event_list))
		dequeue_event(client, NULL, 0);

	fw_device_put(client->device);
	kfree(client);

	return 0;
}

static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
{
	struct client *client = file->private_data;

	poll_wait(file, &client->wait, pt);

	if (!list_empty(&client->event_list))
		return POLLIN | POLLRDNORM;
	else
		return 0;
}

const struct file_operations fw_device_ops = {
	.owner		= THIS_MODULE,
	.open		= fw_device_op_open,
	.read		= fw_device_op_read,
	.unlocked_ioctl	= fw_device_op_ioctl,
	.poll		= fw_device_op_poll,
	.release	= fw_device_op_release,
	.mmap		= fw_device_op_mmap,

#ifdef CONFIG_COMPAT
	.compat_ioctl	= fw_device_op_compat_ioctl,
#endif
};