drivers/usb/usbip/vudc_transfer.c
/*
 * Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
 * Copyright (C) 2015-2016 Samsung Electronics
 *               Igor Kotrasinski <i.kotrasinsk@samsung.com>
 *
 * Based on dummy_hcd.c, which is:
 * Copyright (C) 2003 David Brownell
 * Copyright (C) 2003-2005 Alan Stern
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/usb.h>
#include <linux/timer.h>
#include <linux/usb/ch9.h>

#include "vudc.h"

#define DEV_REQUEST	(USB_TYPE_STANDARD | USB_RECIP_DEVICE)
#define DEV_INREQUEST	(DEV_REQUEST | USB_DIR_IN)
#define INTF_REQUEST	(USB_TYPE_STANDARD | USB_RECIP_INTERFACE)
#define INTF_INREQUEST	(INTF_REQUEST | USB_DIR_IN)
#define EP_REQUEST	(USB_TYPE_STANDARD | USB_RECIP_ENDPOINT)
#define EP_INREQUEST	(EP_REQUEST | USB_DIR_IN)

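/*
 * Rough byte budget for one 1 ms frame at the given speed; v_timer() uses
 * this to limit how much bulk data is moved per frame. Returns -1 when the
 * speed is unknown or not yet negotiated.
 */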
static int get_frame_limit(enum usb_device_speed speed)
{
	switch (speed) {
	case USB_SPEED_LOW:
		return 8 /*bytes*/ * 12 /*packets*/;
	case USB_SPEED_FULL:
		return 64 /*bytes*/ * 19 /*packets*/;
	case USB_SPEED_HIGH:
		return 512 /*bytes*/ * 13 /*packets*/ * 8 /*uframes*/;
	case USB_SPEED_SUPER:
		/* Bus speed is 500000 bytes/ms, so use a little less */
		return 490000;
	default:
		/* error */
		return -1;
	}
}

/*
 * handle_control_request() - handles all control transfers
 * @udc: pointer to vudc
 * @urb: the urb request to handle
 * @setup: pointer to the setup data for a USB device control
 *	   request
 * @status: pointer to request handling status
 *
 * Return 0 - if the request was handled
 *	  1 - if the request wasn't handled
 *	  error code on error
 *
 * Adapted from drivers/usb/gadget/udc/dummy_hcd.c
 */
static int handle_control_request(struct vudc *udc, struct urb *urb,
				  struct usb_ctrlrequest *setup,
				  int *status)
{
	struct vep *ep2;
	int ret_val = 1;
	unsigned w_index;
	unsigned w_value;

	w_index = le16_to_cpu(setup->wIndex);
	w_value = le16_to_cpu(setup->wValue);
	switch (setup->bRequest) {
	case USB_REQ_SET_ADDRESS:
		if (setup->bRequestType != DEV_REQUEST)
			break;
		udc->address = w_value;
		ret_val = 0;
		*status = 0;
		break;
	case USB_REQ_SET_FEATURE:
		if (setup->bRequestType == DEV_REQUEST) {
			ret_val = 0;
			switch (w_value) {
			case USB_DEVICE_REMOTE_WAKEUP:
				break;
			case USB_DEVICE_B_HNP_ENABLE:
				udc->gadget.b_hnp_enable = 1;
				break;
			case USB_DEVICE_A_HNP_SUPPORT:
				udc->gadget.a_hnp_support = 1;
				break;
			case USB_DEVICE_A_ALT_HNP_SUPPORT:
				udc->gadget.a_alt_hnp_support = 1;
				break;
			default:
				ret_val = -EOPNOTSUPP;
			}
			if (ret_val == 0) {
				udc->devstatus |= (1 << w_value);
				*status = 0;
			}
		} else if (setup->bRequestType == EP_REQUEST) {
			/* endpoint halt */
			ep2 = vudc_find_endpoint(udc, w_index);
			if (!ep2 || ep2->ep.name == udc->ep[0].ep.name) {
				ret_val = -EOPNOTSUPP;
				break;
			}
			ep2->halted = 1;
			ret_val = 0;
			*status = 0;
		}
		break;
	case USB_REQ_CLEAR_FEATURE:
		if (setup->bRequestType == DEV_REQUEST) {
			ret_val = 0;
			switch (w_value) {
			case USB_DEVICE_REMOTE_WAKEUP:
				w_value = USB_DEVICE_REMOTE_WAKEUP;
				break;

			case USB_DEVICE_U1_ENABLE:
			case USB_DEVICE_U2_ENABLE:
			case USB_DEVICE_LTM_ENABLE:
				ret_val = -EOPNOTSUPP;
				break;
			default:
				ret_val = -EOPNOTSUPP;
				break;
			}
			if (ret_val == 0) {
				udc->devstatus &= ~(1 << w_value);
				*status = 0;
			}
		} else if (setup->bRequestType == EP_REQUEST) {
			/* endpoint halt */
			ep2 = vudc_find_endpoint(udc, w_index);
			if (!ep2) {
				ret_val = -EOPNOTSUPP;
				break;
			}
			if (!ep2->wedged)
				ep2->halted = 0;
			ret_val = 0;
			*status = 0;
		}
		break;
	case USB_REQ_GET_STATUS:
		if (setup->bRequestType == DEV_INREQUEST
		    || setup->bRequestType == INTF_INREQUEST
		    || setup->bRequestType == EP_INREQUEST) {
			char *buf;
			/*
			 * device: remote wakeup, selfpowered
			 * interface: nothing
			 * endpoint: halt
			 */
			buf = (char *)urb->transfer_buffer;
			if (urb->transfer_buffer_length > 0) {
				if (setup->bRequestType == EP_INREQUEST) {
					ep2 = vudc_find_endpoint(udc, w_index);
					if (!ep2) {
						ret_val = -EOPNOTSUPP;
						break;
					}
					buf[0] = ep2->halted;
				} else if (setup->bRequestType ==
					   DEV_INREQUEST) {
					buf[0] = (u8)udc->devstatus;
				} else
					buf[0] = 0;
			}
			if (urb->transfer_buffer_length > 1)
				buf[1] = 0;
			urb->actual_length = min_t(u32, 2,
						   urb->transfer_buffer_length);
			ret_val = 0;
			*status = 0;
		}
		break;
	}
	return ret_val;
}

/* Adapted from dummy_hcd.c ; caller must hold lock */
static int transfer(struct vudc *udc,
		struct urb *urb, struct vep *ep, int limit)
{
	struct vrequest *req;
	int sent = 0;
top:
	/* if there's no request queued, the device is NAKing; return */
	list_for_each_entry(req, &ep->req_queue, req_entry) {
		unsigned host_len, dev_len, len;
		void *ubuf_pos, *rbuf_pos;
		int is_short, to_host;
		int rescan = 0;

		/*
		 * 1..N packets of ep->ep.maxpacket each ... the last one
		 * may be short (including zero length).
		 *
		 * writer can send a zlp explicitly (length 0) or implicitly
		 * (length mod maxpacket zero, and 'zero' flag); they always
		 * terminate reads.
		 */
		host_len = urb->transfer_buffer_length - urb->actual_length;
		dev_len = req->req.length - req->req.actual;
		len = min(host_len, dev_len);

		to_host = usb_pipein(urb->pipe);
		if (unlikely(len == 0))
			is_short = 1;
		else {
			/* send multiple of maxpacket first, then remainder */
			if (len >= ep->ep.maxpacket) {
				is_short = 0;
				if (len % ep->ep.maxpacket > 0)
					rescan = 1;
				len -= len % ep->ep.maxpacket;
			} else {
				is_short = 1;
			}

			ubuf_pos = urb->transfer_buffer + urb->actual_length;
			rbuf_pos = req->req.buf + req->req.actual;

			if (urb->pipe & USB_DIR_IN)
				memcpy(ubuf_pos, rbuf_pos, len);
			else
				memcpy(rbuf_pos, ubuf_pos, len);

			urb->actual_length += len;
			req->req.actual += len;
			sent += len;
		}

		/*
		 * short packets terminate, maybe with overflow/underflow.
		 * it's only really an error to write too much.
		 *
		 * partially filling a buffer optionally blocks queue advances
		 * (so completion handlers can clean up the queue) but we don't
		 * need to emulate such data-in-flight.
		 */
		if (is_short) {
			if (host_len == dev_len) {
				req->req.status = 0;
				urb->status = 0;
			} else if (to_host) {
				req->req.status = 0;
				if (dev_len > host_len)
					urb->status = -EOVERFLOW;
				else
					urb->status = 0;
			} else {
				urb->status = 0;
				if (host_len > dev_len)
					req->req.status = -EOVERFLOW;
				else
					req->req.status = 0;
			}

		/* many requests terminate without a short packet */
		/* also check if we need to send zlp */
		} else {
			if (req->req.length == req->req.actual) {
				if (req->req.zero && to_host)
					rescan = 1;
				else
					req->req.status = 0;
			}
			if (urb->transfer_buffer_length == urb->actual_length) {
				if (urb->transfer_flags & URB_ZERO_PACKET &&
				    !to_host)
					rescan = 1;
				else
					urb->status = 0;
			}
		}

		/* device side completion --> continuable */
		if (req->req.status != -EINPROGRESS) {

			list_del_init(&req->req_entry);
			spin_unlock(&udc->lock);
			usb_gadget_giveback_request(&ep->ep, &req->req);
			spin_lock(&udc->lock);

			/* requests might have been unlinked... */
			rescan = 1;
		}

		/* host side completion --> terminate */
		if (urb->status != -EINPROGRESS)
			break;

		/* rescan to continue with any other queued i/o */
		if (rescan)
			goto top;
	}
	return sent;
}

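/*
 * v_timer() is the transfer engine of vudc: it runs from a 1 ms kernel
 * timer, walks the queued urbs and moves data for each of them within the
 * per-frame byte budget returned by get_frame_limit(). Finished urbs are
 * handed over to the tx thread via v_enqueue_ret_submit() /
 * v_enqueue_ret_unlink().
 */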
static void v_timer(unsigned long _vudc)
{
	struct vudc *udc = (struct vudc *) _vudc;
	struct transfer_timer *timer = &udc->tr_timer;
	struct urbp *urb_p, *tmp;
	unsigned long flags;
	struct usb_ep *_ep;
	struct vep *ep;
	int ret = 0;
	int total, limit;

	spin_lock_irqsave(&udc->lock, flags);

	total = get_frame_limit(udc->gadget.speed);
	if (total < 0) {	/* unknown speed, or not set yet */
		timer->state = VUDC_TR_IDLE;
		spin_unlock_irqrestore(&udc->lock, flags);
		return;
	}
	/* is it next frame now? */
	if (time_after(jiffies, timer->frame_start + msecs_to_jiffies(1))) {
		timer->frame_limit = total;
		/* FIXME: how to make it accurate? */
		timer->frame_start = jiffies;
	} else {
		total = timer->frame_limit;
	}

	list_for_each_entry(_ep, &udc->gadget.ep_list, ep_list) {
		ep = to_vep(_ep);
		ep->already_seen = 0;
	}

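	/*
	 * Walk every queued urb once per frame. After an urb is given back
	 * the walk restarts from the head, since the lock is dropped around
	 * gadget callbacks and the queue may have changed in the meantime.
	 */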
restart:
	list_for_each_entry_safe(urb_p, tmp, &udc->urb_queue, urb_entry) {
		struct urb *urb = urb_p->urb;

		ep = urb_p->ep;
		if (urb->unlinked)
			goto return_urb;
		if (timer->state != VUDC_TR_RUNNING)
			continue;

		if (!ep) {
			urb->status = -EPROTO;
			goto return_urb;
		}

		/* Used up bandwidth? */
		if (total <= 0 && ep->type == USB_ENDPOINT_XFER_BULK)
			continue;

		if (ep->already_seen)
			continue;
		ep->already_seen = 1;
		if (ep == &udc->ep[0] && urb_p->new) {
			ep->setup_stage = 1;
			urb_p->new = 0;
		}
		if (ep->halted && !ep->setup_stage) {
			urb->status = -EPIPE;
			goto return_urb;
		}

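		/*
		 * Control transfers on ep0: handle standard requests in
		 * handle_control_request() and pass anything it does not
		 * implement to the gadget driver's setup() callback.
		 */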
		if (ep == &udc->ep[0] && ep->setup_stage) {
			/* TODO - flush any stale requests */
			ep->setup_stage = 0;
			ep->halted = 0;

			ret = handle_control_request(udc, urb,
				(struct usb_ctrlrequest *) urb->setup_packet,
				(&urb->status));
			if (ret > 0) {
				spin_unlock(&udc->lock);
				ret = udc->driver->setup(&udc->gadget,
					(struct usb_ctrlrequest *)
					urb->setup_packet);
				spin_lock(&udc->lock);
			}
			if (ret >= 0) {
				/* no delays (max 64kb data stage) */
				limit = 64 * 1024;
				goto treat_control_like_bulk;
			} else {
				urb->status = -EPIPE;
				urb->actual_length = 0;
				goto return_urb;
			}
		}

		limit = total;
		switch (ep->type) {
		case USB_ENDPOINT_XFER_ISOC:
			/* TODO: support */
			urb->status = -EXDEV;
			break;

		case USB_ENDPOINT_XFER_INT:
			/*
			 * TODO: figure out bandwidth guarantees
			 * for now, give unlimited bandwidth
			 */
			limit += urb->transfer_buffer_length;
			/* fallthrough */
		default:
treat_control_like_bulk:
			total -= transfer(udc, urb, ep, limit);
		}
		if (urb->status == -EINPROGRESS)
			continue;

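		/*
		 * The urb is done (completed, failed or unlinked): take it
		 * off the queue and hand it to the tx thread via
		 * v_enqueue_ret_submit() or v_enqueue_ret_unlink().
		 */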
return_urb:
		if (ep)
			ep->already_seen = ep->setup_stage = 0;

		spin_lock(&udc->lock_tx);
		list_del(&urb_p->urb_entry);
		if (!urb->unlinked) {
			v_enqueue_ret_submit(udc, urb_p);
		} else {
			v_enqueue_ret_unlink(udc, urb_p->seqnum,
					     urb->unlinked);
			free_urbp_and_urb(urb_p);
		}
		wake_up(&udc->tx_waitq);
		spin_unlock(&udc->lock_tx);

		goto restart;
	}

	/* TODO - also wait on empty usb_request queues? */
	if (list_empty(&udc->urb_queue))
		timer->state = VUDC_TR_IDLE;
	else
		mod_timer(&timer->timer,
			  timer->frame_start + msecs_to_jiffies(1));

	spin_unlock_irqrestore(&udc->lock, flags);
}

/* All timer functions are run with udc->lock held */

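/*
 * The transfer timer is a small state machine: VUDC_TR_STOPPED after
 * v_init_timer()/v_stop_timer(), VUDC_TR_IDLE when the urb queue is empty,
 * and VUDC_TR_RUNNING while v_timer() keeps rescheduling itself each frame.
 */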
void v_init_timer(struct vudc *udc)
{
	struct transfer_timer *t = &udc->tr_timer;

	setup_timer(&t->timer, v_timer, (unsigned long) udc);
	t->state = VUDC_TR_STOPPED;
}

void v_start_timer(struct vudc *udc)
{
	struct transfer_timer *t = &udc->tr_timer;

	dev_dbg(&udc->pdev->dev, "timer start");
	switch (t->state) {
	case VUDC_TR_RUNNING:
		return;
	case VUDC_TR_IDLE:
		return v_kick_timer(udc, jiffies);
	case VUDC_TR_STOPPED:
		t->state = VUDC_TR_IDLE;
		t->frame_start = jiffies;
		t->frame_limit = get_frame_limit(udc->gadget.speed);
		return v_kick_timer(udc, jiffies);
	}
}

void v_kick_timer(struct vudc *udc, unsigned long time)
{
	struct transfer_timer *t = &udc->tr_timer;

	dev_dbg(&udc->pdev->dev, "timer kick");
	switch (t->state) {
	case VUDC_TR_RUNNING:
		return;
	case VUDC_TR_IDLE:
		t->state = VUDC_TR_RUNNING;
		/* fallthrough */
	case VUDC_TR_STOPPED:
		/* we may want to kick timer to unqueue urbs */
		mod_timer(&t->timer, time);
	}
}

void v_stop_timer(struct vudc *udc)
{
	struct transfer_timer *t = &udc->tr_timer;

	/* timer itself will take care of stopping */
	dev_dbg(&udc->pdev->dev, "timer stop");
	t->state = VUDC_TR_STOPPED;
}