Commit | Line | Data |
---|---|---|
d62ba981 IK |
1 | /* |
2 | * Copyright (C) 2015 Karol Kosik <karo9@interia.eu> | |
3 | * Copyright (C) 2015-2016 Samsung Electronics | |
4 | * Igor Kotrasinski <i.kotrasinsk@samsung.com> | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License as published by | |
8 | * the Free Software Foundation; either version 2 of the License, or | |
9 | * (at your option) any later version. | |
10 | * | |
11 | * This program is distributed in the hope that it will be useful, | |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
14 | * GNU General Public License for more details. | |
15 | * | |
16 | * You should have received a copy of the GNU General Public License | |
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | |
18 | */ | |
19 | ||
20 | #include <net/sock.h> | |
21 | #include <linux/list.h> | |
22 | #include <linux/kthread.h> | |
23 | ||
24 | #include "usbip_common.h" | |
25 | #include "vudc.h" | |
26 | ||
27 | static inline void setup_base_pdu(struct usbip_header_basic *base, | |
28 | __u32 command, __u32 seqnum) | |
29 | { | |
30 | base->command = command; | |
31 | base->seqnum = seqnum; | |
32 | base->devid = 0; | |
33 | base->ep = 0; | |
34 | base->direction = 0; | |
35 | } | |
36 | ||
/*
 * Build a USBIP_RET_SUBMIT header for a completed URB.
 * The basic part is filled first, then usbip_pack_pdu() copies the
 * urb fields (status, lengths, ISO data) into the ret_submit union
 * member; the pack direction flag 1 means "pack from urb into pdu".
 */
static void setup_ret_submit_pdu(struct usbip_header *rpdu, struct urbp *urb_p)
{
	setup_base_pdu(&rpdu->base, USBIP_RET_SUBMIT, urb_p->seqnum);
	usbip_pack_pdu(rpdu, urb_p->urb, USBIP_RET_SUBMIT, 1);
}
42 | ||
/*
 * Build a USBIP_RET_UNLINK header answering an unlink request.
 * Only the unlink status is carried in the union payload.
 */
static void setup_ret_unlink_pdu(struct usbip_header *rpdu,
				 struct v_unlink *unlink)
{
	setup_base_pdu(&rpdu->base, USBIP_RET_UNLINK, unlink->seqnum);
	rpdu->u.ret_unlink.status = unlink->status;
}
49 | ||
50 | static int v_send_ret_unlink(struct vudc *udc, struct v_unlink *unlink) | |
51 | { | |
52 | struct msghdr msg; | |
53 | struct kvec iov[1]; | |
54 | size_t txsize; | |
55 | ||
56 | int ret; | |
57 | struct usbip_header pdu_header; | |
58 | ||
59 | txsize = 0; | |
60 | memset(&pdu_header, 0, sizeof(pdu_header)); | |
61 | memset(&msg, 0, sizeof(msg)); | |
62 | memset(&iov, 0, sizeof(iov)); | |
63 | ||
64 | /* 1. setup usbip_header */ | |
65 | setup_ret_unlink_pdu(&pdu_header, unlink); | |
66 | usbip_header_correct_endian(&pdu_header, 1); | |
67 | ||
68 | iov[0].iov_base = &pdu_header; | |
69 | iov[0].iov_len = sizeof(pdu_header); | |
70 | txsize += sizeof(pdu_header); | |
71 | ||
72 | ret = kernel_sendmsg(udc->ud.tcp_socket, &msg, iov, | |
73 | 1, txsize); | |
74 | if (ret != txsize) { | |
75 | usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP); | |
76 | if (ret >= 0) | |
77 | return -EPIPE; | |
78 | return ret; | |
79 | } | |
80 | kfree(unlink); | |
81 | ||
82 | return txsize; | |
83 | } | |
84 | ||
/*
 * Send a RET_SUBMIT reply (header, optional data, optional ISO
 * descriptors) for a completed URB over the usbip TCP socket.
 *
 * Consumes @urb_p: free_urbp_and_urb() is called on every exit path.
 * Returns the number of bytes sent, or a negative errno; a short send
 * raises VUDC_EVENT_ERROR_TCP and is reported as -EPIPE.
 */
static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p)
{
	struct urb *urb = urb_p->urb;
	struct usbip_header pdu_header;
	struct usbip_iso_packet_descriptor *iso_buffer = NULL;
	struct kvec *iov = NULL;
	int iovnum = 0;
	int ret = 0;
	size_t txsize;
	struct msghdr msg;

	txsize = 0;
	memset(&pdu_header, 0, sizeof(pdu_header));
	memset(&msg, 0, sizeof(msg));

	/*
	 * Worst-case iov count: header + data for non-ISO,
	 * header + one entry per ISO packet + descriptor block for ISO.
	 */
	if (urb_p->type == USB_ENDPOINT_XFER_ISOC)
		iovnum = 2 + urb->number_of_packets;
	else
		iovnum = 2;

	iov = kcalloc(iovnum, sizeof(*iov), GFP_KERNEL);
	if (!iov) {
		usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC);
		ret = -ENOMEM;
		goto out;
	}
	/* reuse iovnum as the running index of filled entries */
	iovnum = 0;

	/* 1. setup usbip_header */
	setup_ret_submit_pdu(&pdu_header, urb_p);
	usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n",
			  pdu_header.base.seqnum, urb);
	usbip_header_correct_endian(&pdu_header, 1);

	iov[iovnum].iov_base = &pdu_header;
	iov[iovnum].iov_len = sizeof(pdu_header);
	iovnum++;
	txsize += sizeof(pdu_header);

	/* 2. setup transfer buffer (IN transfers only carry data back) */
	if (urb_p->type != USB_ENDPOINT_XFER_ISOC &&
	    usb_pipein(urb->pipe) && urb->actual_length > 0) {
		iov[iovnum].iov_base = urb->transfer_buffer;
		iov[iovnum].iov_len = urb->actual_length;
		iovnum++;
		txsize += urb->actual_length;
	} else if (urb_p->type == USB_ENDPOINT_XFER_ISOC &&
		   usb_pipein(urb->pipe)) {
		/* FIXME - copypasted from stub_tx, refactor */
		int i;

		/* one iov entry per ISO packet, at its frame offset */
		for (i = 0; i < urb->number_of_packets; i++) {
			iov[iovnum].iov_base = urb->transfer_buffer +
				urb->iso_frame_desc[i].offset;
			iov[iovnum].iov_len =
				urb->iso_frame_desc[i].actual_length;
			iovnum++;
			txsize += urb->iso_frame_desc[i].actual_length;
		}

		/* sanity: per-packet lengths must sum to actual_length */
		if (txsize != sizeof(pdu_header) + urb->actual_length) {
			usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP);
			ret = -EPIPE;
			goto out;
		}
	}
	/* else - no buffer to send */

	/* 3. setup iso_packet_descriptor */
	if (urb_p->type == USB_ENDPOINT_XFER_ISOC) {
		ssize_t len = 0;

		iso_buffer = usbip_alloc_iso_desc_pdu(urb, &len);
		if (!iso_buffer) {
			usbip_event_add(&udc->ud,
					VUDC_EVENT_ERROR_MALLOC);
			ret = -ENOMEM;
			goto out;
		}

		iov[iovnum].iov_base = iso_buffer;
		iov[iovnum].iov_len = len;
		txsize += len;
		iovnum++;
	}

	ret = kernel_sendmsg(udc->ud.tcp_socket, &msg,
			     iov, iovnum, txsize);
	if (ret != txsize) {
		usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP);
		if (ret >= 0)
			ret = -EPIPE;
		goto out;
	}

out:
	kfree(iov);
	kfree(iso_buffer);	/* kfree(NULL) is a no-op */
	free_urbp_and_urb(urb_p);
	if (ret < 0)
		return ret;
	return txsize;
}
188 | ||
189 | static int v_send_ret(struct vudc *udc) | |
190 | { | |
191 | unsigned long flags; | |
192 | struct tx_item *txi; | |
193 | size_t total_size = 0; | |
194 | int ret = 0; | |
195 | ||
196 | spin_lock_irqsave(&udc->lock_tx, flags); | |
197 | while (!list_empty(&udc->tx_queue)) { | |
198 | txi = list_first_entry(&udc->tx_queue, struct tx_item, | |
199 | tx_entry); | |
200 | list_del(&txi->tx_entry); | |
201 | spin_unlock_irqrestore(&udc->lock_tx, flags); | |
202 | ||
203 | switch (txi->type) { | |
204 | case TX_SUBMIT: | |
205 | ret = v_send_ret_submit(udc, txi->s); | |
206 | break; | |
207 | case TX_UNLINK: | |
208 | ret = v_send_ret_unlink(udc, txi->u); | |
209 | break; | |
210 | } | |
211 | kfree(txi); | |
212 | ||
213 | if (ret < 0) | |
214 | return ret; | |
215 | ||
216 | total_size += ret; | |
217 | ||
218 | spin_lock_irqsave(&udc->lock_tx, flags); | |
219 | } | |
220 | ||
221 | spin_unlock_irqrestore(&udc->lock_tx, flags); | |
222 | return total_size; | |
223 | } | |
224 | ||
225 | ||
226 | int v_tx_loop(void *data) | |
227 | { | |
228 | struct usbip_device *ud = (struct usbip_device *) data; | |
229 | struct vudc *udc = container_of(ud, struct vudc, ud); | |
230 | int ret; | |
231 | ||
232 | while (!kthread_should_stop()) { | |
233 | if (usbip_event_happened(&udc->ud)) | |
234 | break; | |
235 | ret = v_send_ret(udc); | |
236 | if (ret < 0) { | |
237 | pr_warn("v_tx exit with error %d", ret); | |
238 | break; | |
239 | } | |
240 | wait_event_interruptible(udc->tx_waitq, | |
241 | (!list_empty(&udc->tx_queue) || | |
242 | kthread_should_stop())); | |
243 | } | |
244 | ||
245 | return 0; | |
246 | } | |
247 | ||
248 | /* called with spinlocks held */ | |
249 | void v_enqueue_ret_unlink(struct vudc *udc, __u32 seqnum, __u32 status) | |
250 | { | |
251 | struct tx_item *txi; | |
252 | struct v_unlink *unlink; | |
253 | ||
254 | txi = kzalloc(sizeof(*txi), GFP_ATOMIC); | |
255 | if (!txi) { | |
256 | usbip_event_add(&udc->ud, VDEV_EVENT_ERROR_MALLOC); | |
257 | return; | |
258 | } | |
259 | unlink = kzalloc(sizeof(*unlink), GFP_ATOMIC); | |
260 | if (!unlink) { | |
261 | kfree(txi); | |
262 | usbip_event_add(&udc->ud, VDEV_EVENT_ERROR_MALLOC); | |
263 | return; | |
264 | } | |
265 | ||
266 | unlink->seqnum = seqnum; | |
267 | unlink->status = status; | |
268 | txi->type = TX_UNLINK; | |
269 | txi->u = unlink; | |
270 | ||
271 | list_add_tail(&txi->tx_entry, &udc->tx_queue); | |
272 | } | |
273 | ||
274 | /* called with spinlocks held */ | |
275 | void v_enqueue_ret_submit(struct vudc *udc, struct urbp *urb_p) | |
276 | { | |
277 | struct tx_item *txi; | |
278 | ||
279 | txi = kzalloc(sizeof(*txi), GFP_ATOMIC); | |
280 | if (!txi) { | |
281 | usbip_event_add(&udc->ud, VDEV_EVENT_ERROR_MALLOC); | |
282 | return; | |
283 | } | |
284 | ||
285 | txi->type = TX_SUBMIT; | |
286 | txi->s = urb_p; | |
287 | ||
288 | list_add_tail(&txi->tx_entry, &udc->tx_queue); | |
289 | } |