66d4eadd SS |
1 | /* |
2 | * xHCI host controller driver | |
3 | * | |
4 | * Copyright (C) 2008 Intel Corp. | |
5 | * | |
6 | * Author: Sarah Sharp | |
7 | * Some code borrowed from the Linux EHCI driver. | |
8 | * | |
9 | * This program is free software; you can redistribute it and/or modify | |
10 | * it under the terms of the GNU General Public License version 2 as | |
11 | * published by the Free Software Foundation. | |
12 | * | |
13 | * This program is distributed in the hope that it will be useful, but | |
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | |
15 | * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
16 | * for more details. | |
17 | * | |
18 | * You should have received a copy of the GNU General Public License | |
19 | * along with this program; if not, write to the Free Software Foundation, | |
20 | * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | |
21 | */ | |
22 | ||
23 | #include <linux/usb.h> | |
0ebbab37 | 24 | #include <linux/pci.h> |
527c6d7f | 25 | #include <linux/dmapool.h> |
66d4eadd SS |
26 | |
27 | #include "xhci.h" | |
28 | ||
0ebbab37 SS |
29 | /* |
30 | * Allocates a generic ring segment from the ring pool, sets the dma address, | |
31 | * initializes the segment to zero, and sets the private next pointer to NULL. | |
32 | * | |
33 | * Section 4.11.1.1: | |
34 | * "All components of all Command and Transfer TRBs shall be initialized to '0'" | |
35 | */ | |
36 | static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags) | |
37 | { | |
38 | struct xhci_segment *seg; | |
39 | dma_addr_t dma; | |
40 | ||
41 | seg = kzalloc(sizeof *seg, flags); | |
42 | if (!seg) | |
43 | return NULL; |
700e2052 | 44 | xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg); |
0ebbab37 SS |
45 | |
46 | seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma); | |
47 | if (!seg->trbs) { | |
48 | kfree(seg); | |
49 | return NULL; |
50 | } | |
700e2052 GKH |
51 | xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n", |
52 | seg->trbs, (unsigned long long)dma); | |
0ebbab37 SS |
53 | |
54 | memset(seg->trbs, 0, SEGMENT_SIZE); | |
55 | seg->dma = dma; | |
56 | seg->next = NULL; | |
57 | ||
58 | return seg; | |
59 | } | |
60 | ||
61 | static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg) | |
62 | { | |
63 | if (!seg) | |
64 | return; | |
65 | if (seg->trbs) { | |
700e2052 GKH |
66 | xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n", |
67 | seg->trbs, (unsigned long long)seg->dma); | |
0ebbab37 SS |
68 | dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma); |
69 | seg->trbs = NULL; | |
70 | } | |
700e2052 | 71 | xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg); |
0ebbab37 SS |
72 | kfree(seg); |
73 | } | |
74 | ||
75 | /* | |
76 | * Make the prev segment point to the next segment. | |
77 | * | |
78 | * Change the last TRB in the prev segment to be a Link TRB which points to the | |
79 | * DMA address of the next segment. The caller needs to set any Link TRB | |
80 | * related flags, such as End TRB, Toggle Cycle, and no snoop. | |
81 | */ | |
82 | static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev, | |
83 | struct xhci_segment *next, bool link_trbs) | |
84 | { | |
85 | u32 val; | |
86 | ||
87 | if (!prev || !next) | |
88 | return; | |
89 | prev->next = next; | |
90 | if (link_trbs) { | |
8e595a5d | 91 | prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma; |
0ebbab37 SS |
92 | |
93 | /* Set the last TRB in the segment to have a TRB type ID of Link TRB */ | |
94 | val = prev->trbs[TRBS_PER_SEGMENT-1].link.control; | |
95 | val &= ~TRB_TYPE_BITMASK; | |
96 | val |= TRB_TYPE(TRB_LINK); | |
b0567b3f SS |
97 | /* Always set the chain bit with 0.95 hardware */ |
98 | if (xhci_link_trb_quirk(xhci)) | |
99 | val |= TRB_CHAIN; | |
0ebbab37 SS |
100 | prev->trbs[TRBS_PER_SEGMENT-1].link.control = val; |
101 | } | |
700e2052 GKH |
102 | xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n", |
103 | (unsigned long long)prev->dma, | |
104 | (unsigned long long)next->dma); | |
0ebbab37 SS |
105 | } |
106 | ||
107 | /* XXX: Do we need the hcd structure in all these functions? */ | |
f94e0186 | 108 | void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring) |
0ebbab37 SS |
109 | { |
110 | struct xhci_segment *seg; | |
111 | struct xhci_segment *first_seg; | |
112 | ||
113 | if (!ring || !ring->first_seg) | |
114 | return; | |
115 | first_seg = ring->first_seg; | |
116 | seg = first_seg->next; | |
700e2052 | 117 | xhci_dbg(xhci, "Freeing ring at %p\n", ring); |
0ebbab37 SS |
118 | while (seg != first_seg) { |
119 | struct xhci_segment *next = seg->next; | |
120 | xhci_segment_free(xhci, seg); | |
121 | seg = next; | |
122 | } | |
123 | xhci_segment_free(xhci, first_seg); | |
124 | ring->first_seg = NULL; | |
125 | kfree(ring); | |
126 | } | |
127 | ||
128 | /** | |
129 | * Create a new ring with zero or more segments. | |
130 | * | |
131 | * Link each segment together into a ring. | |
132 | * Set the end flag and the cycle toggle bit on the last segment. | |
133 | * See section 4.9.1 and figures 15 and 16. | |
134 | */ | |
135 | static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, | |
136 | unsigned int num_segs, bool link_trbs, gfp_t flags) | |
137 | { | |
138 | struct xhci_ring *ring; | |
139 | struct xhci_segment *prev; | |
140 | ||
141 | ring = kzalloc(sizeof *(ring), flags); | |
700e2052 | 142 | xhci_dbg(xhci, "Allocating ring at %p\n", ring); |
0ebbab37 SS |
143 | if (!ring) |
144 | return NULL; |
145 | ||
d0e96f5a | 146 | INIT_LIST_HEAD(&ring->td_list); |
ae636747 | 147 | INIT_LIST_HEAD(&ring->cancelled_td_list); |
0ebbab37 SS |
148 | if (num_segs == 0) |
149 | return ring; | |
150 | ||
151 | ring->first_seg = xhci_segment_alloc(xhci, flags); | |
152 | if (!ring->first_seg) | |
153 | goto fail; | |
154 | num_segs--; | |
155 | ||
156 | prev = ring->first_seg; | |
157 | while (num_segs > 0) { | |
158 | struct xhci_segment *next; | |
159 | ||
160 | next = xhci_segment_alloc(xhci, flags); | |
161 | if (!next) | |
162 | goto fail; | |
163 | xhci_link_segments(xhci, prev, next, link_trbs); | |
164 | ||
165 | prev = next; | |
166 | num_segs--; | |
167 | } | |
168 | xhci_link_segments(xhci, prev, ring->first_seg, link_trbs); | |
169 | ||
170 | if (link_trbs) { | |
171 | /* See section 4.9.2.1 and 6.4.4.1 */ | |
172 | prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE); | |
173 | xhci_dbg(xhci, "Wrote link toggle flag to" | |
700e2052 GKH |
174 | " segment %p (virtual), 0x%llx (DMA)\n", |
175 | prev, (unsigned long long)prev->dma); | |
0ebbab37 SS |
176 | } |
177 | /* The ring is empty, so the enqueue pointer == dequeue pointer */ | |
178 | ring->enqueue = ring->first_seg->trbs; | |
7f84eef0 | 179 | ring->enq_seg = ring->first_seg; |
0ebbab37 | 180 | ring->dequeue = ring->enqueue; |
7f84eef0 | 181 | ring->deq_seg = ring->first_seg; |
0ebbab37 SS |
182 | /* The ring is initialized to 0. The producer must write 1 to the cycle |
183 | * bit to handover ownership of the TRB, so PCS = 1. The consumer must | |
184 | * compare CCS to the cycle bit to check ownership, so CCS = 1. | |
185 | */ | |
186 | ring->cycle_state = 1; | |
187 | ||
188 | return ring; | |
189 | ||
190 | fail: | |
191 | xhci_ring_free(xhci, ring); | |
192 | return NULL; |
193 | } | |
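/*
 * Usage sketch (illustrative, not part of the file): the allocator pairs
 * with xhci_ring_free() above, the way the one-segment command ring and
 * endpoint rings are handled later in this file:
 *
 *	ring = xhci_ring_alloc(xhci, 1, true, mem_flags);
 *	if (!ring)
 *		return -ENOMEM;
 *	... queue TRBs, ring the doorbell, handle events ...
 *	xhci_ring_free(xhci, ring);
 *
 * With link_trbs = true the last TRB of each segment is turned into a Link
 * TRB by xhci_link_segments(), and the final segment's Link TRB gets the
 * toggle-cycle flag so the producer flips cycle_state on every wrap.
 */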
194 | ||
d115b048 JY |
195 | #define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32) |
196 | ||
197 | struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, | |
198 | int type, gfp_t flags) | |
199 | { | |
200 | struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags); | |
201 | if (!ctx) | |
202 | return NULL; | |
203 | ||
204 | BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT)); | |
205 | ctx->type = type; | |
206 | ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024; | |
207 | if (type == XHCI_CTX_TYPE_INPUT) | |
208 | ctx->size += CTX_SIZE(xhci->hcc_params); | |
209 | ||
210 | ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma); |
if (!ctx->bytes) {
kfree(ctx);
return NULL;
}
211 | memset(ctx->bytes, 0, ctx->size); |
212 | return ctx; | |
213 | } | |
214 | ||
215 | void xhci_free_container_ctx(struct xhci_hcd *xhci, | |
216 | struct xhci_container_ctx *ctx) | |
217 | { | |
218 | dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma); | |
219 | kfree(ctx); | |
220 | } | |
221 | ||
222 | struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, | |
223 | struct xhci_container_ctx *ctx) | |
224 | { | |
225 | BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT); | |
226 | return (struct xhci_input_control_ctx *)ctx->bytes; | |
227 | } | |
228 | ||
229 | struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, | |
230 | struct xhci_container_ctx *ctx) | |
231 | { | |
232 | if (ctx->type == XHCI_CTX_TYPE_DEVICE) | |
233 | return (struct xhci_slot_ctx *)ctx->bytes; | |
234 | ||
235 | return (struct xhci_slot_ctx *) | |
236 | (ctx->bytes + CTX_SIZE(xhci->hcc_params)); | |
237 | } | |
238 | ||
239 | struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, | |
240 | struct xhci_container_ctx *ctx, | |
241 | unsigned int ep_index) | |
242 | { | |
243 | /* increment ep index by offset of start of ep ctx array */ | |
244 | ep_index++; | |
245 | if (ctx->type == XHCI_CTX_TYPE_INPUT) | |
246 | ep_index++; | |
247 | ||
248 | return (struct xhci_ep_ctx *) | |
249 | (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params))); | |
250 | } | |
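/*
 * Layout implied by the accessors above (illustrative), assuming 32-byte
 * contexts (CTX_SIZE == 32):
 *
 *	output (device) context:	input context:
 *	0x00 slot context		0x00 input control context
 *	0x20 ep context, ep_index 0	0x20 slot context
 *	0x40 ep context, ep_index 1	0x40 ep context, ep_index 0
 *	...				...
 *
 * With 64-byte contexts (HCC_64BYTE_CONTEXT set) every entry moves to a
 * 64-byte stride, which is why xhci_alloc_container_ctx() sizes the buffer
 * at 2048 instead of 1024 bytes and adds one more CTX_SIZE slot when the
 * container holds an input control context.
 */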
251 | ||
d0e96f5a | 252 | /* All the xhci_tds in the ring's TD list should be freed at this point */ |
3ffbba95 SS |
253 | void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) |
254 | { | |
255 | struct xhci_virt_device *dev; | |
256 | int i; | |
257 | ||
258 | /* Slot ID 0 is reserved */ | |
259 | if (slot_id == 0 || !xhci->devs[slot_id]) | |
260 | return; | |
261 | ||
262 | dev = xhci->devs[slot_id]; | |
8e595a5d | 263 | xhci->dcbaa->dev_context_ptrs[slot_id] = 0; |
3ffbba95 SS |
264 | if (!dev) |
265 | return; | |
266 | ||
267 | for (i = 0; i < 31; ++i) | |
268 | if (dev->ep_rings[i]) | |
269 | xhci_ring_free(xhci, dev->ep_rings[i]); | |
270 | ||
271 | if (dev->in_ctx) | |
d115b048 | 272 | xhci_free_container_ctx(xhci, dev->in_ctx); |
3ffbba95 | 273 | if (dev->out_ctx) |
d115b048 JY |
274 | xhci_free_container_ctx(xhci, dev->out_ctx); |
275 | ||
3ffbba95 SS |
276 | kfree(xhci->devs[slot_id]); |
277 | xhci->devs[slot_id] = NULL; |
278 | } | |
279 | ||
280 | int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, | |
281 | struct usb_device *udev, gfp_t flags) | |
282 | { | |
3ffbba95 SS |
283 | struct xhci_virt_device *dev; |
284 | ||
285 | /* Slot ID 0 is reserved */ | |
286 | if (slot_id == 0 || xhci->devs[slot_id]) { | |
287 | xhci_warn(xhci, "Bad Slot ID %d\n", slot_id); | |
288 | return 0; | |
289 | } | |
290 | ||
291 | xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags); | |
292 | if (!xhci->devs[slot_id]) | |
293 | return 0; | |
294 | dev = xhci->devs[slot_id]; | |
295 | ||
d115b048 JY |
296 | /* Allocate the (output) device context that will be used in the HC. */ |
297 | dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags); | |
3ffbba95 SS |
298 | if (!dev->out_ctx) |
299 | goto fail; | |
d115b048 | 300 | |
700e2052 | 301 | xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id, |
d115b048 | 302 | (unsigned long long)dev->out_ctx->dma); |
3ffbba95 SS |
303 | |
304 | /* Allocate the (input) device context for address device command */ | |
d115b048 | 305 | dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags); |
3ffbba95 SS |
306 | if (!dev->in_ctx) |
307 | goto fail; | |
d115b048 | 308 | |
700e2052 | 309 | xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id, |
d115b048 | 310 | (unsigned long long)dev->in_ctx->dma); |
3ffbba95 SS |
311 | |
312 | /* Allocate endpoint 0 ring */ | |
313 | dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags); | |
314 | if (!dev->ep_rings[0]) | |
315 | goto fail; | |
316 | ||
f94e0186 SS |
317 | init_completion(&dev->cmd_completion); |
318 | ||
28c2d2ef | 319 | /* Point to output device context in dcbaa. */ |
d115b048 | 320 | xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma; |
700e2052 | 321 | xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n", |
3ffbba95 | 322 | slot_id, |
8e595a5d | 323 | &xhci->dcbaa->dev_context_ptrs[slot_id], |
28c2d2ef | 324 | (unsigned long long) xhci->dcbaa->dev_context_ptrs[slot_id]); |
3ffbba95 SS |
325 | |
326 | return 1; | |
327 | fail: | |
328 | xhci_free_virt_device(xhci, slot_id); | |
329 | return 0; | |
330 | } | |
331 | ||
332 | /* Setup an xHCI virtual device for a Set Address command */ | |
333 | int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev) | |
334 | { | |
335 | struct xhci_virt_device *dev; | |
336 | struct xhci_ep_ctx *ep0_ctx; | |
337 | struct usb_device *top_dev; | |
d115b048 JY |
338 | struct xhci_slot_ctx *slot_ctx; |
339 | struct xhci_input_control_ctx *ctrl_ctx; | |
3ffbba95 SS |
340 | |
341 | dev = xhci->devs[udev->slot_id]; | |
342 | /* Slot ID 0 is reserved */ | |
343 | if (udev->slot_id == 0 || !dev) { | |
344 | xhci_warn(xhci, "Slot ID %d is not assigned to this device\n", | |
345 | udev->slot_id); | |
346 | return -EINVAL; | |
347 | } | |
d115b048 JY |
348 | ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0); |
349 | ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx); | |
350 | slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx); | |
3ffbba95 SS |
351 | |
352 | /* 2) New slot context and endpoint 0 context are valid*/ | |
d115b048 | 353 | ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG; |
3ffbba95 SS |
354 | |
355 | /* 3) Only the control endpoint is valid - one endpoint context */ | |
d115b048 | 356 | slot_ctx->dev_info |= LAST_CTX(1); |
3ffbba95 SS |
357 | |
358 | switch (udev->speed) { | |
359 | case USB_SPEED_SUPER: | |
d115b048 JY |
360 | slot_ctx->dev_info |= (u32) udev->route; |
361 | slot_ctx->dev_info |= (u32) SLOT_SPEED_SS; | |
3ffbba95 SS |
362 | break; |
363 | case USB_SPEED_HIGH: | |
d115b048 | 364 | slot_ctx->dev_info |= (u32) SLOT_SPEED_HS; |
3ffbba95 SS |
365 | break; |
366 | case USB_SPEED_FULL: | |
d115b048 | 367 | slot_ctx->dev_info |= (u32) SLOT_SPEED_FS; |
3ffbba95 SS |
368 | break; |
369 | case USB_SPEED_LOW: | |
d115b048 | 370 | slot_ctx->dev_info |= (u32) SLOT_SPEED_LS; |
3ffbba95 SS |
371 | break; |
372 | case USB_SPEED_VARIABLE: | |
373 | xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); | |
374 | return -EINVAL; | |
375 | break; | |
376 | default: | |
377 | /* Speed was set earlier, this shouldn't happen. */ | |
378 | BUG(); | |
379 | } | |
380 | /* Find the root hub port this device is under */ | |
381 | for (top_dev = udev; top_dev->parent && top_dev->parent->parent; | |
382 | top_dev = top_dev->parent) | |
383 | /* Found device below root hub */; | |
d115b048 | 384 | slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum); |
3ffbba95 SS |
385 | xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum); |
386 | ||
387 | /* Is this a LS/FS device under a HS hub? */ | |
388 | /* | |
389 | * FIXME: I don't think this is right, where does the TT info for the | |
390 | * roothub or parent hub come from? | |
391 | */ | |
392 | if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) && | |
393 | udev->tt) { | |
d115b048 JY |
394 | slot_ctx->tt_info = udev->tt->hub->slot_id; |
395 | slot_ctx->tt_info |= udev->ttport << 8; | |
3ffbba95 | 396 | } |
700e2052 | 397 | xhci_dbg(xhci, "udev->tt = %p\n", udev->tt); |
3ffbba95 SS |
398 | xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport); |
399 | ||
400 | /* Step 4 - ring already allocated */ | |
401 | /* Step 5 */ | |
402 | ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP); | |
403 | /* | |
404 | * See section 4.3 bullet 6: | |
405 | * The default Max Packet size for ep0 is "8 bytes for a USB2 | |
406 | * LS/FS/HS device or 512 bytes for a USB3 SS device" | |
407 | * XXX: Not sure about wireless USB devices. | |
408 | */ | |
409 | if (udev->speed == USB_SPEED_SUPER) | |
410 | ep0_ctx->ep_info2 |= MAX_PACKET(512); | |
411 | else | |
412 | ep0_ctx->ep_info2 |= MAX_PACKET(8); | |
413 | /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */ | |
414 | ep0_ctx->ep_info2 |= MAX_BURST(0); | |
415 | ep0_ctx->ep_info2 |= ERROR_COUNT(3); | |
416 | ||
8e595a5d | 417 | ep0_ctx->deq = |
3ffbba95 | 418 | dev->ep_rings[0]->first_seg->dma; |
8e595a5d | 419 | ep0_ctx->deq |= dev->ep_rings[0]->cycle_state; |
3ffbba95 SS |
420 | |
421 | /* Steps 7 and 8 were done in xhci_alloc_virt_device() */ | |
422 | ||
423 | return 0; | |
424 | } | |
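/*
 * Worked example (illustrative): for a SuperSpeed device plugged directly
 * into root hub port 3, the code above leaves the input contexts with
 *
 *	ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG
 *	slot_ctx->dev_info  = route string (0) | SLOT_SPEED_SS | LAST_CTX(1)
 *	slot_ctx->dev_info2 = ROOT_HUB_PORT(3)
 *	slot_ctx->tt_info   = 0 (no transaction translator in the path)
 *	ep0_ctx->ep_info2   = EP_TYPE(CTRL_EP) | MAX_PACKET(512) |
 *			      MAX_BURST(0) | ERROR_COUNT(3)
 *	ep0_ctx->deq        = first_seg->dma | cycle_state (1 for a new ring)
 */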
425 | ||
f94e0186 SS |
426 | /* Return the polling or NAK interval. |
427 | * | |
428 | * The polling interval is expressed in "microframes". If xHCI's Interval field | |
429 | * is set to N, it will service the endpoint every 2^N * 125us. |
430 | * | |
431 | * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval | |
432 | * is set to 0. | |
433 | */ | |
434 | static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev, | |
435 | struct usb_host_endpoint *ep) | |
436 | { | |
437 | unsigned int interval = 0; | |
438 | ||
439 | switch (udev->speed) { | |
440 | case USB_SPEED_HIGH: | |
441 | /* Max NAK rate */ | |
442 | if (usb_endpoint_xfer_control(&ep->desc) || | |
443 | usb_endpoint_xfer_bulk(&ep->desc)) | |
444 | interval = ep->desc.bInterval; | |
445 | /* Fall through - SS and HS isoc/int have same decoding */ | |
446 | case USB_SPEED_SUPER: | |
447 | if (usb_endpoint_xfer_int(&ep->desc) || | |
448 | usb_endpoint_xfer_isoc(&ep->desc)) { | |
449 | if (ep->desc.bInterval == 0) | |
450 | interval = 0; | |
451 | else | |
452 | interval = ep->desc.bInterval - 1; | |
453 | if (interval > 15) | |
454 | interval = 15; | |
455 | if (interval != ep->desc.bInterval - 1) |
456 | dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n", | |
457 | ep->desc.bEndpointAddress, 1 << interval); | |
458 | } | |
459 | break; | |
460 | /* Convert bInterval (in 1-255 frames) to microframes and round down to | |
461 | * nearest power of 2. | |
462 | */ | |
463 | case USB_SPEED_FULL: | |
464 | case USB_SPEED_LOW: | |
465 | if (usb_endpoint_xfer_int(&ep->desc) || | |
466 | usb_endpoint_xfer_isoc(&ep->desc)) { | |
467 | interval = fls(8*ep->desc.bInterval) - 1; | |
468 | if (interval > 10) | |
469 | interval = 10; | |
470 | if (interval < 3) | |
471 | interval = 3; | |
472 | if ((1 << interval) != 8*ep->desc.bInterval) | |
473 | dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n", | |
474 | ep->desc.bEndpointAddress, 1 << interval); | |
475 | } | |
476 | break; | |
477 | default: | |
478 | BUG(); | |
479 | } | |
480 | return EP_INTERVAL(interval); | |
481 | } | |
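/*
 * Worked examples (illustrative) of the decoding above:
 *
 * HS or SS interrupt endpoint, bInterval = 4:
 *	interval = 4 - 1 = 3, so the xHC services it every
 *	2^3 * 125us = 1ms, exactly what the device asked for.
 *
 * FS interrupt endpoint, bInterval = 10 (frames):
 *	interval = fls(8 * 10) - 1 = 6, i.e. 2^6 = 64 microframes = 8ms,
 *	and the driver warns because 64 != 80: the requested 10ms polling
 *	period is rounded down to the nearest power of two.
 */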
482 | ||
483 | static inline u32 xhci_get_endpoint_type(struct usb_device *udev, | |
484 | struct usb_host_endpoint *ep) | |
485 | { | |
486 | int in; | |
487 | u32 type; | |
488 | ||
489 | in = usb_endpoint_dir_in(&ep->desc); | |
490 | if (usb_endpoint_xfer_control(&ep->desc)) { | |
491 | type = EP_TYPE(CTRL_EP); | |
492 | } else if (usb_endpoint_xfer_bulk(&ep->desc)) { | |
493 | if (in) | |
494 | type = EP_TYPE(BULK_IN_EP); | |
495 | else | |
496 | type = EP_TYPE(BULK_OUT_EP); | |
497 | } else if (usb_endpoint_xfer_isoc(&ep->desc)) { | |
498 | if (in) | |
499 | type = EP_TYPE(ISOC_IN_EP); | |
500 | else | |
501 | type = EP_TYPE(ISOC_OUT_EP); | |
502 | } else if (usb_endpoint_xfer_int(&ep->desc)) { | |
503 | if (in) | |
504 | type = EP_TYPE(INT_IN_EP); | |
505 | else | |
506 | type = EP_TYPE(INT_OUT_EP); | |
507 | } else { | |
508 | BUG(); | |
509 | } | |
510 | return type; | |
511 | } | |
512 | ||
513 | int xhci_endpoint_init(struct xhci_hcd *xhci, | |
514 | struct xhci_virt_device *virt_dev, | |
515 | struct usb_device *udev, | |
f88ba78d SS |
516 | struct usb_host_endpoint *ep, |
517 | gfp_t mem_flags) | |
f94e0186 SS |
518 | { |
519 | unsigned int ep_index; | |
520 | struct xhci_ep_ctx *ep_ctx; | |
521 | struct xhci_ring *ep_ring; | |
522 | unsigned int max_packet; | |
523 | unsigned int max_burst; | |
524 | ||
525 | ep_index = xhci_get_endpoint_index(&ep->desc); | |
d115b048 | 526 | ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); |
f94e0186 SS |
527 | |
528 | /* Set up the endpoint ring */ | |
f88ba78d | 529 | virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags); |
f94e0186 SS |
530 | if (!virt_dev->new_ep_rings[ep_index]) |
531 | return -ENOMEM; | |
532 | ep_ring = virt_dev->new_ep_rings[ep_index]; | |
8e595a5d | 533 | ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state; |
f94e0186 SS |
534 | |
535 | ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep); | |
536 | ||
537 | /* FIXME dig Mult and streams info out of ep companion desc */ | |
538 | ||
47692d17 SS |
539 | /* Allow 3 retries for everything but isoc; |
540 | * error count = 0 means infinite retries. | |
541 | */ | |
f94e0186 SS |
542 | if (!usb_endpoint_xfer_isoc(&ep->desc)) |
543 | ep_ctx->ep_info2 = ERROR_COUNT(3); | |
544 | else | |
47692d17 | 545 | ep_ctx->ep_info2 = ERROR_COUNT(1); |
f94e0186 SS |
546 | |
547 | ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep); | |
548 | ||
549 | /* Set the max packet size and max burst */ | |
550 | switch (udev->speed) { | |
551 | case USB_SPEED_SUPER: | |
552 | max_packet = ep->desc.wMaxPacketSize; | |
553 | ep_ctx->ep_info2 |= MAX_PACKET(max_packet); | |
b10de142 | 554 | /* dig out max burst from ep companion desc */ |
b7d6d998 SS |
555 | if (!ep->ss_ep_comp) { |
556 | xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n"); | |
557 | max_burst = 0; |
558 | } else { | |
559 | max_burst = ep->ss_ep_comp->desc.bMaxBurst; |
560 | } | |
b10de142 | 561 | ep_ctx->ep_info2 |= MAX_BURST(max_burst);
f94e0186 SS |
562 | break; |
563 | case USB_SPEED_HIGH: | |
564 | /* bits 11:12 specify the number of additional transaction | |
565 | * opportunities per microframe (USB 2.0, section 9.6.6) | |
566 | */ | |
567 | if (usb_endpoint_xfer_isoc(&ep->desc) || | |
568 | usb_endpoint_xfer_int(&ep->desc)) { | |
569 | max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11; | |
570 | ep_ctx->ep_info2 |= MAX_BURST(max_burst); | |
571 | } | |
572 | /* Fall through */ | |
573 | case USB_SPEED_FULL: | |
574 | case USB_SPEED_LOW: | |
575 | max_packet = ep->desc.wMaxPacketSize & 0x3ff; | |
576 | ep_ctx->ep_info2 |= MAX_PACKET(max_packet); | |
577 | break; | |
578 | default: | |
579 | BUG(); | |
580 | } | |
581 | /* FIXME Debug endpoint context */ | |
582 | return 0; | |
583 | } | |
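/*
 * Worked example (illustrative): a high-speed isochronous endpoint with
 * wMaxPacketSize = 0x0b80 decodes above as
 *
 *	max_burst  = (0x0b80 & 0x1800) >> 11 = 1	(2 transactions/uframe)
 *	max_packet =  0x0b80 & 0x3ff         = 0x380 = 896 bytes
 *
 * so ep_info2 ends up with MAX_BURST(1) | MAX_PACKET(896) on top of the
 * endpoint type, interval, and error count set earlier in this function.
 */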
584 | ||
585 | void xhci_endpoint_zero(struct xhci_hcd *xhci, | |
586 | struct xhci_virt_device *virt_dev, | |
587 | struct usb_host_endpoint *ep) | |
588 | { | |
589 | unsigned int ep_index; | |
590 | struct xhci_ep_ctx *ep_ctx; | |
591 | ||
592 | ep_index = xhci_get_endpoint_index(&ep->desc); | |
d115b048 | 593 | ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); |
f94e0186 SS |
594 | |
595 | ep_ctx->ep_info = 0; | |
596 | ep_ctx->ep_info2 = 0; | |
8e595a5d | 597 | ep_ctx->deq = 0; |
f94e0186 SS |
598 | ep_ctx->tx_info = 0; |
599 | /* Don't free the endpoint ring until the set interface or configuration | |
600 | * request succeeds. | |
601 | */ | |
602 | } | |
603 | ||
254c80a3 JY |
604 | /* Set up the scratchpad buffer array and scratchpad buffers, if needed. */ |
605 | static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags) | |
606 | { | |
607 | int i; | |
608 | struct device *dev = xhci_to_hcd(xhci)->self.controller; | |
609 | int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2); | |
610 | ||
611 | xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp); | |
612 | ||
613 | if (!num_sp) | |
614 | return 0; | |
615 | ||
616 | xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags); | |
617 | if (!xhci->scratchpad) | |
618 | goto fail_sp; | |
619 | ||
620 | xhci->scratchpad->sp_array = | |
621 | pci_alloc_consistent(to_pci_dev(dev), | |
622 | num_sp * sizeof(u64), | |
623 | &xhci->scratchpad->sp_dma); | |
624 | if (!xhci->scratchpad->sp_array) | |
625 | goto fail_sp2; | |
626 | ||
627 | xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags); | |
628 | if (!xhci->scratchpad->sp_buffers) | |
629 | goto fail_sp3; | |
630 | ||
631 | xhci->scratchpad->sp_dma_buffers = | |
632 | kzalloc(sizeof(dma_addr_t) * num_sp, flags); | |
633 | ||
634 | if (!xhci->scratchpad->sp_dma_buffers) | |
635 | goto fail_sp4; | |
636 | ||
637 | xhci->dcbaa->dev_context_ptrs[0] = xhci->scratchpad->sp_dma; | |
638 | for (i = 0; i < num_sp; i++) { | |
639 | dma_addr_t dma; | |
640 | void *buf = pci_alloc_consistent(to_pci_dev(dev), | |
641 | xhci->page_size, &dma); | |
642 | if (!buf) | |
643 | goto fail_sp5; | |
644 | ||
645 | xhci->scratchpad->sp_array[i] = dma; | |
646 | xhci->scratchpad->sp_buffers[i] = buf; | |
647 | xhci->scratchpad->sp_dma_buffers[i] = dma; | |
648 | } | |
649 | ||
650 | return 0; | |
651 | ||
652 | fail_sp5: | |
653 | for (i = i - 1; i >= 0; i--) { | |
654 | pci_free_consistent(to_pci_dev(dev), xhci->page_size, | |
655 | xhci->scratchpad->sp_buffers[i], | |
656 | xhci->scratchpad->sp_dma_buffers[i]); | |
657 | } | |
658 | kfree(xhci->scratchpad->sp_dma_buffers); | |
659 | ||
660 | fail_sp4: | |
661 | kfree(xhci->scratchpad->sp_buffers); | |
662 | ||
663 | fail_sp3: | |
664 | pci_free_consistent(to_pci_dev(dev), num_sp * sizeof(u64), | |
665 | xhci->scratchpad->sp_array, | |
666 | xhci->scratchpad->sp_dma); | |
667 | ||
668 | fail_sp2: | |
669 | kfree(xhci->scratchpad); | |
670 | xhci->scratchpad = NULL; | |
671 | ||
672 | fail_sp: | |
673 | return -ENOMEM; | |
674 | } | |
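/*
 * Sizing sketch (illustrative): if HCS_MAX_SCRATCHPAD() reports 4 buffers
 * and the page size is 4K, the code above allocates one 4 * sizeof(u64) =
 * 32-byte DMA array plus four 4K pages, fills sp_array[0..3] with the page
 * DMA addresses, and points dev_context_ptrs[0] at the array - entry 0 of
 * the DCBAA is reserved for the scratchpad rather than a device slot.
 */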
675 | ||
676 | static void scratchpad_free(struct xhci_hcd *xhci) | |
677 | { | |
678 | int num_sp; | |
679 | int i; | |
680 | struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); | |
681 | ||
682 | if (!xhci->scratchpad) | |
683 | return; | |
684 | ||
685 | num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2); | |
686 | ||
687 | for (i = 0; i < num_sp; i++) { | |
688 | pci_free_consistent(pdev, xhci->page_size, | |
689 | xhci->scratchpad->sp_buffers[i], | |
690 | xhci->scratchpad->sp_dma_buffers[i]); | |
691 | } | |
692 | kfree(xhci->scratchpad->sp_dma_buffers); | |
693 | kfree(xhci->scratchpad->sp_buffers); | |
694 | pci_free_consistent(pdev, num_sp * sizeof(u64), | |
695 | xhci->scratchpad->sp_array, | |
696 | xhci->scratchpad->sp_dma); | |
697 | kfree(xhci->scratchpad); | |
698 | xhci->scratchpad = NULL; | |
699 | } | |
700 | ||
66d4eadd SS |
701 | void xhci_mem_cleanup(struct xhci_hcd *xhci) |
702 | { | |
0ebbab37 SS |
703 | struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); |
704 | int size; | |
3ffbba95 | 705 | int i; |
0ebbab37 SS |
706 | |
707 | /* Free the Event Ring Segment Table and the actual Event Ring */ | |
708 | xhci_writel(xhci, 0, &xhci->ir_set->erst_size); | |
8e595a5d SS |
709 | xhci_write_64(xhci, 0, &xhci->ir_set->erst_base); |
710 | xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue); | |
0ebbab37 SS |
711 | size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); |
712 | if (xhci->erst.entries) | |
713 | pci_free_consistent(pdev, size, | |
714 | xhci->erst.entries, xhci->erst.erst_dma_addr); | |
715 | xhci->erst.entries = NULL; | |
716 | xhci_dbg(xhci, "Freed ERST\n"); | |
717 | if (xhci->event_ring) | |
718 | xhci_ring_free(xhci, xhci->event_ring); | |
719 | xhci->event_ring = NULL; | |
720 | xhci_dbg(xhci, "Freed event ring\n"); | |
721 | ||
8e595a5d | 722 | xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring); |
0ebbab37 SS |
723 | if (xhci->cmd_ring) |
724 | xhci_ring_free(xhci, xhci->cmd_ring); | |
725 | xhci->cmd_ring = NULL; | |
726 | xhci_dbg(xhci, "Freed command ring\n"); | |
3ffbba95 SS |
727 | |
728 | for (i = 1; i < MAX_HC_SLOTS; ++i) | |
729 | xhci_free_virt_device(xhci, i); | |
730 | ||
0ebbab37 SS |
731 | if (xhci->segment_pool) |
732 | dma_pool_destroy(xhci->segment_pool); | |
733 | xhci->segment_pool = NULL; | |
734 | xhci_dbg(xhci, "Freed segment pool\n"); | |
3ffbba95 SS |
735 | |
736 | if (xhci->device_pool) | |
737 | dma_pool_destroy(xhci->device_pool); | |
738 | xhci->device_pool = NULL; | |
739 | xhci_dbg(xhci, "Freed device context pool\n"); | |
740 | ||
8e595a5d | 741 | xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr); |
a74588f9 SS |
742 | if (xhci->dcbaa) |
743 | pci_free_consistent(pdev, sizeof(*xhci->dcbaa), | |
744 | xhci->dcbaa, xhci->dcbaa->dma); | |
745 | xhci->dcbaa = NULL; | |
3ffbba95 | 746 | |
66d4eadd SS |
747 | xhci->page_size = 0; |
748 | xhci->page_shift = 0; | |
254c80a3 | 749 | scratchpad_free(xhci); |
66d4eadd SS |
750 | } |
751 | ||
752 | int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |
753 | { | |
0ebbab37 SS |
754 | dma_addr_t dma; |
755 | struct device *dev = xhci_to_hcd(xhci)->self.controller; | |
66d4eadd | 756 | unsigned int val, val2; |
8e595a5d | 757 | u64 val_64; |
0ebbab37 | 758 | struct xhci_segment *seg; |
66d4eadd SS |
759 | u32 page_size; |
760 | int i; | |
761 | ||
762 | page_size = xhci_readl(xhci, &xhci->op_regs->page_size); | |
763 | xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size); | |
764 | for (i = 0; i < 16; i++) { | |
765 | if ((0x1 & page_size) != 0) | |
766 | break; | |
767 | page_size = page_size >> 1; | |
768 | } | |
769 | if (i < 16) | |
770 | xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024); | |
771 | else | |
772 | xhci_warn(xhci, "WARN: no supported page size\n"); | |
773 | /* Use 4K pages, since that's common and the minimum the HC supports */ | |
774 | xhci->page_shift = 12; | |
775 | xhci->page_size = 1 << xhci->page_shift; | |
776 | xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024); | |
777 | ||
778 | /* | |
779 | * Program the Number of Device Slots Enabled field in the CONFIG | |
780 | * register with the max value of slots the HC can handle. | |
781 | */ | |
782 | val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1)); | |
783 | xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n", | |
784 | (unsigned int) val); | |
785 | val2 = xhci_readl(xhci, &xhci->op_regs->config_reg); | |
786 | val |= (val2 & ~HCS_SLOTS_MASK); | |
787 | xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n", | |
788 | (unsigned int) val); | |
789 | xhci_writel(xhci, val, &xhci->op_regs->config_reg); | |
790 | ||
a74588f9 SS |
791 | /* |
792 | * Section 6.1 - the Device Context Base Address Array must be |
793 | * physically contiguous and 64-byte (cache line) aligned. |
794 | */ | |
795 | xhci->dcbaa = pci_alloc_consistent(to_pci_dev(dev), | |
796 | sizeof(*xhci->dcbaa), &dma); | |
797 | if (!xhci->dcbaa) | |
798 | goto fail; | |
799 | memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa)); | |
800 | xhci->dcbaa->dma = dma; | |
700e2052 GKH |
801 | xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n", |
802 | (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); | |
8e595a5d | 803 | xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr); |
a74588f9 | 804 | |
0ebbab37 SS |
805 | /* |
806 | * Initialize the ring segment pool. The ring must be a contiguous | |
807 | * structure comprised of TRBs. The TRBs must be 16 byte aligned, | |
808 | * however, the command ring segment needs 64-byte aligned segments, | |
809 | * so we pick the greater alignment need. | |
810 | */ | |
811 | xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, | |
812 | SEGMENT_SIZE, 64, xhci->page_size); | |
d115b048 | 813 | |
3ffbba95 | 814 | /* See Table 46 and Note on Figure 55 */ |
3ffbba95 | 815 | xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, |
d115b048 | 816 | 2112, 64, xhci->page_size); |
3ffbba95 | 817 | if (!xhci->segment_pool || !xhci->device_pool) |
0ebbab37 SS |
818 | goto fail; |
819 | ||
820 | /* Set up the command ring to have one segment for now. */ |
821 | xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags); | |
822 | if (!xhci->cmd_ring) | |
823 | goto fail; | |
700e2052 GKH |
824 | xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring); |
825 | xhci_dbg(xhci, "First segment DMA is 0x%llx\n", | |
826 | (unsigned long long)xhci->cmd_ring->first_seg->dma); | |
0ebbab37 SS |
827 | |
828 | /* Set the address in the Command Ring Control register */ | |
8e595a5d SS |
829 | val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); |
830 | val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | | |
831 | (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) | | |
0ebbab37 | 832 | xhci->cmd_ring->cycle_state; |
8e595a5d SS |
833 | xhci_dbg(xhci, "// Setting command ring address to 0x%x\n", val); |
834 | xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); | |
0ebbab37 SS |
835 | xhci_dbg_cmd_ptrs(xhci); |
836 | ||
837 | val = xhci_readl(xhci, &xhci->cap_regs->db_off); | |
838 | val &= DBOFF_MASK; | |
839 | xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x" | |
840 | " from cap regs base addr\n", val); | |
841 | xhci->dba = (void *) xhci->cap_regs + val; | |
842 | xhci_dbg_regs(xhci); | |
843 | xhci_print_run_regs(xhci); | |
844 | /* Set ir_set to interrupt register set 0 */ | |
845 | xhci->ir_set = (void *) xhci->run_regs->ir_set; | |
846 | ||
847 | /* | |
848 | * Event ring setup: Allocate a normal ring, but also set up |
849 | * the event ring segment table (ERST). Section 4.9.3. | |
850 | */ | |
851 | xhci_dbg(xhci, "// Allocating event ring\n"); | |
852 | xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags); | |
853 | if (!xhci->event_ring) | |
854 | goto fail; | |
855 | ||
856 | xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev), | |
857 | sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma); | |
858 | if (!xhci->erst.entries) | |
859 | goto fail; | |
700e2052 GKH |
860 | xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n", |
861 | (unsigned long long)dma); | |
0ebbab37 SS |
862 | |
863 | memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS); | |
864 | xhci->erst.num_entries = ERST_NUM_SEGS; | |
865 | xhci->erst.erst_dma_addr = dma; | |
700e2052 | 866 | xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n", |
0ebbab37 | 867 | xhci->erst.num_entries, |
700e2052 GKH |
868 | xhci->erst.entries, |
869 | (unsigned long long)xhci->erst.erst_dma_addr); | |
0ebbab37 SS |
870 | |
871 | /* set ring base address and size for each segment table entry */ | |
872 | for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) { | |
873 | struct xhci_erst_entry *entry = &xhci->erst.entries[val]; | |
8e595a5d | 874 | entry->seg_addr = seg->dma; |
0ebbab37 SS |
875 | entry->seg_size = TRBS_PER_SEGMENT; |
876 | entry->rsvd = 0; | |
877 | seg = seg->next; | |
878 | } | |
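/*
 * Resulting table (illustrative): one 16-byte entry per event ring segment,
 * in ring order, e.g. with two segments
 *
 *	erst.entries[0] = { seg_addr = 1st seg dma, seg_size = TRBS_PER_SEGMENT }
 *	erst.entries[1] = { seg_addr = 2nd seg dma, seg_size = TRBS_PER_SEGMENT }
 *
 * The xHC walks the event ring segment by segment from this table, which is
 * why the event ring was allocated with link_trbs = false above.
 */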
879 | ||
880 | /* set ERST count with the number of entries in the segment table */ | |
881 | val = xhci_readl(xhci, &xhci->ir_set->erst_size); | |
882 | val &= ERST_SIZE_MASK; | |
883 | val |= ERST_NUM_SEGS; | |
884 | xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n", | |
885 | val); | |
886 | xhci_writel(xhci, val, &xhci->ir_set->erst_size); | |
887 | ||
888 | xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n"); | |
889 | /* set the segment table base address */ | |
700e2052 GKH |
890 | xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n", |
891 | (unsigned long long)xhci->erst.erst_dma_addr); | |
8e595a5d SS |
892 | val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base); |
893 | val_64 &= ERST_PTR_MASK; | |
894 | val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK); | |
895 | xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base); | |
0ebbab37 SS |
896 | |
897 | /* Set the event ring dequeue address */ | |
23e3be11 | 898 | xhci_set_hc_event_deq(xhci); |
0ebbab37 SS |
899 | xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n"); |
900 | xhci_print_ir_set(xhci, xhci->ir_set, 0); | |
901 | ||
902 | /* | |
903 | * XXX: Might need to set the Interrupter Moderation Register to | |
904 | * something other than the default (~1ms minimum between interrupts). | |
905 | * See section 5.5.1.2. | |
906 | */ | |
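/*
 * Sketch (illustrative, not done here), assuming the low 16 bits of
 * ir_set->irq_control hold the moderation interval in 250ns units as the
 * spec describes; the ~1ms default corresponds to a value of about 4000:
 *
 *	u32 imod = xhci_readl(xhci, &xhci->ir_set->irq_control);
 *	imod &= ~0xffff;
 *	imod |= 160;	// 160 * 250ns = 40us between interrupts, for example
 *	xhci_writel(xhci, imod, &xhci->ir_set->irq_control);
 */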
3ffbba95 SS |
907 | init_completion(&xhci->addr_dev); |
908 | for (i = 0; i < MAX_HC_SLOTS; ++i) | |
909 | xhci->devs[i] = NULL; |
66d4eadd | 910 | |
254c80a3 JY |
911 | if (scratchpad_alloc(xhci, flags)) |
912 | goto fail; | |
913 | ||
66d4eadd | 914 | return 0; |
254c80a3 | 915 | |
66d4eadd SS |
916 | fail: |
917 | xhci_warn(xhci, "Couldn't initialize memory\n"); | |
918 | xhci_mem_cleanup(xhci); | |
919 | return -ENOMEM; | |
920 | } |