staging: most: fix USB babble on IN pipe
[deliverable/linux.git] / drivers / staging / most / hdm-usb / hdm_usb.c
1 /*
2 * hdm_usb.c - Hardware dependent module for USB
3 *
4 * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * This file is licensed under GPLv2.
12 */
13
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/module.h>
16 #include <linux/fs.h>
17 #include <linux/usb.h>
18 #include <linux/slab.h>
19 #include <linux/init.h>
20 #include <linux/cdev.h>
21 #include <linux/device.h>
22 #include <linux/list.h>
23 #include <linux/completion.h>
24 #include <linux/mutex.h>
25 #include <linux/spinlock.h>
26 #include <linux/interrupt.h>
27 #include <linux/workqueue.h>
28 #include <linux/sysfs.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/etherdevice.h>
31 #include <linux/uaccess.h>
32 #include "mostcore.h"
33 #include "networking.h"
34
35 #define USB_MTU 512
36 #define NO_ISOCHRONOUS_URB 0
37 #define AV_PACKETS_PER_XACT 2
38 #define BUF_CHAIN_SIZE 0xFFFF
39 #define MAX_NUM_ENDPOINTS 30
40 #define MAX_SUFFIX_LEN 10
41 #define MAX_STRING_LEN 80
42 #define MAX_BUF_SIZE 0xFFFF
43 #define CEILING(x, y) (((x) + (y) - 1) / (y))
44
45 #define USB_VENDOR_ID_SMSC 0x0424 /* VID: SMSC */
46 #define USB_DEV_ID_BRDG 0xC001 /* PID: USB Bridge */
47 #define USB_DEV_ID_INIC 0xCF18 /* PID: USB INIC */
48 #define HW_RESYNC 0x0000
49 /* DRCI Addresses */
50 #define DRCI_REG_NI_STATE 0x0100
51 #define DRCI_REG_PACKET_BW 0x0101
52 #define DRCI_REG_NODE_ADDR 0x0102
53 #define DRCI_REG_NODE_POS 0x0103
54 #define DRCI_REG_MEP_FILTER 0x0140
55 #define DRCI_REG_HASH_TBL0 0x0141
56 #define DRCI_REG_HASH_TBL1 0x0142
57 #define DRCI_REG_HASH_TBL2 0x0143
58 #define DRCI_REG_HASH_TBL3 0x0144
59 #define DRCI_REG_HW_ADDR_HI 0x0145
60 #define DRCI_REG_HW_ADDR_MI 0x0146
61 #define DRCI_REG_HW_ADDR_LO 0x0147
62 #define DRCI_READ_REQ 0xA0
63 #define DRCI_WRITE_REQ 0xA1
64
/**
 * struct buf_anchor - used to create a list of pending URBs
 * @urb: pointer to the anchored USB request block
 * @clear_work_obj: work item used to defer usb_clear_halt() to process
 *	context when an endpoint reports a stall (-EPIPE)
 * @list: node in the per-channel anchor_list of struct most_dev
 * @urb_compl: completed by the URB completion handler after an unlink,
 *	so that a waiter in atomic context can synchronize with it
 */
struct buf_anchor {
	struct urb *urb;
	struct work_struct clear_work_obj;
	struct list_head list;
	struct completion urb_compl;
};
#define to_buf_anchor(w) container_of(w, struct buf_anchor, clear_work_obj)
79
/**
 * struct most_dci_obj - Direct Communication Interface
 * @kobj: position in sysfs
 * @usb_device: pointer to the usb device used for the DCI vendor requests
 */
struct most_dci_obj {
	struct kobject kobj;
	struct usb_device *usb_device;
};
#define to_dci_obj(p) container_of(p, struct most_dci_obj, kobj)
90
91 /**
92 * struct most_dev - holds all usb interface specific stuff
93 * @parent: parent object in sysfs
94 * @usb_device: pointer to usb device
95 * @iface: hardware interface
96 * @cap: channel capabilities
97 * @conf: channel configuration
98 * @dci: direct communication interface of hardware
99 * @hw_addr: MAC address of hardware
100 * @ep_address: endpoint address table
101 * @link_stat: link status of hardware
102 * @description: device description
103 * @suffix: suffix for channel name
104 * @anchor_list_lock: locks list access
105 * @padding_active: indicates channel uses padding
106 * @is_channel_healthy: health status table of each channel
107 * @anchor_list: list of anchored items
108 * @io_mutex: synchronize I/O with disconnect
109 * @link_stat_timer: timer for link status reports
110 * @poll_work_obj: work for polling link status
111 */
112 struct most_dev {
113 struct kobject *parent;
114 struct usb_device *usb_device;
115 struct most_interface iface;
116 struct most_channel_capability *cap;
117 struct most_channel_config *conf;
118 struct most_dci_obj *dci;
119 u8 hw_addr[6];
120 u8 *ep_address;
121 u16 link_stat;
122 char description[MAX_STRING_LEN];
123 char suffix[MAX_NUM_ENDPOINTS][MAX_SUFFIX_LEN];
124 spinlock_t anchor_list_lock[MAX_NUM_ENDPOINTS];
125 bool padding_active[MAX_NUM_ENDPOINTS];
126 bool is_channel_healthy[MAX_NUM_ENDPOINTS];
127 struct list_head *anchor_list;
128 struct mutex io_mutex;
129 struct timer_list link_stat_timer;
130 struct work_struct poll_work_obj;
131 };
132 #define to_mdev(d) container_of(d, struct most_dev, iface)
133 #define to_mdev_from_work(w) container_of(w, struct most_dev, poll_work_obj)
134
135 static struct workqueue_struct *schedule_usb_work;
136 static void wq_clear_halt(struct work_struct *wq_obj);
137 static void wq_netinfo(struct work_struct *wq_obj);
138
139 /**
140 * trigger_resync_vr - Vendor request to trigger HW re-sync mechanism
141 * @dev: usb device
142 *
143 */
144 static void trigger_resync_vr(struct usb_device *dev)
145 {
146 int retval;
147 u8 request_type = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT;
148 int *data = kzalloc(sizeof(*data), GFP_KERNEL);
149
150 if (!data)
151 goto error;
152 *data = HW_RESYNC;
153 retval = usb_control_msg(dev,
154 usb_sndctrlpipe(dev, 0),
155 0,
156 request_type,
157 0,
158 0,
159 data,
160 0,
161 5 * HZ);
162 kfree(data);
163 if (retval >= 0)
164 return;
165 error:
166 dev_err(&dev->dev, "Vendor request \"stall\" failed\n");
167 }
168
/**
 * drci_rd_reg - read a 16-bit DCI register
 * @dev: usb device
 * @reg: register address
 * @buf: buffer to store the two bytes read (raw little-endian register
 *	content, as delivered by the hardware)
 *
 * This reads data from INIC's direct register communication interface
 * via a vendor IN control request.
 *
 * NOTE(review): usb_control_msg() performs DMA on @buf, so callers must
 * pass a heap-allocated buffer, never a stack buffer — verify call sites.
 *
 * Returns the number of bytes transferred on success or a negative error
 * code (propagated from usb_control_msg).
 */
static inline int drci_rd_reg(struct usb_device *dev, u16 reg, void *buf)
{
	return usb_control_msg(dev,
			       usb_rcvctrlpipe(dev, 0),
			       DRCI_READ_REQ,
			       USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			       0x0000,
			       reg,
			       buf,
			       2,
			       5 * HZ);
}
189
/**
 * drci_wr_reg - write a 16-bit DCI register
 * @dev: usb device
 * @reg: register address
 * @data: data to write (carried in wValue of the control request; no
 *	data stage is used, hence the NULL buffer and zero length)
 *
 * This writes data to INIC's direct register communication interface
 * via a vendor OUT control request.
 *
 * Returns 0 on success or a negative error code (propagated from
 * usb_control_msg).
 */
static inline int drci_wr_reg(struct usb_device *dev, u16 reg, u16 data)
{
	return usb_control_msg(dev,
			       usb_sndctrlpipe(dev, 0),
			       DRCI_WRITE_REQ,
			       USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			       data,
			       reg,
			       NULL,
			       0,
			       5 * HZ);
}
210
/**
 * free_anchored_buffers - free device's anchored items
 * @mdev: the device
 * @channel: channel ID
 *
 * Walks the channel's anchor list, cancels every pending URB, reports
 * MBO_E_CLOSE to the core for each buffer and releases the anchors.
 *
 * The spinlock is dropped while an URB is being cancelled, because
 * usb_kill_urb() may sleep and the completion handler takes the same
 * lock.  NOTE(review): the list can therefore change while it is being
 * traversed — verify that no new URBs are enqueued on this channel
 * while poisoning is in progress.
 */
static void free_anchored_buffers(struct most_dev *mdev, unsigned int channel)
{
	struct mbo *mbo;
	struct buf_anchor *anchor, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
	list_for_each_entry_safe(anchor, tmp, &mdev->anchor_list[channel], list) {
		struct urb *urb = anchor->urb;

		spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
		if (likely(urb)) {
			mbo = urb->context;
			/*
			 * usb_kill_urb() may sleep; with IRQs disabled fall
			 * back to the asynchronous unlink and wait for the
			 * completion handler to signal urb_compl.
			 */
			if (!irqs_disabled()) {
				usb_kill_urb(urb);
			} else {
				usb_unlink_urb(urb);
				wait_for_completion(&anchor->urb_compl);
			}
			/* hand the buffer back to the core as closed */
			if ((mbo) && (mbo->complete)) {
				mbo->status = MBO_E_CLOSE;
				mbo->processed_length = 0;
				mbo->complete(mbo);
			}
			usb_free_urb(urb);
		}
		spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
		list_del(&anchor->list);
		kfree(anchor);
	}
	spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
}
248
249 /**
250 * get_stream_frame_size - calculate frame size of current configuration
251 * @cfg: channel configuration
252 */
253 static unsigned int get_stream_frame_size(struct most_channel_config *cfg)
254 {
255 unsigned int frame_size = 0;
256 unsigned int sub_size = cfg->subbuffer_size;
257
258 if (!sub_size) {
259 pr_warn("Misconfig: Subbuffer size zero.\n");
260 return frame_size;
261 }
262 switch (cfg->data_type) {
263 case MOST_CH_ISOC_AVP:
264 frame_size = AV_PACKETS_PER_XACT * sub_size;
265 break;
266 case MOST_CH_SYNC:
267 if (cfg->packets_per_xact == 0) {
268 pr_warn("Misconfig: Packets per XACT zero\n");
269 frame_size = 0;
270 } else if (cfg->packets_per_xact == 0xFF)
271 frame_size = (USB_MTU / sub_size) * sub_size;
272 else
273 frame_size = cfg->packets_per_xact * sub_size;
274 break;
275 default:
276 pr_warn("Query frame size of non-streaming channel\n");
277 break;
278 }
279 return frame_size;
280 }
281
282 /**
283 * hdm_poison_channel - mark buffers of this channel as invalid
284 * @iface: pointer to the interface
285 * @channel: channel ID
286 *
287 * This unlinks all URBs submitted to the HCD,
288 * calls the associated completion function of the core and removes
289 * them from the list.
290 *
291 * Returns 0 on success or error code otherwise.
292 */
293 static int hdm_poison_channel(struct most_interface *iface, int channel)
294 {
295 struct most_dev *mdev;
296
297 mdev = to_mdev(iface);
298 if (unlikely(!iface)) {
299 dev_warn(&mdev->usb_device->dev, "Poison: Bad interface.\n");
300 return -EIO;
301 }
302 if (unlikely((channel < 0) || (channel >= iface->num_channels))) {
303 dev_warn(&mdev->usb_device->dev, "Channel ID out of range.\n");
304 return -ECHRNG;
305 }
306
307 mdev->is_channel_healthy[channel] = false;
308
309 mutex_lock(&mdev->io_mutex);
310 free_anchored_buffers(mdev, channel);
311 if (mdev->padding_active[channel])
312 mdev->padding_active[channel] = false;
313
314 if (mdev->conf[channel].data_type == MOST_CH_ASYNC) {
315 del_timer_sync(&mdev->link_stat_timer);
316 cancel_work_sync(&mdev->poll_work_obj);
317 }
318 mutex_unlock(&mdev->io_mutex);
319 return 0;
320 }
321
322 /**
323 * hdm_add_padding - add padding bytes
324 * @mdev: most device
325 * @channel: channel ID
326 * @mbo: buffer object
327 *
328 * This inserts the INIC hardware specific padding bytes into a streaming
329 * channel's buffer
330 */
331 static int hdm_add_padding(struct most_dev *mdev, int channel, struct mbo *mbo)
332 {
333 struct most_channel_config *conf = &mdev->conf[channel];
334 unsigned int j, num_frames, frame_size;
335 u16 rd_addr, wr_addr;
336
337 frame_size = get_stream_frame_size(conf);
338 if (!frame_size)
339 return -EIO;
340 num_frames = mbo->buffer_length / frame_size;
341
342 if (num_frames < 1) {
343 dev_err(&mdev->usb_device->dev,
344 "Missed minimal transfer unit.\n");
345 return -EIO;
346 }
347
348 for (j = 1; j < num_frames; j++) {
349 wr_addr = (num_frames - j) * USB_MTU;
350 rd_addr = (num_frames - j) * frame_size;
351 memmove(mbo->virt_address + wr_addr,
352 mbo->virt_address + rd_addr,
353 frame_size);
354 }
355 mbo->buffer_length = num_frames * USB_MTU;
356 return 0;
357 }
358
359 /**
360 * hdm_remove_padding - remove padding bytes
361 * @mdev: most device
362 * @channel: channel ID
363 * @mbo: buffer object
364 *
365 * This takes the INIC hardware specific padding bytes off a streaming
366 * channel's buffer.
367 */
368 static int hdm_remove_padding(struct most_dev *mdev, int channel, struct mbo *mbo)
369 {
370 unsigned int j, num_frames, frame_size;
371 struct most_channel_config *const conf = &mdev->conf[channel];
372
373 frame_size = get_stream_frame_size(conf);
374 if (!frame_size)
375 return -EIO;
376 num_frames = mbo->processed_length / USB_MTU;
377
378 for (j = 1; j < num_frames; j++)
379 memmove(mbo->virt_address + frame_size * j,
380 mbo->virt_address + USB_MTU * j,
381 frame_size);
382
383 mbo->processed_length = frame_size * num_frames;
384 return 0;
385 }
386
/**
 * hdm_write_completion - completion function for submitted Tx URBs
 * @urb: the URB that has been completed
 *
 * This checks the status of the completed URB. In case the URB has been
 * unlinked before, it is immediately freed. On any other error the MBO
 * transfer flag is set. On success it frees allocated resources and calls
 * the completion function.
 *
 * Context: interrupt!
 */
static void hdm_write_completion(struct urb *urb)
{
	struct mbo *mbo;
	struct buf_anchor *anchor;
	struct most_dev *mdev;
	struct device *dev;
	unsigned int channel;
	unsigned long flags;

	mbo = urb->context;
	anchor = mbo->priv;
	mdev = to_mdev(mbo->ifp);
	channel = mbo->hdm_channel_id;
	dev = &mdev->usb_device->dev;

	/*
	 * URB was unlinked or the channel is being poisoned: wake the
	 * waiter in free_anchored_buffers(), which owns the cleanup.
	 */
	if ((urb->status == -ENOENT) || (urb->status == -ECONNRESET) ||
	    (!mdev->is_channel_healthy[channel])) {
		complete(&anchor->urb_compl);
		return;
	}

	if (unlikely(urb->status && !(urb->status == -ENOENT ||
				      urb->status == -ECONNRESET ||
				      urb->status == -ESHUTDOWN))) {
		mbo->processed_length = 0;
		switch (urb->status) {
		case -EPIPE:
			/*
			 * Endpoint stalled: stop the core from feeding this
			 * channel and defer usb_clear_halt() to process
			 * context via the work queue; wq_clear_halt() takes
			 * over ownership of urb, anchor and mbo.
			 */
			dev_warn(dev, "Broken OUT pipe detected\n");
			most_stop_enqueue(&mdev->iface, channel);
			mbo->status = MBO_E_INVAL;
			usb_unlink_urb(urb);
			INIT_WORK(&anchor->clear_work_obj, wq_clear_halt);
			queue_work(schedule_usb_work, &anchor->clear_work_obj);
			return;
		case -ENODEV:
		case -EPROTO:
			mbo->status = MBO_E_CLOSE;
			break;
		default:
			mbo->status = MBO_E_INVAL;
			break;
		}
	} else {
		mbo->status = MBO_SUCCESS;
		mbo->processed_length = urb->actual_length;
	}

	/* transfer finished: unhook the anchor and hand the MBO back */
	spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
	list_del(&anchor->list);
	spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
	kfree(anchor);

	if (likely(mbo->complete))
		mbo->complete(mbo);
	usb_free_urb(urb);
}
454
455 /**
456 * hdm_read_completion - completion funciton for submitted Rx URBs
457 * @urb: the URB that has been completed
458 *
459 * This checks the status of the completed URB. In case the URB has been
460 * unlinked before it is immediately freed. On any other error the MBO transfer
461 * flag is set. On success it frees allocated resources, removes
462 * padding bytes -if necessary- and calls the completion function.
463 *
464 * Context: interrupt!
465 *
466 * **************************************************************************
467 * Error codes returned by in urb->status
468 * or in iso_frame_desc[n].status (for ISO)
469 * *************************************************************************
470 *
471 * USB device drivers may only test urb status values in completion handlers.
472 * This is because otherwise there would be a race between HCDs updating
473 * these values on one CPU, and device drivers testing them on another CPU.
474 *
475 * A transfer's actual_length may be positive even when an error has been
476 * reported. That's because transfers often involve several packets, so that
477 * one or more packets could finish before an error stops further endpoint I/O.
478 *
479 * For isochronous URBs, the urb status value is non-zero only if the URB is
480 * unlinked, the device is removed, the host controller is disabled or the total
481 * transferred length is less than the requested length and the URB_SHORT_NOT_OK
482 * flag is set. Completion handlers for isochronous URBs should only see
483 * urb->status set to zero, -ENOENT, -ECONNRESET, -ESHUTDOWN, or -EREMOTEIO.
484 * Individual frame descriptor status fields may report more status codes.
485 *
486 *
487 * 0 Transfer completed successfully
488 *
489 * -ENOENT URB was synchronously unlinked by usb_unlink_urb
490 *
491 * -EINPROGRESS URB still pending, no results yet
492 * (That is, if drivers see this it's a bug.)
493 *
494 * -EPROTO (*, **) a) bitstuff error
495 * b) no response packet received within the
496 * prescribed bus turn-around time
497 * c) unknown USB error
498 *
499 * -EILSEQ (*, **) a) CRC mismatch
500 * b) no response packet received within the
501 * prescribed bus turn-around time
502 * c) unknown USB error
503 *
504 * Note that often the controller hardware does not
505 * distinguish among cases a), b), and c), so a
506 * driver cannot tell whether there was a protocol
507 * error, a failure to respond (often caused by
508 * device disconnect), or some other fault.
509 *
510 * -ETIME (**) No response packet received within the prescribed
511 * bus turn-around time. This error may instead be
512 * reported as -EPROTO or -EILSEQ.
513 *
514 * -ETIMEDOUT Synchronous USB message functions use this code
515 * to indicate timeout expired before the transfer
516 * completed, and no other error was reported by HC.
517 *
518 * -EPIPE (**) Endpoint stalled. For non-control endpoints,
519 * reset this status with usb_clear_halt().
520 *
521 * -ECOMM During an IN transfer, the host controller
522 * received data from an endpoint faster than it
523 * could be written to system memory
524 *
525 * -ENOSR During an OUT transfer, the host controller
526 * could not retrieve data from system memory fast
527 * enough to keep up with the USB data rate
528 *
529 * -EOVERFLOW (*) The amount of data returned by the endpoint was
530 * greater than either the max packet size of the
531 * endpoint or the remaining buffer size. "Babble".
532 *
533 * -EREMOTEIO The data read from the endpoint did not fill the
534 * specified buffer, and URB_SHORT_NOT_OK was set in
535 * urb->transfer_flags.
536 *
537 * -ENODEV Device was removed. Often preceded by a burst of
538 * other errors, since the hub driver doesn't detect
539 * device removal events immediately.
540 *
541 * -EXDEV ISO transfer only partially completed
542 * (only set in iso_frame_desc[n].status, not urb->status)
543 *
544 * -EINVAL ISO madness, if this happens: Log off and go home
545 *
546 * -ECONNRESET URB was asynchronously unlinked by usb_unlink_urb
547 *
548 * -ESHUTDOWN The device or host controller has been disabled due
549 * to some problem that could not be worked around,
550 * such as a physical disconnect.
551 *
552 *
553 * (*) Error codes like -EPROTO, -EILSEQ and -EOVERFLOW normally indicate
554 * hardware problems such as bad devices (including firmware) or cables.
555 *
556 * (**) This is also one of several codes that different kinds of host
557 * controller use to indicate a transfer has failed because of device
558 * disconnect. In the interval before the hub driver starts disconnect
559 * processing, devices may receive such fault reports for every request.
560 *
561 * See <https://www.kernel.org/doc/Documentation/usb/error-codes.txt>
562 */
563 static void hdm_read_completion(struct urb *urb)
564 {
565 struct mbo *mbo;
566 struct buf_anchor *anchor;
567 struct most_dev *mdev;
568 struct device *dev;
569 unsigned long flags;
570 unsigned int channel;
571
572 mbo = urb->context;
573 anchor = mbo->priv;
574 mdev = to_mdev(mbo->ifp);
575 channel = mbo->hdm_channel_id;
576 dev = &mdev->usb_device->dev;
577
578 if ((urb->status == -ENOENT) || (urb->status == -ECONNRESET) ||
579 (!mdev->is_channel_healthy[channel])) {
580 complete(&anchor->urb_compl);
581 return;
582 }
583
584 if (unlikely(urb->status && !(urb->status == -ENOENT ||
585 urb->status == -ECONNRESET ||
586 urb->status == -ESHUTDOWN))) {
587 mbo->processed_length = 0;
588 switch (urb->status) {
589 case -EPIPE:
590 dev_warn(dev, "Broken IN pipe detected\n");
591 mbo->status = MBO_E_INVAL;
592 usb_unlink_urb(urb);
593 INIT_WORK(&anchor->clear_work_obj, wq_clear_halt);
594 queue_work(schedule_usb_work, &anchor->clear_work_obj);
595 return;
596 case -ENODEV:
597 case -EPROTO:
598 mbo->status = MBO_E_CLOSE;
599 break;
600 case -EOVERFLOW:
601 dev_warn(dev, "Babble on IN pipe detected\n");
602 default:
603 mbo->status = MBO_E_INVAL;
604 break;
605 }
606 } else {
607 mbo->processed_length = urb->actual_length;
608 if (!mdev->padding_active[channel]) {
609 mbo->status = MBO_SUCCESS;
610 } else {
611 if (hdm_remove_padding(mdev, channel, mbo)) {
612 mbo->processed_length = 0;
613 mbo->status = MBO_E_INVAL;
614 } else {
615 mbo->status = MBO_SUCCESS;
616 }
617 }
618 }
619 spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
620 list_del(&anchor->list);
621 spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
622 kfree(anchor);
623
624 if (likely(mbo->complete))
625 mbo->complete(mbo);
626 usb_free_urb(urb);
627 }
628
629 /**
630 * hdm_enqueue - receive a buffer to be used for data transfer
631 * @iface: interface to enqueue to
632 * @channel: ID of the channel
633 * @mbo: pointer to the buffer object
634 *
635 * This allocates a new URB and fills it according to the channel
636 * that is being used for transmission of data. Before the URB is
637 * submitted it is stored in the private anchor list.
638 *
639 * Returns 0 on success. On any error the URB is freed and a error code
640 * is returned.
641 *
642 * Context: Could in _some_ cases be interrupt!
643 */
644 static int hdm_enqueue(struct most_interface *iface, int channel, struct mbo *mbo)
645 {
646 struct most_dev *mdev;
647 struct buf_anchor *anchor;
648 struct most_channel_config *conf;
649 struct device *dev;
650 int retval = 0;
651 struct urb *urb;
652 unsigned long flags;
653 unsigned long length;
654 void *virt_address;
655
656 if (unlikely(!iface || !mbo))
657 return -EIO;
658 if (unlikely(iface->num_channels <= channel) || (channel < 0))
659 return -ECHRNG;
660
661 mdev = to_mdev(iface);
662 conf = &mdev->conf[channel];
663 dev = &mdev->usb_device->dev;
664
665 if (!mdev->usb_device)
666 return -ENODEV;
667
668 urb = usb_alloc_urb(NO_ISOCHRONOUS_URB, GFP_ATOMIC);
669 if (!urb) {
670 dev_err(dev, "Failed to allocate URB\n");
671 return -ENOMEM;
672 }
673
674 anchor = kzalloc(sizeof(*anchor), GFP_ATOMIC);
675 if (!anchor) {
676 retval = -ENOMEM;
677 goto _error;
678 }
679
680 anchor->urb = urb;
681 init_completion(&anchor->urb_compl);
682 mbo->priv = anchor;
683
684 spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
685 list_add_tail(&anchor->list, &mdev->anchor_list[channel]);
686 spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
687
688 if ((mdev->padding_active[channel]) &&
689 (conf->direction & MOST_CH_TX))
690 if (hdm_add_padding(mdev, channel, mbo)) {
691 retval = -EIO;
692 goto _error_1;
693 }
694
695 urb->transfer_dma = mbo->bus_address;
696 virt_address = mbo->virt_address;
697 length = mbo->buffer_length;
698
699 if (conf->direction & MOST_CH_TX) {
700 usb_fill_bulk_urb(urb, mdev->usb_device,
701 usb_sndbulkpipe(mdev->usb_device,
702 mdev->ep_address[channel]),
703 virt_address,
704 length,
705 hdm_write_completion,
706 mbo);
707 if (conf->data_type != MOST_CH_ISOC_AVP)
708 urb->transfer_flags |= URB_ZERO_PACKET;
709 } else {
710 usb_fill_bulk_urb(urb, mdev->usb_device,
711 usb_rcvbulkpipe(mdev->usb_device,
712 mdev->ep_address[channel]),
713 virt_address,
714 length + conf->extra_len,
715 hdm_read_completion,
716 mbo);
717 }
718 urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
719
720 retval = usb_submit_urb(urb, GFP_KERNEL);
721 if (retval) {
722 dev_err(dev, "URB submit failed with error %d.\n", retval);
723 goto _error_1;
724 }
725 return 0;
726
727 _error_1:
728 spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
729 list_del(&anchor->list);
730 spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
731 kfree(anchor);
732 _error:
733 usb_free_urb(urb);
734 return retval;
735 }
736
737 /**
738 * hdm_configure_channel - receive channel configuration from core
739 * @iface: interface
740 * @channel: channel ID
741 * @conf: structure that holds the configuration information
742 */
743 static int hdm_configure_channel(struct most_interface *iface, int channel,
744 struct most_channel_config *conf)
745 {
746 unsigned int num_frames;
747 unsigned int frame_size;
748 unsigned int temp_size;
749 unsigned int tail_space;
750 struct most_dev *mdev;
751 struct device *dev;
752
753 mdev = to_mdev(iface);
754 mdev->is_channel_healthy[channel] = true;
755 dev = &mdev->usb_device->dev;
756
757 if (unlikely(!iface || !conf)) {
758 dev_err(dev, "Bad interface or config pointer.\n");
759 return -EINVAL;
760 }
761 if (unlikely((channel < 0) || (channel >= iface->num_channels))) {
762 dev_err(dev, "Channel ID out of range.\n");
763 return -EINVAL;
764 }
765 if ((!conf->num_buffers) || (!conf->buffer_size)) {
766 dev_err(dev, "Misconfig: buffer size or #buffers zero.\n");
767 return -EINVAL;
768 }
769
770 if (!(conf->data_type == MOST_CH_SYNC) &&
771 !((conf->data_type == MOST_CH_ISOC_AVP) &&
772 (conf->packets_per_xact != 0xFF))) {
773 mdev->padding_active[channel] = false;
774 conf->extra_len = 0;
775 goto exit;
776 }
777
778 mdev->padding_active[channel] = true;
779 temp_size = conf->buffer_size;
780
781 if ((conf->data_type != MOST_CH_SYNC) &&
782 (conf->data_type != MOST_CH_ISOC_AVP)) {
783 dev_warn(dev, "Unsupported data type\n");
784 return -EINVAL;
785 }
786
787 frame_size = get_stream_frame_size(conf);
788 if ((frame_size == 0) || (frame_size > USB_MTU)) {
789 dev_warn(dev, "Misconfig: frame size wrong\n");
790 return -EINVAL;
791 }
792
793 if (conf->buffer_size % frame_size) {
794 u16 tmp_val;
795
796 tmp_val = conf->buffer_size / frame_size;
797 conf->buffer_size = tmp_val * frame_size;
798 dev_notice(dev,
799 "Channel %d - rouding buffer size to %d bytes, "
800 "channel config says %d bytes\n",
801 channel,
802 conf->buffer_size,
803 temp_size);
804 }
805
806 num_frames = conf->buffer_size / frame_size;
807 tail_space = num_frames * (USB_MTU - frame_size);
808 temp_size += tail_space;
809
810 /* calculate extra length to comply w/ HW padding */
811 conf->extra_len = (CEILING(temp_size, USB_MTU) * USB_MTU)
812 - conf->buffer_size;
813 exit:
814 mdev->conf[channel] = *conf;
815 return 0;
816 }
817
818 /**
819 * hdm_update_netinfo - retrieve latest networking information
820 * @mdev: device interface
821 *
822 * This triggers the USB vendor requests to read the hardware address and
823 * the current link status of the attached device.
824 */
825 static int hdm_update_netinfo(struct most_dev *mdev)
826 {
827 struct device *dev = &mdev->usb_device->dev;
828 int i;
829 u16 link;
830 u8 addr[6];
831
832 if (!is_valid_ether_addr(mdev->hw_addr)) {
833 if (0 > drci_rd_reg(mdev->usb_device,
834 DRCI_REG_HW_ADDR_HI, addr)) {
835 dev_err(dev, "Vendor request \"hw_addr_hi\" failed\n");
836 return -1;
837 }
838 if (0 > drci_rd_reg(mdev->usb_device,
839 DRCI_REG_HW_ADDR_MI, addr + 2)) {
840 dev_err(dev, "Vendor request \"hw_addr_mid\" failed\n");
841 return -1;
842 }
843 if (0 > drci_rd_reg(mdev->usb_device,
844 DRCI_REG_HW_ADDR_LO, addr + 4)) {
845 dev_err(dev, "Vendor request \"hw_addr_low\" failed\n");
846 return -1;
847 }
848 mutex_lock(&mdev->io_mutex);
849 for (i = 0; i < 6; i++)
850 mdev->hw_addr[i] = addr[i];
851 mutex_unlock(&mdev->io_mutex);
852
853 }
854 if (0 > drci_rd_reg(mdev->usb_device, DRCI_REG_NI_STATE, &link)) {
855 dev_err(dev, "Vendor request \"link status\" failed\n");
856 return -1;
857 }
858 le16_to_cpus(&link);
859 mutex_lock(&mdev->io_mutex);
860 mdev->link_stat = link;
861 mutex_unlock(&mdev->io_mutex);
862 return 0;
863 }
864
865 /**
866 * hdm_request_netinfo - request network information
867 * @iface: pointer to interface
868 * @channel: channel ID
869 *
870 * This is used as trigger to set up the link status timer that
871 * polls for the NI state of the INIC every 2 seconds.
872 *
873 */
874 static void hdm_request_netinfo(struct most_interface *iface, int channel)
875 {
876 struct most_dev *mdev;
877
878 BUG_ON(!iface);
879 mdev = to_mdev(iface);
880 mdev->link_stat_timer.expires = jiffies + HZ;
881 mod_timer(&mdev->link_stat_timer, mdev->link_stat_timer.expires);
882 }
883
/**
 * link_stat_timer_handler - add work to link_stat work queue
 * @data: pointer to USB device instance
 *
 * The handler runs in interrupt context. That's why we need to defer the
 * tasks to a work queue.
 */
static void link_stat_timer_handler(unsigned long data)
{
	struct most_dev *mdev = (struct most_dev *)data;

	queue_work(schedule_usb_work, &mdev->poll_work_obj);
	/* self re-arm: poll the link state again in two seconds */
	mdev->link_stat_timer.expires = jiffies + (2 * HZ);
	add_timer(&mdev->link_stat_timer);
}
899
900 /**
901 * wq_netinfo - work queue function
902 * @wq_obj: object that holds data for our deferred work to do
903 *
904 * This retrieves the network interface status of the USB INIC
905 * and compares it with the current status. If the status has
906 * changed, it updates the status of the core.
907 */
908 static void wq_netinfo(struct work_struct *wq_obj)
909 {
910 struct most_dev *mdev;
911 int i, prev_link_stat;
912 u8 prev_hw_addr[6];
913
914 mdev = to_mdev_from_work(wq_obj);
915 prev_link_stat = mdev->link_stat;
916
917 for (i = 0; i < 6; i++)
918 prev_hw_addr[i] = mdev->hw_addr[i];
919
920 if (0 > hdm_update_netinfo(mdev))
921 return;
922 if ((prev_link_stat != mdev->link_stat) ||
923 (prev_hw_addr[0] != mdev->hw_addr[0]) ||
924 (prev_hw_addr[1] != mdev->hw_addr[1]) ||
925 (prev_hw_addr[2] != mdev->hw_addr[2]) ||
926 (prev_hw_addr[3] != mdev->hw_addr[3]) ||
927 (prev_hw_addr[4] != mdev->hw_addr[4]) ||
928 (prev_hw_addr[5] != mdev->hw_addr[5]))
929 most_deliver_netinfo(&mdev->iface, mdev->link_stat,
930 &mdev->hw_addr[0]);
931 }
932
/**
 * wq_clear_halt - work queue function
 * @wq_obj: work_struct object to execute
 *
 * This sends a clear_halt to the given USB pipe.  Scheduled from the
 * completion handlers when an endpoint reported -EPIPE; runs in process
 * context because usb_clear_halt() may sleep.  Owns and releases the
 * URB, the anchor and (via mbo->complete) the MBO.
 */
static void wq_clear_halt(struct work_struct *wq_obj)
{
	struct buf_anchor *anchor;
	struct most_dev *mdev;
	struct mbo *mbo;
	struct urb *urb;
	unsigned int channel;
	unsigned long flags;

	anchor = to_buf_anchor(wq_obj);
	urb = anchor->urb;
	mbo = urb->context;
	mdev = to_mdev(mbo->ifp);
	channel = mbo->hdm_channel_id;

	if (usb_clear_halt(urb->dev, urb->pipe))
		dev_warn(&mdev->usb_device->dev, "Failed to reset endpoint.\n");

	usb_free_urb(urb);
	spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
	list_del(&anchor->list);
	spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);

	/* report the failed MBO back to the core */
	if (likely(mbo->complete))
		mbo->complete(mbo);
	/* Tx channels were stopped in hdm_write_completion(); resume them */
	if (mdev->conf[channel].direction & MOST_CH_TX)
		most_resume_enqueue(&mdev->iface, channel);

	kfree(anchor);
}
969
/**
 * hdm_usb_fops - file operation table for USB driver
 *
 * Only the owner field is set; the driver exposes no file operations
 * of its own here.
 */
static const struct file_operations hdm_usb_fops = {
	.owner = THIS_MODULE,
};
976
977 /**
978 * usb_device_id - ID table for HCD device probing
979 */
980 static struct usb_device_id usbid[] = {
981 { USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_BRDG), },
982 { USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_INIC), },
983 { } /* Terminating entry */
984 };
985
986 #define MOST_DCI_RO_ATTR(_name) \
987 struct most_dci_attribute most_dci_attr_##_name = \
988 __ATTR(_name, S_IRUGO, show_value, NULL)
989
990 #define MOST_DCI_ATTR(_name) \
991 struct most_dci_attribute most_dci_attr_##_name = \
992 __ATTR(_name, S_IRUGO | S_IWUSR, show_value, store_value)
993
994 /**
995 * struct most_dci_attribute - to access the attributes of a dci object
996 * @attr: attributes of a dci object
997 * @show: pointer to the show function
998 * @store: pointer to the store function
999 */
1000 struct most_dci_attribute {
1001 struct attribute attr;
1002 ssize_t (*show)(struct most_dci_obj *d,
1003 struct most_dci_attribute *attr,
1004 char *buf);
1005 ssize_t (*store)(struct most_dci_obj *d,
1006 struct most_dci_attribute *attr,
1007 const char *buf,
1008 size_t count);
1009 };
1010 #define to_dci_attr(a) container_of(a, struct most_dci_attribute, attr)
1011
1012
1013 /**
1014 * dci_attr_show - show function for dci object
1015 * @kobj: pointer to kobject
1016 * @attr: pointer to attribute struct
1017 * @buf: buffer
1018 */
1019 static ssize_t dci_attr_show(struct kobject *kobj, struct attribute *attr,
1020 char *buf)
1021 {
1022 struct most_dci_attribute *dci_attr = to_dci_attr(attr);
1023 struct most_dci_obj *dci_obj = to_dci_obj(kobj);
1024
1025 if (!dci_attr->show)
1026 return -EIO;
1027
1028 return dci_attr->show(dci_obj, dci_attr, buf);
1029 }
1030
1031 /**
1032 * dci_attr_store - store function for dci object
1033 * @kobj: pointer to kobject
1034 * @attr: pointer to attribute struct
1035 * @buf: buffer
1036 * @len: length of buffer
1037 */
1038 static ssize_t dci_attr_store(struct kobject *kobj,
1039 struct attribute *attr,
1040 const char *buf,
1041 size_t len)
1042 {
1043 struct most_dci_attribute *dci_attr = to_dci_attr(attr);
1044 struct most_dci_obj *dci_obj = to_dci_obj(kobj);
1045
1046 if (!dci_attr->store)
1047 return -EIO;
1048
1049 return dci_attr->store(dci_obj, dci_attr, buf, len);
1050 }
1051
/* sysfs entry points for the DCI ktype; both simply dispatch to the
 * typed callbacks stored in struct most_dci_attribute
 */
static const struct sysfs_ops most_dci_sysfs_ops = {
	.show = dci_attr_show,
	.store = dci_attr_store,
};
1056
/**
 * most_dci_release - release function for dci object
 * @kobj: pointer to kobject
 *
 * This frees the memory allocated for the dci object. Invoked by the
 * kobject core when the last reference is dropped (kobject_put()).
 */
static void most_dci_release(struct kobject *kobj)
{
	struct most_dci_obj *dci_obj = to_dci_obj(kobj);

	kfree(dci_obj);
}
1069
1070 static ssize_t show_value(struct most_dci_obj *dci_obj,
1071 struct most_dci_attribute *attr, char *buf)
1072 {
1073 u16 tmp_val;
1074 u16 reg_addr;
1075 int err;
1076
1077 if (!strcmp(attr->attr.name, "ni_state"))
1078 reg_addr = DRCI_REG_NI_STATE;
1079 else if (!strcmp(attr->attr.name, "packet_bandwidth"))
1080 reg_addr = DRCI_REG_PACKET_BW;
1081 else if (!strcmp(attr->attr.name, "node_address"))
1082 reg_addr = DRCI_REG_NODE_ADDR;
1083 else if (!strcmp(attr->attr.name, "node_position"))
1084 reg_addr = DRCI_REG_NODE_POS;
1085 else if (!strcmp(attr->attr.name, "mep_filter"))
1086 reg_addr = DRCI_REG_MEP_FILTER;
1087 else if (!strcmp(attr->attr.name, "mep_hash0"))
1088 reg_addr = DRCI_REG_HASH_TBL0;
1089 else if (!strcmp(attr->attr.name, "mep_hash1"))
1090 reg_addr = DRCI_REG_HASH_TBL1;
1091 else if (!strcmp(attr->attr.name, "mep_hash2"))
1092 reg_addr = DRCI_REG_HASH_TBL2;
1093 else if (!strcmp(attr->attr.name, "mep_hash3"))
1094 reg_addr = DRCI_REG_HASH_TBL3;
1095 else if (!strcmp(attr->attr.name, "mep_eui48_hi"))
1096 reg_addr = DRCI_REG_HW_ADDR_HI;
1097 else if (!strcmp(attr->attr.name, "mep_eui48_mi"))
1098 reg_addr = DRCI_REG_HW_ADDR_MI;
1099 else if (!strcmp(attr->attr.name, "mep_eui48_lo"))
1100 reg_addr = DRCI_REG_HW_ADDR_LO;
1101 else
1102 return -EIO;
1103
1104 err = drci_rd_reg(dci_obj->usb_device, reg_addr, &tmp_val);
1105 if (err < 0)
1106 return err;
1107
1108 return snprintf(buf, PAGE_SIZE, "%04x\n", le16_to_cpu(tmp_val));
1109 }
1110
1111 static ssize_t store_value(struct most_dci_obj *dci_obj,
1112 struct most_dci_attribute *attr,
1113 const char *buf, size_t count)
1114 {
1115 u16 v16;
1116 u16 reg_addr;
1117 int err;
1118
1119 if (!strcmp(attr->attr.name, "mep_filter"))
1120 reg_addr = DRCI_REG_MEP_FILTER;
1121 else if (!strcmp(attr->attr.name, "mep_hash0"))
1122 reg_addr = DRCI_REG_HASH_TBL0;
1123 else if (!strcmp(attr->attr.name, "mep_hash1"))
1124 reg_addr = DRCI_REG_HASH_TBL1;
1125 else if (!strcmp(attr->attr.name, "mep_hash2"))
1126 reg_addr = DRCI_REG_HASH_TBL2;
1127 else if (!strcmp(attr->attr.name, "mep_hash3"))
1128 reg_addr = DRCI_REG_HASH_TBL3;
1129 else if (!strcmp(attr->attr.name, "mep_eui48_hi"))
1130 reg_addr = DRCI_REG_HW_ADDR_HI;
1131 else if (!strcmp(attr->attr.name, "mep_eui48_mi"))
1132 reg_addr = DRCI_REG_HW_ADDR_MI;
1133 else if (!strcmp(attr->attr.name, "mep_eui48_lo"))
1134 reg_addr = DRCI_REG_HW_ADDR_LO;
1135 else
1136 return -EIO;
1137
1138 err = kstrtou16(buf, 16, &v16);
1139 if (err)
1140 return err;
1141
1142 err = drci_wr_reg(dci_obj->usb_device, reg_addr, cpu_to_le16(v16));
1143 if (err < 0)
1144 return err;
1145
1146 return count;
1147 }
1148
/* read-only network/node status registers */
static MOST_DCI_RO_ATTR(ni_state);
static MOST_DCI_RO_ATTR(packet_bandwidth);
static MOST_DCI_RO_ATTR(node_address);
static MOST_DCI_RO_ATTR(node_position);
/* read/write MEP filter, hash-table and EUI-48 address registers */
static MOST_DCI_ATTR(mep_filter);
static MOST_DCI_ATTR(mep_hash0);
static MOST_DCI_ATTR(mep_hash1);
static MOST_DCI_ATTR(mep_hash2);
static MOST_DCI_ATTR(mep_hash3);
static MOST_DCI_ATTR(mep_eui48_hi);
static MOST_DCI_ATTR(mep_eui48_mi);
static MOST_DCI_ATTR(mep_eui48_lo);
1161
/**
 * most_dci_def_attrs - array of default attribute files of the dci object
 *
 * Installed via most_dci_ktype; sysfs creates one file per entry when
 * the dci kobject is added. Must stay NULL-terminated.
 */
static struct attribute *most_dci_def_attrs[] = {
	&most_dci_attr_ni_state.attr,
	&most_dci_attr_packet_bandwidth.attr,
	&most_dci_attr_node_address.attr,
	&most_dci_attr_node_position.attr,
	&most_dci_attr_mep_filter.attr,
	&most_dci_attr_mep_hash0.attr,
	&most_dci_attr_mep_hash1.attr,
	&most_dci_attr_mep_hash2.attr,
	&most_dci_attr_mep_hash3.attr,
	&most_dci_attr_mep_eui48_hi.attr,
	&most_dci_attr_mep_eui48_mi.attr,
	&most_dci_attr_mep_eui48_lo.attr,
	NULL,
};
1180
/**
 * DCI ktype - ties the dci kobject to its sysfs dispatch, release
 * function and default attribute files
 */
static struct kobj_type most_dci_ktype = {
	.sysfs_ops = &most_dci_sysfs_ops,
	.release = most_dci_release,
	.default_attrs = most_dci_def_attrs,
};
1189
1190 /**
1191 * create_most_dci_obj - allocates a dci object
1192 * @parent: parent kobject
1193 *
1194 * This creates a dci object and registers it with sysfs.
1195 * Returns a pointer to the object or NULL when something went wrong.
1196 */
1197 static struct
1198 most_dci_obj *create_most_dci_obj(struct kobject *parent)
1199 {
1200 struct most_dci_obj *most_dci;
1201 int retval;
1202
1203 most_dci = kzalloc(sizeof(*most_dci), GFP_KERNEL);
1204 if (!most_dci)
1205 return NULL;
1206
1207 retval = kobject_init_and_add(&most_dci->kobj, &most_dci_ktype, parent,
1208 "dci");
1209 if (retval) {
1210 kobject_put(&most_dci->kobj);
1211 return NULL;
1212 }
1213 return most_dci;
1214 }
1215
1216 /**
1217 * destroy_most_dci_obj - DCI object release function
1218 * @p: pointer to dci object
1219 */
1220 static void destroy_most_dci_obj(struct most_dci_obj *p)
1221 {
1222 kobject_put(&p->kobj);
1223 }
1224
/**
 * hdm_probe - probe function of USB device driver
 * @interface: Interface of the attached USB device
 * @id: Pointer to the USB ID table.
 *
 * This allocates and initializes the device instance, adds the new
 * entry to the internal list, scans the USB descriptors and registers
 * the interface with the core.
 * Additionally, the DCI objects are created and the hardware is sync'd.
 *
 * Return 0 on success. In case of an error a negative number is returned.
 */
static int
hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
{
	unsigned int i;
	unsigned int num_endpoints;
	struct most_channel_capability *tmp_cap;
	struct most_dev *mdev;
	struct usb_device *usb_dev;
	struct device *dev;
	struct usb_host_interface *usb_iface_desc;
	struct usb_endpoint_descriptor *ep_desc;
	int ret = 0;

	usb_iface_desc = interface->cur_altsetting;
	usb_dev = interface_to_usbdev(interface);
	dev = &usb_dev->dev;
	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		goto exit_ENOMEM;

	usb_set_intfdata(interface, mdev);
	num_endpoints = usb_iface_desc->desc.bNumEndpoints;
	mutex_init(&mdev->io_mutex);
	INIT_WORK(&mdev->poll_work_obj, wq_netinfo);
	/* timer is only prepared here; presumably armed later by the
	 * netinfo/link-status path - TODO confirm against hdm_request_netinfo
	 */
	init_timer(&mdev->link_stat_timer);

	mdev->usb_device = usb_dev;
	mdev->link_stat_timer.function = link_stat_timer_handler;
	mdev->link_stat_timer.data = (unsigned long)mdev;
	mdev->link_stat_timer.expires = jiffies + (2 * HZ);

	/* hook up the HDM callbacks the MOST core will invoke */
	mdev->iface.mod = hdm_usb_fops.owner;
	mdev->iface.interface = ITYPE_USB;
	mdev->iface.configure = hdm_configure_channel;
	mdev->iface.request_netinfo = hdm_request_netinfo;
	mdev->iface.enqueue = hdm_enqueue;
	mdev->iface.poison_channel = hdm_poison_channel;
	mdev->iface.description = mdev->description;
	mdev->iface.num_channels = num_endpoints;

	snprintf(mdev->description, sizeof(mdev->description),
		 "usb_device %d-%s:%d.%d",
		 usb_dev->bus->busnum,
		 usb_dev->devpath,
		 usb_dev->config->desc.bConfigurationValue,
		 usb_iface_desc->desc.bInterfaceNumber);

	/* per-endpoint configuration and capability tables */
	mdev->conf = kcalloc(num_endpoints, sizeof(*mdev->conf), GFP_KERNEL);
	if (!mdev->conf)
		goto exit_free;

	mdev->cap = kcalloc(num_endpoints, sizeof(*mdev->cap), GFP_KERNEL);
	if (!mdev->cap)
		goto exit_free1;

	mdev->iface.channel_vector = mdev->cap;
	mdev->iface.priv = NULL;

	mdev->ep_address =
		kcalloc(num_endpoints, sizeof(*mdev->ep_address), GFP_KERNEL);
	if (!mdev->ep_address)
		goto exit_free2;

	mdev->anchor_list =
		kcalloc(num_endpoints, sizeof(*mdev->anchor_list), GFP_KERNEL);
	if (!mdev->anchor_list)
		goto exit_free3;

	/* expose one MOST channel per USB endpoint of this interface */
	tmp_cap = mdev->cap;
	for (i = 0; i < num_endpoints; i++) {
		ep_desc = &usb_iface_desc->endpoint[i].desc;
		mdev->ep_address[i] = ep_desc->bEndpointAddress;
		mdev->padding_active[i] = false;
		mdev->is_channel_healthy[i] = true;

		snprintf(&mdev->suffix[i][0], MAX_SUFFIX_LEN, "ep%02x",
			 mdev->ep_address[i]);

		tmp_cap->name_suffix = &mdev->suffix[i][0];
		tmp_cap->buffer_size_packet = MAX_BUF_SIZE;
		tmp_cap->buffer_size_streaming = MAX_BUF_SIZE;
		tmp_cap->num_buffers_packet = BUF_CHAIN_SIZE;
		tmp_cap->num_buffers_streaming = BUF_CHAIN_SIZE;
		tmp_cap->data_type = MOST_CH_CONTROL | MOST_CH_ASYNC |
				     MOST_CH_ISOC_AVP | MOST_CH_SYNC;
		/* channel direction follows the endpoint: IN means receive */
		if (ep_desc->bEndpointAddress & USB_DIR_IN)
			tmp_cap->direction = MOST_CH_RX;
		else
			tmp_cap->direction = MOST_CH_TX;
		tmp_cap++;
		INIT_LIST_HEAD(&mdev->anchor_list[i]);
		spin_lock_init(&mdev->anchor_list_lock[i]);
	}
	dev_notice(dev, "claimed gadget: Vendor=%4.4x ProdID=%4.4x Bus=%02x Device=%02x\n",
		   le16_to_cpu(usb_dev->descriptor.idVendor),
		   le16_to_cpu(usb_dev->descriptor.idProduct),
		   usb_dev->bus->busnum,
		   usb_dev->devnum);

	dev_notice(dev, "device path: /sys/bus/usb/devices/%d-%s:%d.%d\n",
		   usb_dev->bus->busnum,
		   usb_dev->devpath,
		   usb_dev->config->desc.bConfigurationValue,
		   usb_iface_desc->desc.bInterfaceNumber);

	mdev->parent = most_register_interface(&mdev->iface);
	if (IS_ERR(mdev->parent)) {
		ret = PTR_ERR(mdev->parent);
		goto exit_free4;
	}

	mutex_lock(&mdev->io_mutex);
	/* DCI sysfs object exists only for the INIC product */
	if (le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_INIC) {
		/* this increments the reference count of the instance
		 * object of the core
		 */
		mdev->dci = create_most_dci_obj(mdev->parent);
		if (!mdev->dci) {
			mutex_unlock(&mdev->io_mutex);
			most_deregister_interface(&mdev->iface);
			ret = -ENOMEM;
			goto exit_free4;
		}

		kobject_uevent(&mdev->dci->kobj, KOBJ_ADD);
		mdev->dci->usb_device = mdev->usb_device;
		trigger_resync_vr(usb_dev);
	}
	mutex_unlock(&mdev->io_mutex);
	return 0;

	/* unwind in reverse allocation order */
exit_free4:
	kfree(mdev->anchor_list);
exit_free3:
	kfree(mdev->ep_address);
exit_free2:
	kfree(mdev->cap);
exit_free1:
	kfree(mdev->conf);
exit_free:
	kfree(mdev);
exit_ENOMEM:
	/* allocation failures arrive here with ret still 0 and are
	 * collapsed to -ENOMEM; any other error code is returned as-is
	 */
	if (ret == 0 || ret == -ENOMEM) {
		ret = -ENOMEM;
		dev_err(dev, "out of memory\n");
	}
	return ret;
}
1385
/**
 * hdm_disconnect - disconnect function of USB device driver
 * @interface: Interface of the attached USB device
 *
 * This deregisters the interface with the core, removes the kernel timer
 * and frees resources.
 *
 * Context: hub kernel thread
 */
static void hdm_disconnect(struct usb_interface *interface)
{
	struct most_dev *mdev;

	mdev = usb_get_intfdata(interface);
	/* clear the device pointer under io_mutex so concurrent I/O paths
	 * observe the device going away before resources are released
	 */
	mutex_lock(&mdev->io_mutex);
	usb_set_intfdata(interface, NULL);
	mdev->usb_device = NULL;
	mutex_unlock(&mdev->io_mutex);

	/* stop all deferred activity before tearing the interface down */
	del_timer_sync(&mdev->link_stat_timer);
	cancel_work_sync(&mdev->poll_work_obj);

	/* NOTE(review): mdev->dci is only allocated for the INIC product;
	 * for the bridge this passes NULL - verify the callee tolerates it
	 */
	destroy_most_dci_obj(mdev->dci);
	most_deregister_interface(&mdev->iface);

	kfree(mdev->anchor_list);
	kfree(mdev->cap);
	kfree(mdev->conf);
	kfree(mdev->ep_address);
	kfree(mdev);
}
1417
/* USB driver descriptor registered with the USB core */
static struct usb_driver hdm_usb = {
	.name = "hdm_usb",
	.id_table = usbid,
	.probe = hdm_probe,
	.disconnect = hdm_disconnect,
};
1424
1425 static int __init hdm_usb_init(void)
1426 {
1427 pr_info("hdm_usb_init()\n");
1428 if (usb_register(&hdm_usb)) {
1429 pr_err("could not register hdm_usb driver\n");
1430 return -EIO;
1431 }
1432 schedule_usb_work = create_workqueue("hdmu_work");
1433 if (schedule_usb_work == NULL) {
1434 pr_err("could not create workqueue\n");
1435 usb_deregister(&hdm_usb);
1436 return -ENOMEM;
1437 }
1438 return 0;
1439 }
1440
/**
 * hdm_usb_exit - module exit: flush/destroy workqueue, deregister driver
 *
 * destroy_workqueue() waits for pending work before the driver (and its
 * callbacks) is removed from the USB core.
 */
static void __exit hdm_usb_exit(void)
{
	pr_info("hdm_usb_exit()\n");
	destroy_workqueue(schedule_usb_work);
	usb_deregister(&hdm_usb);
}
1447
/* module entry/exit points and metadata */
module_init(hdm_usb_init);
module_exit(hdm_usb_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
MODULE_DESCRIPTION("HDM_4_USB");