1 /*
2 * Driver for PLX NET2272 USB device controller
3 *
4 * Copyright (C) 2005-2006 PLX Technology, Inc.
5 * Copyright (C) 2006-2011 Analog Devices, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/errno.h>
25 #include <linux/gpio.h>
26 #include <linux/init.h>
27 #include <linux/interrupt.h>
28 #include <linux/io.h>
29 #include <linux/ioport.h>
30 #include <linux/irq.h>
31 #include <linux/kernel.h>
32 #include <linux/list.h>
33 #include <linux/module.h>
34 #include <linux/moduleparam.h>
35 #include <linux/pci.h>
36 #include <linux/platform_device.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/timer.h>
40 #include <linux/usb.h>
41 #include <linux/usb/ch9.h>
42 #include <linux/usb/gadget.h>
43
44 #include <asm/byteorder.h>
45 #include <asm/system.h>
46 #include <asm/unaligned.h>
47
48 #include "net2272.h"
49
50 #define DRIVER_DESC "PLX NET2272 USB Peripheral Controller"
51
52 static const char driver_name[] = "net2272";
53 static const char driver_vers[] = "2006 October 17/mainline";
54 static const char driver_desc[] = DRIVER_DESC;
55
56 static const char ep0name[] = "ep0";
57 static const char * const ep_name[] = {
58 ep0name,
59 "ep-a", "ep-b", "ep-c",
60 };
61
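/*
 * DMA_ADDR_INVALID marks a request whose buffer has not been DMA-mapped yet;
 * net2272_queue() maps such buffers itself when DMA is in use.
 */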
62 #define DMA_ADDR_INVALID (~(dma_addr_t)0)
63 #ifdef CONFIG_USB_GADGET_NET2272_DMA
64 /*
65 * use_dma: the NET2272 can use an external DMA controller.
66 * Note that since there is no generic DMA api, some functions,
67 * notably request_dma, start_dma, and cancel_dma will need to be
68 * modified for your platform's particular dma controller.
69 *
70 * If use_dma is disabled, pio will be used instead.
71 */
72 static bool use_dma;
73 module_param(use_dma, bool, 0644);
74
75 /*
76 * dma_ep: selects the endpoint for use with dma (1=ep-a, 2=ep-b)
77 * The NET2272 can only use dma for a single endpoint at a time.
78 * At some point this could be modified to allow either endpoint
79 * to take control of dma as it becomes available.
80 *
81 * Note that DMA should not be used on OUT endpoints unless it can
82 * be guaranteed that no short packets will arrive on an IN endpoint
83 * while the DMA operation is pending. Otherwise the OUT DMA will
84 * terminate prematurely (See NET2272 Errata 630-0213-0101)
85 */
86 static ushort dma_ep = 1;
87 module_param(dma_ep, ushort, 0644);
88
89 /*
90  * dma_mode: net2272 dma mode setting (see LOCCTL1 definition):
91 * mode 0 == Slow DREQ mode
92 * mode 1 == Fast DREQ mode
93 * mode 2 == Burst mode
94 */
95 static ushort dma_mode = 2;
96 module_param(dma_mode, ushort, 0644);
97 #else
98 #define use_dma 0
99 #define dma_ep 1
100 #define dma_mode 2
101 #endif
102
103 /*
104 * fifo_mode: net2272 buffer configuration:
105 * mode 0 == ep-{a,b,c} 512db each
106 * mode 1 == ep-a 1k, ep-{b,c} 512db
107 * mode 2 == ep-a 1k, ep-b 1k, ep-c 512db
108 * mode 3 == ep-a 1k, ep-b disabled, ep-c 512db
109 */
110 static ushort fifo_mode = 0;
111 module_param(fifo_mode, ushort, 0644);
112
113 /*
114 * enable_suspend: When enabled, the driver will respond to
115 * USB suspend requests by powering down the NET2272. Otherwise,
116  * USB suspend requests will be ignored. This is acceptable for
117  * self-powered devices. For bus-powered devices set this to 1.
118 */
119 static ushort enable_suspend = 0;
120 module_param(enable_suspend, ushort, 0644);
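/*
 * Example (illustrative only, assuming the driver is built as a module named
 * net2272): the parameters above may be set at load time, e.g.
 *
 *   modprobe net2272 fifo_mode=2 enable_suspend=1
 *
 * and, when built with CONFIG_USB_GADGET_NET2272_DMA:
 *
 *   modprobe net2272 use_dma=1 dma_ep=1 dma_mode=2
 *
 * Because the parameters are registered with mode 0644 they are also exposed
 * under /sys/module/net2272/parameters/.
 */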
121
122 static void assert_out_naking(struct net2272_ep *ep, const char *where)
123 {
124 u8 tmp;
125
126 #ifndef DEBUG
127 return;
128 #endif
129
130 tmp = net2272_ep_read(ep, EP_STAT0);
131 if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
132 dev_dbg(ep->dev->dev, "%s %s %02x !NAK\n",
133 ep->ep.name, where, tmp);
134 net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
135 }
136 }
137 #define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__)
138
139 static void stop_out_naking(struct net2272_ep *ep)
140 {
141 u8 tmp = net2272_ep_read(ep, EP_STAT0);
142
143 if ((tmp & (1 << NAK_OUT_PACKETS)) != 0)
144 net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
145 }
146
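/*
 * PIPEDIR is used on bEndpointAddress values below; this works because
 * usb_pipein() just tests the USB_DIR_IN bit, which occupies the same
 * position in an endpoint address.
 */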
147 #define PIPEDIR(bAddress) (usb_pipein(bAddress) ? "in" : "out")
148
149 static char *type_string(u8 bmAttributes)
150 {
151 switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
152 case USB_ENDPOINT_XFER_BULK: return "bulk";
153 case USB_ENDPOINT_XFER_ISOC: return "iso";
154 case USB_ENDPOINT_XFER_INT: return "intr";
155 default: return "control";
156 }
157 }
158
159 static char *buf_state_string(unsigned state)
160 {
161 switch (state) {
162 case BUFF_FREE: return "free";
163 case BUFF_VALID: return "valid";
164 case BUFF_LCL: return "local";
165 case BUFF_USB: return "usb";
166 default: return "unknown";
167 }
168 }
169
170 static char *dma_mode_string(void)
171 {
172 if (!use_dma)
173 return "PIO";
174 switch (dma_mode) {
175 case 0: return "SLOW DREQ";
176 case 1: return "FAST DREQ";
177 case 2: return "BURST";
178 default: return "invalid";
179 }
180 }
181
182 static void net2272_dequeue_all(struct net2272_ep *);
183 static int net2272_kick_dma(struct net2272_ep *, struct net2272_request *);
184 static int net2272_fifo_status(struct usb_ep *);
185
186 static struct usb_ep_ops net2272_ep_ops;
187
188 /*---------------------------------------------------------------------------*/
189
190 static int
191 net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
192 {
193 struct net2272 *dev;
194 struct net2272_ep *ep;
195 u32 max;
196 u8 tmp;
197 unsigned long flags;
198
199 ep = container_of(_ep, struct net2272_ep, ep);
200 if (!_ep || !desc || ep->desc || _ep->name == ep0name
201 || desc->bDescriptorType != USB_DT_ENDPOINT)
202 return -EINVAL;
203 dev = ep->dev;
204 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
205 return -ESHUTDOWN;
206
207 max = le16_to_cpu(desc->wMaxPacketSize) & 0x1fff;
208
209 spin_lock_irqsave(&dev->lock, flags);
210 _ep->maxpacket = max & 0x7fff;
211 ep->desc = desc;
212
213 /* net2272_ep_reset() has already been called */
214 ep->stopped = 0;
215 ep->wedged = 0;
216
217 /* set speed-dependent max packet */
218 net2272_ep_write(ep, EP_MAXPKT0, max & 0xff);
219 net2272_ep_write(ep, EP_MAXPKT1, (max & 0xff00) >> 8);
220
221 /* set type, direction, address; reset fifo counters */
222 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
223 tmp = usb_endpoint_type(desc);
224 if (usb_endpoint_xfer_bulk(desc)) {
225 /* catch some particularly blatant driver bugs */
226 if ((dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
227 (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
228 spin_unlock_irqrestore(&dev->lock, flags);
229 return -ERANGE;
230 }
231 }
232 ep->is_iso = usb_endpoint_xfer_isoc(desc) ? 1 : 0;
233 tmp <<= ENDPOINT_TYPE;
234 tmp |= ((desc->bEndpointAddress & 0x0f) << ENDPOINT_NUMBER);
235 tmp |= usb_endpoint_dir_in(desc) << ENDPOINT_DIRECTION;
236 tmp |= (1 << ENDPOINT_ENABLE);
237
238 /* for OUT transfers, block the rx fifo until a read is posted */
239 ep->is_in = usb_endpoint_dir_in(desc);
240 if (!ep->is_in)
241 net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
242
243 net2272_ep_write(ep, EP_CFG, tmp);
244
245 /* enable irqs */
246 tmp = (1 << ep->num) | net2272_read(dev, IRQENB0);
247 net2272_write(dev, IRQENB0, tmp);
248
249 tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
250 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
251 | net2272_ep_read(ep, EP_IRQENB);
252 net2272_ep_write(ep, EP_IRQENB, tmp);
253
254 tmp = desc->bEndpointAddress;
255 dev_dbg(dev->dev, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n",
256 _ep->name, tmp & 0x0f, PIPEDIR(tmp),
257 type_string(desc->bmAttributes), max,
258 net2272_ep_read(ep, EP_CFG));
259
260 spin_unlock_irqrestore(&dev->lock, flags);
261 return 0;
262 }
263
264 static void net2272_ep_reset(struct net2272_ep *ep)
265 {
266 u8 tmp;
267
268 ep->desc = NULL;
269 INIT_LIST_HEAD(&ep->queue);
270
271 ep->ep.maxpacket = ~0;
272 ep->ep.ops = &net2272_ep_ops;
273
274 /* disable irqs, endpoint */
275 net2272_ep_write(ep, EP_IRQENB, 0);
276
277 /* init to our chosen defaults, notably so that we NAK OUT
278 * packets until the driver queues a read.
279 */
280 tmp = (1 << NAK_OUT_PACKETS_MODE) | (1 << ALT_NAK_OUT_PACKETS);
281 net2272_ep_write(ep, EP_RSPSET, tmp);
282
283 tmp = (1 << INTERRUPT_MODE) | (1 << HIDE_STATUS_PHASE);
284 if (ep->num != 0)
285 tmp |= (1 << ENDPOINT_TOGGLE) | (1 << ENDPOINT_HALT);
286
287 net2272_ep_write(ep, EP_RSPCLR, tmp);
288
289 /* scrub most status bits, and flush any fifo state */
290 net2272_ep_write(ep, EP_STAT0,
291 (1 << DATA_IN_TOKEN_INTERRUPT)
292 | (1 << DATA_OUT_TOKEN_INTERRUPT)
293 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
294 | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
295 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
296
297 net2272_ep_write(ep, EP_STAT1,
298 (1 << TIMEOUT)
299 | (1 << USB_OUT_ACK_SENT)
300 | (1 << USB_OUT_NAK_SENT)
301 | (1 << USB_IN_ACK_RCVD)
302 | (1 << USB_IN_NAK_SENT)
303 | (1 << USB_STALL_SENT)
304 | (1 << LOCAL_OUT_ZLP)
305 | (1 << BUFFER_FLUSH));
306
307     /* fifo size is handled separately */
308 }
309
310 static int net2272_disable(struct usb_ep *_ep)
311 {
312 struct net2272_ep *ep;
313 unsigned long flags;
314
315 ep = container_of(_ep, struct net2272_ep, ep);
316 if (!_ep || !ep->desc || _ep->name == ep0name)
317 return -EINVAL;
318
319 spin_lock_irqsave(&ep->dev->lock, flags);
320 net2272_dequeue_all(ep);
321 net2272_ep_reset(ep);
322
323 dev_vdbg(ep->dev->dev, "disabled %s\n", _ep->name);
324
325 spin_unlock_irqrestore(&ep->dev->lock, flags);
326 return 0;
327 }
328
329 /*---------------------------------------------------------------------------*/
330
331 static struct usb_request *
332 net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
333 {
334 struct net2272_ep *ep;
335 struct net2272_request *req;
336
337 if (!_ep)
338 return NULL;
339 ep = container_of(_ep, struct net2272_ep, ep);
340
341 req = kzalloc(sizeof(*req), gfp_flags);
342 if (!req)
343 return NULL;
344
345 req->req.dma = DMA_ADDR_INVALID;
346 INIT_LIST_HEAD(&req->queue);
347
348 return &req->req;
349 }
350
351 static void
352 net2272_free_request(struct usb_ep *_ep, struct usb_request *_req)
353 {
354 struct net2272_ep *ep;
355 struct net2272_request *req;
356
357 ep = container_of(_ep, struct net2272_ep, ep);
358 if (!_ep || !_req)
359 return;
360
361 req = container_of(_req, struct net2272_request, req);
362 WARN_ON(!list_empty(&req->queue));
363 kfree(req);
364 }
365
366 static void
367 net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status)
368 {
369 struct net2272 *dev;
370 unsigned stopped = ep->stopped;
371
372 if (ep->num == 0) {
373 if (ep->dev->protocol_stall) {
374 ep->stopped = 1;
375 set_halt(ep);
376 }
377 allow_status(ep);
378 }
379
380 list_del_init(&req->queue);
381
382 if (req->req.status == -EINPROGRESS)
383 req->req.status = status;
384 else
385 status = req->req.status;
386
387 dev = ep->dev;
388 if (use_dma && req->mapped) {
389 dma_unmap_single(dev->dev, req->req.dma, req->req.length,
390 ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
391 req->req.dma = DMA_ADDR_INVALID;
392 req->mapped = 0;
393 }
394
395 if (status && status != -ESHUTDOWN)
396 dev_vdbg(dev->dev, "complete %s req %p stat %d len %u/%u buf %p\n",
397 ep->ep.name, &req->req, status,
398 req->req.actual, req->req.length, req->req.buf);
399
400 /* don't modify queue heads during completion callback */
401 ep->stopped = 1;
402 spin_unlock(&dev->lock);
403 req->req.complete(&ep->ep, &req->req);
404 spin_lock(&dev->lock);
405 ep->stopped = stopped;
406 }
407
408 static int
409 net2272_write_packet(struct net2272_ep *ep, u8 *buf,
410 struct net2272_request *req, unsigned max)
411 {
412 u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
413 u16 *bufp;
414 unsigned length, count;
415 u8 tmp;
416
417 length = min(req->req.length - req->req.actual, max);
418 req->req.actual += length;
419
420 dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n",
421 ep->ep.name, req, max, length,
422 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
423
424 count = length;
425 bufp = (u16 *)buf;
426
427 while (likely(count >= 2)) {
428 /* no byte-swap required; chip endian set during init */
429 writew(*bufp++, ep_data);
430 count -= 2;
431 }
432 buf = (u8 *)bufp;
433
434 /* write final byte by placing the NET2272 into 8-bit mode */
435 if (unlikely(count)) {
436 tmp = net2272_read(ep->dev, LOCCTL);
437 net2272_write(ep->dev, LOCCTL, tmp & ~(1 << DATA_WIDTH));
438 writeb(*buf, ep_data);
439 net2272_write(ep->dev, LOCCTL, tmp);
440 }
441 return length;
442 }
443
444 /* returns: 0: still running, 1: completed, negative: errno */
445 static int
446 net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req)
447 {
448 u8 *buf;
449 unsigned count, max;
450 int status;
451
452 dev_vdbg(ep->dev->dev, "write_fifo %s actual %d len %d\n",
453 ep->ep.name, req->req.actual, req->req.length);
454
455 /*
456 * Keep loading the endpoint until the final packet is loaded,
457 * or the endpoint buffer is full.
458 */
459 top:
460 /*
461 * Clear interrupt status
462 * - Packet Transmitted interrupt will become set again when the
463 * host successfully takes another packet
464 */
465 net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
466 while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL))) {
467 buf = req->req.buf + req->req.actual;
468 prefetch(buf);
469
470 /* force pagesel */
471 net2272_ep_read(ep, EP_STAT0);
472
473 max = (net2272_ep_read(ep, EP_AVAIL1) << 8) |
474 (net2272_ep_read(ep, EP_AVAIL0));
475
476 if (max < ep->ep.maxpacket)
477 max = (net2272_ep_read(ep, EP_AVAIL1) << 8)
478 | (net2272_ep_read(ep, EP_AVAIL0));
479
480 count = net2272_write_packet(ep, buf, req, max);
481 /* see if we are done */
482 if (req->req.length == req->req.actual) {
483 /* validate short or zlp packet */
484 if (count < ep->ep.maxpacket)
485 set_fifo_bytecount(ep, 0);
486 net2272_done(ep, req, 0);
487
488 if (!list_empty(&ep->queue)) {
489 req = list_entry(ep->queue.next,
490 struct net2272_request,
491 queue);
492 status = net2272_kick_dma(ep, req);
493
494 if (status < 0)
495 if ((net2272_ep_read(ep, EP_STAT0)
496 & (1 << BUFFER_EMPTY)))
497 goto top;
498 }
499 return 1;
500 }
501 net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
502 }
503 return 0;
504 }
505
506 static void
507 net2272_out_flush(struct net2272_ep *ep)
508 {
509 ASSERT_OUT_NAKING(ep);
510
511 net2272_ep_write(ep, EP_STAT0, (1 << DATA_OUT_TOKEN_INTERRUPT)
512 | (1 << DATA_PACKET_RECEIVED_INTERRUPT));
513 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
514 }
515
516 static int
517 net2272_read_packet(struct net2272_ep *ep, u8 *buf,
518 struct net2272_request *req, unsigned avail)
519 {
520 u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
521 unsigned is_short;
522 u16 *bufp;
523
524 req->req.actual += avail;
525
526 dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n",
527 ep->ep.name, req, avail,
528 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
529
530 is_short = (avail < ep->ep.maxpacket);
531
532 if (unlikely(avail == 0)) {
533 /* remove any zlp from the buffer */
534 (void)readw(ep_data);
535 return is_short;
536 }
537
538 /* Ensure we get the final byte */
539 if (unlikely(avail % 2))
540 avail++;
541 bufp = (u16 *)buf;
542
543 do {
544 *bufp++ = readw(ep_data);
545 avail -= 2;
546 } while (avail);
547
548 /*
549      * To avoid a false endpoint-available race condition, EP_STAT0 must be
550      * read twice in the case of a short transfer
551 */
552 if (net2272_ep_read(ep, EP_STAT0) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT))
553 net2272_ep_read(ep, EP_STAT0);
554
555 return is_short;
556 }
557
558 static int
559 net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req)
560 {
561 u8 *buf;
562 unsigned is_short;
563 int count;
564 int tmp;
565 int cleanup = 0;
566 int status = -1;
567
568 dev_vdbg(ep->dev->dev, "read_fifo %s actual %d len %d\n",
569 ep->ep.name, req->req.actual, req->req.length);
570
571 top:
572 do {
573 buf = req->req.buf + req->req.actual;
574 prefetchw(buf);
575
576 count = (net2272_ep_read(ep, EP_AVAIL1) << 8)
577 | net2272_ep_read(ep, EP_AVAIL0);
578
579 net2272_ep_write(ep, EP_STAT0,
580 (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) |
581 (1 << DATA_PACKET_RECEIVED_INTERRUPT));
582
583 tmp = req->req.length - req->req.actual;
584
585 if (count > tmp) {
586 if ((tmp % ep->ep.maxpacket) != 0) {
587 dev_err(ep->dev->dev,
588 "%s out fifo %d bytes, expected %d\n",
589 ep->ep.name, count, tmp);
590 cleanup = 1;
591 }
592 count = (tmp > 0) ? tmp : 0;
593 }
594
595 is_short = net2272_read_packet(ep, buf, req, count);
596
597 /* completion */
598 if (unlikely(cleanup || is_short ||
599 ((req->req.actual == req->req.length)
600 && !req->req.zero))) {
601
602 if (cleanup) {
603 net2272_out_flush(ep);
604 net2272_done(ep, req, -EOVERFLOW);
605 } else
606 net2272_done(ep, req, 0);
607
608 /* re-initialize endpoint transfer registers
609 * otherwise they may result in erroneous pre-validation
610 * for subsequent control reads
611 */
612 if (unlikely(ep->num == 0)) {
613 net2272_ep_write(ep, EP_TRANSFER2, 0);
614 net2272_ep_write(ep, EP_TRANSFER1, 0);
615 net2272_ep_write(ep, EP_TRANSFER0, 0);
616 }
617
618 if (!list_empty(&ep->queue)) {
619 req = list_entry(ep->queue.next,
620 struct net2272_request, queue);
621 status = net2272_kick_dma(ep, req);
622 if ((status < 0) &&
623 !(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)))
624 goto top;
625 }
626 return 1;
627 }
628 } while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)));
629
630 return 0;
631 }
632
633 static void
634 net2272_pio_advance(struct net2272_ep *ep)
635 {
636 struct net2272_request *req;
637
638 if (unlikely(list_empty(&ep->queue)))
639 return;
640
641 req = list_entry(ep->queue.next, struct net2272_request, queue);
642 (ep->is_in ? net2272_write_fifo : net2272_read_fifo)(ep, req);
643 }
644
645 /* returns 0 on success, else negative errno */
646 static int
647 net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf,
648 unsigned len, unsigned dir)
649 {
650 dev_vdbg(dev->dev, "request_dma ep %d buf %08x len %d dir %d\n",
651 ep, buf, len, dir);
652
653 /* The NET2272 only supports a single dma channel */
654 if (dev->dma_busy)
655 return -EBUSY;
656 /*
657 * EP_TRANSFER (used to determine the number of bytes received
658 * in an OUT transfer) is 24 bits wide; don't ask for more than that.
659 */
660 if ((dir == 1) && (len > 0x1000000))
661 return -EINVAL;
662
663 dev->dma_busy = 1;
664
665 /* initialize platform's dma */
666 #ifdef CONFIG_PCI
667 /* NET2272 addr, buffer addr, length, etc. */
668 switch (dev->dev_id) {
669 case PCI_DEVICE_ID_RDK1:
670 /* Setup PLX 9054 DMA mode */
671 writel((1 << LOCAL_BUS_WIDTH) |
672 (1 << TA_READY_INPUT_ENABLE) |
673 (0 << LOCAL_BURST_ENABLE) |
674 (1 << DONE_INTERRUPT_ENABLE) |
675 (1 << LOCAL_ADDRESSING_MODE) |
676 (1 << DEMAND_MODE) |
677 (1 << DMA_EOT_ENABLE) |
678 (1 << FAST_SLOW_TERMINATE_MODE_SELECT) |
679 (1 << DMA_CHANNEL_INTERRUPT_SELECT),
680 dev->rdk1.plx9054_base_addr + DMAMODE0);
681
682 writel(0x100000, dev->rdk1.plx9054_base_addr + DMALADR0);
683 writel(buf, dev->rdk1.plx9054_base_addr + DMAPADR0);
684 writel(len, dev->rdk1.plx9054_base_addr + DMASIZ0);
685 writel((dir << DIRECTION_OF_TRANSFER) |
686 (1 << INTERRUPT_AFTER_TERMINAL_COUNT),
687 dev->rdk1.plx9054_base_addr + DMADPR0);
688 writel((1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE) |
689 readl(dev->rdk1.plx9054_base_addr + INTCSR),
690 dev->rdk1.plx9054_base_addr + INTCSR);
691
692 break;
693 }
694 #endif
695
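	/* Program the NET2272 side of the transfer: enable the DMA request for
	 * the selected endpoint with the board-specific signal polarities.
	 * (ep >> 1) maps ep-a (1) / ep-b (2) onto the single DMA_ENDPOINT_SELECT bit.
	 */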
696 net2272_write(dev, DMAREQ,
697 (0 << DMA_BUFFER_VALID) |
698 (1 << DMA_REQUEST_ENABLE) |
699 (1 << DMA_CONTROL_DACK) |
700 (dev->dma_eot_polarity << EOT_POLARITY) |
701 (dev->dma_dack_polarity << DACK_POLARITY) |
702 (dev->dma_dreq_polarity << DREQ_POLARITY) |
703 ((ep >> 1) << DMA_ENDPOINT_SELECT));
704
705 (void) net2272_read(dev, SCRATCH);
706
707 return 0;
708 }
709
710 static void
711 net2272_start_dma(struct net2272 *dev)
712 {
713 /* start platform's dma controller */
714 #ifdef CONFIG_PCI
715 switch (dev->dev_id) {
716 case PCI_DEVICE_ID_RDK1:
717 writeb((1 << CHANNEL_ENABLE) | (1 << CHANNEL_START),
718 dev->rdk1.plx9054_base_addr + DMACSR0);
719 break;
720 }
721 #endif
722 }
723
724 /* returns 0 on success, else negative errno */
725 static int
726 net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req)
727 {
728 unsigned size;
729 u8 tmp;
730
731 if (!use_dma || (ep->num < 1) || (ep->num > 2) || !ep->dma)
732 return -EINVAL;
733
734 /* don't use dma for odd-length transfers
735 * otherwise, we'd need to deal with the last byte with pio
736 */
737 if (req->req.length & 1)
738 return -EINVAL;
739
740 dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08llx\n",
741 ep->ep.name, req, (unsigned long long) req->req.dma);
742
743 net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
744
745 /* The NET2272 can only use DMA on one endpoint at a time */
746 if (ep->dev->dma_busy)
747 return -EBUSY;
748
749 /* Make sure we only DMA an even number of bytes (we'll use
750 * pio to complete the transfer)
751 */
752 size = req->req.length;
753 size &= ~1;
754
755 /* device-to-host transfer */
756 if (ep->is_in) {
757 /* initialize platform's dma controller */
758 if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0))
759 /* unable to obtain DMA channel; return error and use pio mode */
760 return -EBUSY;
761 req->req.actual += size;
762
763 /* host-to-device transfer */
764 } else {
765 tmp = net2272_ep_read(ep, EP_STAT0);
766
767 /* initialize platform's dma controller */
768 if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1))
769 /* unable to obtain DMA channel; return error and use pio mode */
770 return -EBUSY;
771
772 if (!(tmp & (1 << BUFFER_EMPTY)))
773 ep->not_empty = 1;
774 else
775 ep->not_empty = 0;
776
777
778 /* allow the endpoint's buffer to fill */
779 net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
780
781 /* this transfer completed and data's already in the fifo
782 * return error so pio gets used.
783 */
784 if (tmp & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
785
786 /* deassert dreq */
787 net2272_write(ep->dev, DMAREQ,
788 (0 << DMA_BUFFER_VALID) |
789 (0 << DMA_REQUEST_ENABLE) |
790 (1 << DMA_CONTROL_DACK) |
791 (ep->dev->dma_eot_polarity << EOT_POLARITY) |
792 (ep->dev->dma_dack_polarity << DACK_POLARITY) |
793 (ep->dev->dma_dreq_polarity << DREQ_POLARITY) |
794 ((ep->num >> 1) << DMA_ENDPOINT_SELECT));
795
796 return -EBUSY;
797 }
798 }
799
800 /* Don't use per-packet interrupts: use dma interrupts only */
801 net2272_ep_write(ep, EP_IRQENB, 0);
802
803 net2272_start_dma(ep->dev);
804
805 return 0;
806 }
807
808 static void net2272_cancel_dma(struct net2272 *dev)
809 {
810 #ifdef CONFIG_PCI
811 switch (dev->dev_id) {
812 case PCI_DEVICE_ID_RDK1:
813 writeb(0, dev->rdk1.plx9054_base_addr + DMACSR0);
814 writeb(1 << CHANNEL_ABORT, dev->rdk1.plx9054_base_addr + DMACSR0);
815 while (!(readb(dev->rdk1.plx9054_base_addr + DMACSR0) &
816 (1 << CHANNEL_DONE)))
817             continue;       /* wait for dma to stabilize */
818
819 /* dma abort generates an interrupt */
820 writeb(1 << CHANNEL_CLEAR_INTERRUPT,
821 dev->rdk1.plx9054_base_addr + DMACSR0);
822 break;
823 }
824 #endif
825
826 dev->dma_busy = 0;
827 }
828
829 /*---------------------------------------------------------------------------*/
830
831 static int
832 net2272_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
833 {
834 struct net2272_request *req;
835 struct net2272_ep *ep;
836 struct net2272 *dev;
837 unsigned long flags;
838 int status = -1;
839 u8 s;
840
841 req = container_of(_req, struct net2272_request, req);
842 if (!_req || !_req->complete || !_req->buf
843 || !list_empty(&req->queue))
844 return -EINVAL;
845 ep = container_of(_ep, struct net2272_ep, ep);
846 if (!_ep || (!ep->desc && ep->num != 0))
847 return -EINVAL;
848 dev = ep->dev;
849 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
850 return -ESHUTDOWN;
851
852 /* set up dma mapping in case the caller didn't */
853 if (use_dma && ep->dma && _req->dma == DMA_ADDR_INVALID) {
854 _req->dma = dma_map_single(dev->dev, _req->buf, _req->length,
855 ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
856 req->mapped = 1;
857 }
858
859 dev_vdbg(dev->dev, "%s queue req %p, len %d buf %p dma %08llx %s\n",
860 _ep->name, _req, _req->length, _req->buf,
861 (unsigned long long) _req->dma, _req->zero ? "zero" : "!zero");
862
863 spin_lock_irqsave(&dev->lock, flags);
864
865 _req->status = -EINPROGRESS;
866 _req->actual = 0;
867
868 /* kickstart this i/o queue? */
869 if (list_empty(&ep->queue) && !ep->stopped) {
870 /* maybe there's no control data, just status ack */
871 if (ep->num == 0 && _req->length == 0) {
872 net2272_done(ep, req, 0);
873 dev_vdbg(dev->dev, "%s status ack\n", ep->ep.name);
874 goto done;
875 }
876
877 /* Return zlp, don't let it block subsequent packets */
878 s = net2272_ep_read(ep, EP_STAT0);
879 if (s & (1 << BUFFER_EMPTY)) {
880             /* Buffer is empty; check for a blocking zlp and handle it */
881 if ((s & (1 << NAK_OUT_PACKETS)) &&
882 net2272_ep_read(ep, EP_STAT1) & (1 << LOCAL_OUT_ZLP)) {
883 dev_dbg(dev->dev, "WARNING: returning ZLP short packet termination!\n");
884 /*
885 * Request is going to terminate with a short packet ...
886 * hope the client is ready for it!
887 */
888 status = net2272_read_fifo(ep, req);
889 /* clear short packet naking */
890 net2272_ep_write(ep, EP_STAT0, (1 << NAK_OUT_PACKETS));
891 goto done;
892 }
893 }
894
895 /* try dma first */
896 status = net2272_kick_dma(ep, req);
897
898 if (status < 0) {
899 /* dma failed (most likely in use by another endpoint)
900              * fall back to pio
901 */
902 status = 0;
903
904 if (ep->is_in)
905 status = net2272_write_fifo(ep, req);
906 else {
907 s = net2272_ep_read(ep, EP_STAT0);
908 if ((s & (1 << BUFFER_EMPTY)) == 0)
909 status = net2272_read_fifo(ep, req);
910 }
911
912 if (unlikely(status != 0)) {
913 if (status > 0)
914 status = 0;
915 req = NULL;
916 }
917 }
918 }
919 if (likely(req != 0))
920 list_add_tail(&req->queue, &ep->queue);
921
922 if (likely(!list_empty(&ep->queue)))
923 net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
924 done:
925 spin_unlock_irqrestore(&dev->lock, flags);
926
927 return 0;
928 }
929
930 /* dequeue ALL requests */
931 static void
932 net2272_dequeue_all(struct net2272_ep *ep)
933 {
934 struct net2272_request *req;
935
936 /* called with spinlock held */
937 ep->stopped = 1;
938
939 while (!list_empty(&ep->queue)) {
940 req = list_entry(ep->queue.next,
941 struct net2272_request,
942 queue);
943 net2272_done(ep, req, -ESHUTDOWN);
944 }
945 }
946
947 /* dequeue JUST ONE request */
948 static int
949 net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
950 {
951 struct net2272_ep *ep;
952 struct net2272_request *req;
953 unsigned long flags;
954 int stopped;
955
956 ep = container_of(_ep, struct net2272_ep, ep);
957 if (!_ep || (!ep->desc && ep->num != 0) || !_req)
958 return -EINVAL;
959
960 spin_lock_irqsave(&ep->dev->lock, flags);
961 stopped = ep->stopped;
962 ep->stopped = 1;
963
964 /* make sure it's still queued on this endpoint */
965 list_for_each_entry(req, &ep->queue, queue) {
966 if (&req->req == _req)
967 break;
968 }
969 if (&req->req != _req) {
970 spin_unlock_irqrestore(&ep->dev->lock, flags);
971 return -EINVAL;
972 }
973
974 /* queue head may be partially complete */
975 if (ep->queue.next == &req->queue) {
976 dev_dbg(ep->dev->dev, "unlink (%s) pio\n", _ep->name);
977 net2272_done(ep, req, -ECONNRESET);
978 }
979 req = NULL;
980 ep->stopped = stopped;
981
982 spin_unlock_irqrestore(&ep->dev->lock, flags);
983 return 0;
984 }
985
986 /*---------------------------------------------------------------------------*/
987
988 static int
989 net2272_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
990 {
991 struct net2272_ep *ep;
992 unsigned long flags;
993 int ret = 0;
994
995 ep = container_of(_ep, struct net2272_ep, ep);
996 if (!_ep || (!ep->desc && ep->num != 0))
997 return -EINVAL;
998 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
999 return -ESHUTDOWN;
1000 if (ep->desc /* not ep0 */ && usb_endpoint_xfer_isoc(ep->desc))
1001 return -EINVAL;
1002
1003 spin_lock_irqsave(&ep->dev->lock, flags);
1004 if (!list_empty(&ep->queue))
1005 ret = -EAGAIN;
1006 else if (ep->is_in && value && net2272_fifo_status(_ep) != 0)
1007 ret = -EAGAIN;
1008 else {
1009 dev_vdbg(ep->dev->dev, "%s %s %s\n", _ep->name,
1010 value ? "set" : "clear",
1011 wedged ? "wedge" : "halt");
1012 /* set/clear */
1013 if (value) {
1014 if (ep->num == 0)
1015 ep->dev->protocol_stall = 1;
1016 else
1017 set_halt(ep);
1018 if (wedged)
1019 ep->wedged = 1;
1020 } else {
1021 clear_halt(ep);
1022 ep->wedged = 0;
1023 }
1024 }
1025 spin_unlock_irqrestore(&ep->dev->lock, flags);
1026
1027 return ret;
1028 }
1029
1030 static int
1031 net2272_set_halt(struct usb_ep *_ep, int value)
1032 {
1033 return net2272_set_halt_and_wedge(_ep, value, 0);
1034 }
1035
1036 static int
1037 net2272_set_wedge(struct usb_ep *_ep)
1038 {
1039 if (!_ep || _ep->name == ep0name)
1040 return -EINVAL;
1041 return net2272_set_halt_and_wedge(_ep, 1, 1);
1042 }
1043
1044 static int
1045 net2272_fifo_status(struct usb_ep *_ep)
1046 {
1047 struct net2272_ep *ep;
1048 u16 avail;
1049
1050 ep = container_of(_ep, struct net2272_ep, ep);
1051 if (!_ep || (!ep->desc && ep->num != 0))
1052 return -ENODEV;
1053 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1054 return -ESHUTDOWN;
1055
1056 avail = net2272_ep_read(ep, EP_AVAIL1) << 8;
1057 avail |= net2272_ep_read(ep, EP_AVAIL0);
1058 if (avail > ep->fifo_size)
1059 return -EOVERFLOW;
1060 if (ep->is_in)
1061 avail = ep->fifo_size - avail;
1062 return avail;
1063 }
1064
1065 static void
1066 net2272_fifo_flush(struct usb_ep *_ep)
1067 {
1068 struct net2272_ep *ep;
1069
1070 ep = container_of(_ep, struct net2272_ep, ep);
1071 if (!_ep || (!ep->desc && ep->num != 0))
1072 return;
1073 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1074 return;
1075
1076 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
1077 }
1078
1079 static struct usb_ep_ops net2272_ep_ops = {
1080 .enable = net2272_enable,
1081 .disable = net2272_disable,
1082
1083 .alloc_request = net2272_alloc_request,
1084 .free_request = net2272_free_request,
1085
1086 .queue = net2272_queue,
1087 .dequeue = net2272_dequeue,
1088
1089 .set_halt = net2272_set_halt,
1090 .set_wedge = net2272_set_wedge,
1091 .fifo_status = net2272_fifo_status,
1092 .fifo_flush = net2272_fifo_flush,
1093 };
1094
1095 /*---------------------------------------------------------------------------*/
1096
1097 static int
1098 net2272_get_frame(struct usb_gadget *_gadget)
1099 {
1100 struct net2272 *dev;
1101 unsigned long flags;
1102 u16 ret;
1103
1104 if (!_gadget)
1105 return -ENODEV;
1106 dev = container_of(_gadget, struct net2272, gadget);
1107 spin_lock_irqsave(&dev->lock, flags);
1108
1109 ret = net2272_read(dev, FRAME1) << 8;
1110 ret |= net2272_read(dev, FRAME0);
1111
1112 spin_unlock_irqrestore(&dev->lock, flags);
1113 return ret;
1114 }
1115
1116 static int
1117 net2272_wakeup(struct usb_gadget *_gadget)
1118 {
1119 struct net2272 *dev;
1120 u8 tmp;
1121 unsigned long flags;
1122
1123 if (!_gadget)
1124 return 0;
1125 dev = container_of(_gadget, struct net2272, gadget);
1126
1127 spin_lock_irqsave(&dev->lock, flags);
1128 tmp = net2272_read(dev, USBCTL0);
1129 if (tmp & (1 << IO_WAKEUP_ENABLE))
1130 net2272_write(dev, USBCTL1, (1 << GENERATE_RESUME));
1131
1132 spin_unlock_irqrestore(&dev->lock, flags);
1133
1134 return 0;
1135 }
1136
1137 static int
1138 net2272_set_selfpowered(struct usb_gadget *_gadget, int value)
1139 {
1140 struct net2272 *dev;
1141
1142 if (!_gadget)
1143 return -ENODEV;
1144 dev = container_of(_gadget, struct net2272, gadget);
1145
1146 dev->is_selfpowered = value;
1147
1148 return 0;
1149 }
1150
1151 static int
1152 net2272_pullup(struct usb_gadget *_gadget, int is_on)
1153 {
1154 struct net2272 *dev;
1155 u8 tmp;
1156 unsigned long flags;
1157
1158 if (!_gadget)
1159 return -ENODEV;
1160 dev = container_of(_gadget, struct net2272, gadget);
1161
1162 spin_lock_irqsave(&dev->lock, flags);
1163 tmp = net2272_read(dev, USBCTL0);
1164 dev->softconnect = (is_on != 0);
1165 if (is_on)
1166 tmp |= (1 << USB_DETECT_ENABLE);
1167 else
1168 tmp &= ~(1 << USB_DETECT_ENABLE);
1169 net2272_write(dev, USBCTL0, tmp);
1170 spin_unlock_irqrestore(&dev->lock, flags);
1171
1172 return 0;
1173 }
1174
1175 static int net2272_start(struct usb_gadget_driver *driver,
1176 int (*bind)(struct usb_gadget *));
1177 static int net2272_stop(struct usb_gadget_driver *driver);
1178
1179 static const struct usb_gadget_ops net2272_ops = {
1180 .get_frame = net2272_get_frame,
1181 .wakeup = net2272_wakeup,
1182 .set_selfpowered = net2272_set_selfpowered,
1183 .pullup = net2272_pullup,
1184 .start = net2272_start,
1185 .stop = net2272_stop,
1186 };
1187
1188 /*---------------------------------------------------------------------------*/
1189
1190 static ssize_t
1191 net2272_show_registers(struct device *_dev, struct device_attribute *attr, char *buf)
1192 {
1193 struct net2272 *dev;
1194 char *next;
1195 unsigned size, t;
1196 unsigned long flags;
1197 u8 t1, t2;
1198 int i;
1199 const char *s;
1200
1201 dev = dev_get_drvdata(_dev);
1202 next = buf;
1203 size = PAGE_SIZE;
1204 spin_lock_irqsave(&dev->lock, flags);
1205
1206 if (dev->driver)
1207 s = dev->driver->driver.name;
1208 else
1209 s = "(none)";
1210
1211 /* Main Control Registers */
1212 t = scnprintf(next, size, "%s version %s,"
1213 "chiprev %02x, locctl %02x\n"
1214 "irqenb0 %02x irqenb1 %02x "
1215 "irqstat0 %02x irqstat1 %02x\n",
1216 driver_name, driver_vers, dev->chiprev,
1217 net2272_read(dev, LOCCTL),
1218 net2272_read(dev, IRQENB0),
1219 net2272_read(dev, IRQENB1),
1220 net2272_read(dev, IRQSTAT0),
1221 net2272_read(dev, IRQSTAT1));
1222 size -= t;
1223 next += t;
1224
1225 /* DMA */
1226 t1 = net2272_read(dev, DMAREQ);
1227 t = scnprintf(next, size, "\ndmareq %02x: %s %s%s%s%s\n",
1228 t1, ep_name[(t1 & 0x01) + 1],
1229 t1 & (1 << DMA_CONTROL_DACK) ? "dack " : "",
1230 t1 & (1 << DMA_REQUEST_ENABLE) ? "reqenb " : "",
1231 t1 & (1 << DMA_REQUEST) ? "req " : "",
1232 t1 & (1 << DMA_BUFFER_VALID) ? "valid " : "");
1233 size -= t;
1234 next += t;
1235
1236 /* USB Control Registers */
1237 t1 = net2272_read(dev, USBCTL1);
1238 if (t1 & (1 << VBUS_PIN)) {
1239 if (t1 & (1 << USB_HIGH_SPEED))
1240 s = "high speed";
1241 else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1242 s = "powered";
1243 else
1244 s = "full speed";
1245 } else
1246 s = "not attached";
1247 t = scnprintf(next, size,
1248 "usbctl0 %02x usbctl1 %02x addr 0x%02x (%s)\n",
1249 net2272_read(dev, USBCTL0), t1,
1250 net2272_read(dev, OURADDR), s);
1251 size -= t;
1252 next += t;
1253
1254 /* Endpoint Registers */
1255 for (i = 0; i < 4; ++i) {
1256 struct net2272_ep *ep;
1257
1258 ep = &dev->ep[i];
1259 if (i && !ep->desc)
1260 continue;
1261
1262 t1 = net2272_ep_read(ep, EP_CFG);
1263 t2 = net2272_ep_read(ep, EP_RSPSET);
1264 t = scnprintf(next, size,
1265 "\n%s\tcfg %02x rsp (%02x) %s%s%s%s%s%s%s%s"
1266 "irqenb %02x\n",
1267 ep->ep.name, t1, t2,
1268 (t2 & (1 << ALT_NAK_OUT_PACKETS)) ? "NAK " : "",
1269 (t2 & (1 << HIDE_STATUS_PHASE)) ? "hide " : "",
1270 (t2 & (1 << AUTOVALIDATE)) ? "auto " : "",
1271 (t2 & (1 << INTERRUPT_MODE)) ? "interrupt " : "",
1272 (t2 & (1 << CONTROL_STATUS_PHASE_HANDSHAKE)) ? "status " : "",
1273 (t2 & (1 << NAK_OUT_PACKETS_MODE)) ? "NAKmode " : "",
1274 (t2 & (1 << ENDPOINT_TOGGLE)) ? "DATA1 " : "DATA0 ",
1275 (t2 & (1 << ENDPOINT_HALT)) ? "HALT " : "",
1276 net2272_ep_read(ep, EP_IRQENB));
1277 size -= t;
1278 next += t;
1279
1280 t = scnprintf(next, size,
1281 "\tstat0 %02x stat1 %02x avail %04x "
1282 "(ep%d%s-%s)%s\n",
1283 net2272_ep_read(ep, EP_STAT0),
1284 net2272_ep_read(ep, EP_STAT1),
1285 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0),
1286 t1 & 0x0f,
1287 ep->is_in ? "in" : "out",
1288 type_string(t1 >> 5),
1289 ep->stopped ? "*" : "");
1290 size -= t;
1291 next += t;
1292
1293 t = scnprintf(next, size,
1294 "\tep_transfer %06x\n",
1295 ((net2272_ep_read(ep, EP_TRANSFER2) & 0xff) << 16) |
1296 ((net2272_ep_read(ep, EP_TRANSFER1) & 0xff) << 8) |
1297 ((net2272_ep_read(ep, EP_TRANSFER0) & 0xff)));
1298 size -= t;
1299 next += t;
1300
1301 t1 = net2272_ep_read(ep, EP_BUFF_STATES) & 0x03;
1302 t2 = (net2272_ep_read(ep, EP_BUFF_STATES) >> 2) & 0x03;
1303 t = scnprintf(next, size,
1304 "\tbuf-a %s buf-b %s\n",
1305 buf_state_string(t1),
1306 buf_state_string(t2));
1307 size -= t;
1308 next += t;
1309 }
1310
1311 spin_unlock_irqrestore(&dev->lock, flags);
1312
1313 return PAGE_SIZE - size;
1314 }
1315 static DEVICE_ATTR(registers, S_IRUGO, net2272_show_registers, NULL);
1316
1317 /*---------------------------------------------------------------------------*/
1318
1319 static void
1320 net2272_set_fifo_mode(struct net2272 *dev, int mode)
1321 {
1322 u8 tmp;
1323
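	/* the fifo configuration occupies the top two bits of LOCCTL */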
1324 tmp = net2272_read(dev, LOCCTL) & 0x3f;
1325 tmp |= (mode << 6);
1326 net2272_write(dev, LOCCTL, tmp);
1327
1328 INIT_LIST_HEAD(&dev->gadget.ep_list);
1329
1330 /* always ep-a, ep-c ... maybe not ep-b */
1331 list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
1332
1333 switch (mode) {
1334 case 0:
1335 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1336 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 512;
1337 break;
1338 case 1:
1339 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1340 dev->ep[1].fifo_size = 1024;
1341 dev->ep[2].fifo_size = 512;
1342 break;
1343 case 2:
1344 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1345 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
1346 break;
1347 case 3:
1348 dev->ep[1].fifo_size = 1024;
1349 break;
1350 }
1351
1352 /* ep-c is always 2 512 byte buffers */
1353 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
1354 dev->ep[3].fifo_size = 512;
1355 }
1356
1357 /*---------------------------------------------------------------------------*/
1358
1359 static struct net2272 *the_controller;
1360
1361 static void
1362 net2272_usb_reset(struct net2272 *dev)
1363 {
1364 dev->gadget.speed = USB_SPEED_UNKNOWN;
1365
1366 net2272_cancel_dma(dev);
1367
1368 net2272_write(dev, IRQENB0, 0);
1369 net2272_write(dev, IRQENB1, 0);
1370
1371 /* clear irq state */
1372 net2272_write(dev, IRQSTAT0, 0xff);
1373 net2272_write(dev, IRQSTAT1, ~(1 << SUSPEND_REQUEST_INTERRUPT));
1374
1375 net2272_write(dev, DMAREQ,
1376 (0 << DMA_BUFFER_VALID) |
1377 (0 << DMA_REQUEST_ENABLE) |
1378 (1 << DMA_CONTROL_DACK) |
1379 (dev->dma_eot_polarity << EOT_POLARITY) |
1380 (dev->dma_dack_polarity << DACK_POLARITY) |
1381 (dev->dma_dreq_polarity << DREQ_POLARITY) |
1382 ((dma_ep >> 1) << DMA_ENDPOINT_SELECT));
1383
1384 net2272_cancel_dma(dev);
1385 net2272_set_fifo_mode(dev, (fifo_mode <= 3) ? fifo_mode : 0);
1386
1387    /* Set the NET2272 ep fifo data width to 16-bit mode. Note that for correct byte ordering
1388     * the higher-level gadget drivers are expected to convert data to little endian;
1389     * enable byte swapping for your local bus/cpu if needed by setting BYTE_SWAP in LOCCTL here.
1390 */
1391 net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) | (1 << DATA_WIDTH));
1392 net2272_write(dev, LOCCTL1, (dma_mode << DMA_MODE));
1393 }
1394
1395 static void
1396 net2272_usb_reinit(struct net2272 *dev)
1397 {
1398 int i;
1399
1400 /* basic endpoint init */
1401 for (i = 0; i < 4; ++i) {
1402 struct net2272_ep *ep = &dev->ep[i];
1403
1404 ep->ep.name = ep_name[i];
1405 ep->dev = dev;
1406 ep->num = i;
1407 ep->not_empty = 0;
1408
1409 if (use_dma && ep->num == dma_ep)
1410 ep->dma = 1;
1411
1412 if (i > 0 && i <= 3)
1413 ep->fifo_size = 512;
1414 else
1415 ep->fifo_size = 64;
1416 net2272_ep_reset(ep);
1417 }
1418 dev->ep[0].ep.maxpacket = 64;
1419
1420 dev->gadget.ep0 = &dev->ep[0].ep;
1421 dev->ep[0].stopped = 0;
1422 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
1423 }
1424
1425 static void
1426 net2272_ep0_start(struct net2272 *dev)
1427 {
1428 struct net2272_ep *ep0 = &dev->ep[0];
1429
1430 net2272_ep_write(ep0, EP_RSPSET,
1431 (1 << NAK_OUT_PACKETS_MODE) |
1432 (1 << ALT_NAK_OUT_PACKETS));
1433 net2272_ep_write(ep0, EP_RSPCLR,
1434 (1 << HIDE_STATUS_PHASE) |
1435 (1 << CONTROL_STATUS_PHASE_HANDSHAKE));
1436 net2272_write(dev, USBCTL0,
1437 (dev->softconnect << USB_DETECT_ENABLE) |
1438 (1 << USB_ROOT_PORT_WAKEUP_ENABLE) |
1439 (1 << IO_WAKEUP_ENABLE));
1440 net2272_write(dev, IRQENB0,
1441 (1 << SETUP_PACKET_INTERRUPT_ENABLE) |
1442 (1 << ENDPOINT_0_INTERRUPT_ENABLE) |
1443 (1 << DMA_DONE_INTERRUPT_ENABLE));
1444 net2272_write(dev, IRQENB1,
1445 (1 << VBUS_INTERRUPT_ENABLE) |
1446 (1 << ROOT_PORT_RESET_INTERRUPT_ENABLE) |
1447 (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE));
1448 }
1449
1450 /* when a driver is successfully registered, it will receive
1451 * control requests including set_configuration(), which enables
1452 * non-control requests. then usb traffic follows until a
1453 * disconnect is reported. then a host may connect again, or
1454 * the driver might get unbound.
1455 */
1456 static int net2272_start(struct usb_gadget_driver *driver,
1457 int (*bind)(struct usb_gadget *))
1458 {
1459 struct net2272 *dev = the_controller;
1460 int ret;
1461 unsigned i;
1462
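	/* this controller only accepts gadget drivers that declare
	 * high-speed (USB_SPEED_HIGH) support
	 */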
1463 if (!driver || !bind || !driver->unbind || !driver->setup ||
1464 driver->speed != USB_SPEED_HIGH)
1465 return -EINVAL;
1466 if (!dev)
1467 return -ENODEV;
1468 if (dev->driver)
1469 return -EBUSY;
1470
1471 for (i = 0; i < 4; ++i)
1472 dev->ep[i].irqs = 0;
1473 /* hook up the driver ... */
1474 dev->softconnect = 1;
1475 driver->driver.bus = NULL;
1476 dev->driver = driver;
1477 dev->gadget.dev.driver = &driver->driver;
1478 ret = bind(&dev->gadget);
1479 if (ret) {
1480 dev_dbg(dev->dev, "bind to driver %s --> %d\n",
1481 driver->driver.name, ret);
1482 dev->driver = NULL;
1483 dev->gadget.dev.driver = NULL;
1484 return ret;
1485 }
1486
1487 /* ... then enable host detection and ep0; and we're ready
1488 * for set_configuration as well as eventual disconnect.
1489 */
1490 net2272_ep0_start(dev);
1491
1492 dev_dbg(dev->dev, "%s ready\n", driver->driver.name);
1493
1494 return 0;
1495 }
1496
1497 static void
1498 stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver)
1499 {
1500 int i;
1501
1502 /* don't disconnect if it's not connected */
1503 if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1504 driver = NULL;
1505
1506 /* stop hardware; prevent new request submissions;
1507 * and kill any outstanding requests.
1508 */
1509 net2272_usb_reset(dev);
1510 for (i = 0; i < 4; ++i)
1511 net2272_dequeue_all(&dev->ep[i]);
1512
1513 /* report disconnect; the driver is already quiesced */
1514 if (driver) {
1515 spin_unlock(&dev->lock);
1516 driver->disconnect(&dev->gadget);
1517 spin_lock(&dev->lock);
1518
1519 }
1520 net2272_usb_reinit(dev);
1521 }
1522
1523 static int net2272_stop(struct usb_gadget_driver *driver)
1524 {
1525 struct net2272 *dev = the_controller;
1526 unsigned long flags;
1527
1528 if (!dev)
1529 return -ENODEV;
1530 if (!driver || driver != dev->driver)
1531 return -EINVAL;
1532
1533 spin_lock_irqsave(&dev->lock, flags);
1534 stop_activity(dev, driver);
1535 spin_unlock_irqrestore(&dev->lock, flags);
1536
1537 net2272_pullup(&dev->gadget, 0);
1538
1539 driver->unbind(&dev->gadget);
1540 dev->gadget.dev.driver = NULL;
1541 dev->driver = NULL;
1542
1543 dev_dbg(dev->dev, "unregistered driver '%s'\n", driver->driver.name);
1544 return 0;
1545 }
1546
1547 /*---------------------------------------------------------------------------*/
1548 /* handle ep-a/ep-b dma completions */
1549 static void
1550 net2272_handle_dma(struct net2272_ep *ep)
1551 {
1552 struct net2272_request *req;
1553 unsigned len;
1554 int status;
1555
1556 if (!list_empty(&ep->queue))
1557 req = list_entry(ep->queue.next,
1558 struct net2272_request, queue);
1559 else
1560 req = NULL;
1561
1562 dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req);
1563
1564 /* Ensure DREQ is de-asserted */
1565 net2272_write(ep->dev, DMAREQ,
1566 (0 << DMA_BUFFER_VALID)
1567 | (0 << DMA_REQUEST_ENABLE)
1568 | (1 << DMA_CONTROL_DACK)
1569 | (ep->dev->dma_eot_polarity << EOT_POLARITY)
1570 | (ep->dev->dma_dack_polarity << DACK_POLARITY)
1571 | (ep->dev->dma_dreq_polarity << DREQ_POLARITY)
1572 | ((ep->dma >> 1) << DMA_ENDPOINT_SELECT));
1573
1574 ep->dev->dma_busy = 0;
1575
1576 net2272_ep_write(ep, EP_IRQENB,
1577 (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1578 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1579 | net2272_ep_read(ep, EP_IRQENB));
1580
1581 /* device-to-host transfer completed */
1582 if (ep->is_in) {
1583 /* validate a short packet or zlp if necessary */
1584 if ((req->req.length % ep->ep.maxpacket != 0) ||
1585 req->req.zero)
1586 set_fifo_bytecount(ep, 0);
1587
1588 net2272_done(ep, req, 0);
1589 if (!list_empty(&ep->queue)) {
1590 req = list_entry(ep->queue.next,
1591 struct net2272_request, queue);
1592 status = net2272_kick_dma(ep, req);
1593 if (status < 0)
1594 net2272_pio_advance(ep);
1595 }
1596
1597 /* host-to-device transfer completed */
1598 } else {
1599 /* terminated with a short packet? */
1600 if (net2272_read(ep->dev, IRQSTAT0) &
1601 (1 << DMA_DONE_INTERRUPT)) {
1602 /* abort system dma */
1603 net2272_cancel_dma(ep->dev);
1604 }
1605
1606 /* EP_TRANSFER will contain the number of bytes
1607 * actually received.
1608 * NOTE: There is no overflow detection on EP_TRANSFER:
1609 * We can't deal with transfers larger than 2^24 bytes!
1610 */
1611 len = (net2272_ep_read(ep, EP_TRANSFER2) << 16)
1612 | (net2272_ep_read(ep, EP_TRANSFER1) << 8)
1613 | (net2272_ep_read(ep, EP_TRANSFER0));
1614
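		/* ep->not_empty was set by net2272_kick_dma() when the buffer
		 * already held data at DMA start; the fixed adjustment below
		 * appears to account for that data.
		 */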
1615 if (ep->not_empty)
1616 len += 4;
1617
1618 req->req.actual += len;
1619
1620 /* get any remaining data */
1621 net2272_pio_advance(ep);
1622 }
1623 }
1624
1625 /*---------------------------------------------------------------------------*/
1626
1627 static void
1628 net2272_handle_ep(struct net2272_ep *ep)
1629 {
1630 struct net2272_request *req;
1631 u8 stat0, stat1;
1632
1633 if (!list_empty(&ep->queue))
1634 req = list_entry(ep->queue.next,
1635 struct net2272_request, queue);
1636 else
1637 req = NULL;
1638
1639 /* ack all, and handle what we care about */
1640 stat0 = net2272_ep_read(ep, EP_STAT0);
1641 stat1 = net2272_ep_read(ep, EP_STAT1);
1642 ep->irqs++;
1643
1644 dev_vdbg(ep->dev->dev, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n",
1645 ep->ep.name, stat0, stat1, req ? &req->req : 0);
1646
1647 net2272_ep_write(ep, EP_STAT0, stat0 &
1648 ~((1 << NAK_OUT_PACKETS)
1649 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)));
1650 net2272_ep_write(ep, EP_STAT1, stat1);
1651
1652 /* data packet(s) received (in the fifo, OUT)
1653 * direction must be validated, otherwise control read status phase
1654 * could be interpreted as a valid packet
1655 */
1656 if (!ep->is_in && (stat0 & (1 << DATA_PACKET_RECEIVED_INTERRUPT)))
1657 net2272_pio_advance(ep);
1658 /* data packet(s) transmitted (IN) */
1659 else if (stat0 & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
1660 net2272_pio_advance(ep);
1661 }
1662
1663 static struct net2272_ep *
1664 net2272_get_ep_by_addr(struct net2272 *dev, u16 wIndex)
1665 {
1666 struct net2272_ep *ep;
1667
1668 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
1669 return &dev->ep[0];
1670
1671 list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
1672 u8 bEndpointAddress;
1673
1674 if (!ep->desc)
1675 continue;
1676 bEndpointAddress = ep->desc->bEndpointAddress;
1677 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
1678 continue;
1679 if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
1680 return ep;
1681 }
1682 return NULL;
1683 }
1684
1685 /*
1686 * USB Test Packet:
1687 * JKJKJKJK * 9
1688 * JJKKJJKK * 8
1689 * JJJJKKKK * 8
1690 * JJJJJJJKKKKKKK * 8
1691 * JJJJJJJK * 8
1692 * {JKKKKKKK * 10}, JK
1693 */
1694 static const u8 net2272_test_packet[] = {
1695 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1696 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
1697 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
1698 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1699 0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
1700 0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFD, 0x7E
1701 };
1702
1703 static void
1704 net2272_set_test_mode(struct net2272 *dev, int mode)
1705 {
1706 int i;
1707
1708 /* Disable all net2272 interrupts:
1709 * Nothing but a power cycle should stop the test.
1710 */
1711 net2272_write(dev, IRQENB0, 0x00);
1712 net2272_write(dev, IRQENB1, 0x00);
1713
1714    /* Force transceiver to high-speed */
1715 net2272_write(dev, XCVRDIAG, 1 << FORCE_HIGH_SPEED);
1716
1717 net2272_write(dev, PAGESEL, 0);
1718 net2272_write(dev, EP_STAT0, 1 << DATA_PACKET_TRANSMITTED_INTERRUPT);
1719 net2272_write(dev, EP_RSPCLR,
1720 (1 << CONTROL_STATUS_PHASE_HANDSHAKE)
1721 | (1 << HIDE_STATUS_PHASE));
1722 net2272_write(dev, EP_CFG, 1 << ENDPOINT_DIRECTION);
1723 net2272_write(dev, EP_STAT1, 1 << BUFFER_FLUSH);
1724
1725 /* wait for status phase to complete */
1726 while (!(net2272_read(dev, EP_STAT0) &
1727 (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)))
1728 ;
1729
1730 /* Enable test mode */
1731 net2272_write(dev, USBTEST, mode);
1732
1733 /* load test packet */
1734 if (mode == TEST_PACKET) {
1735 /* switch to 8 bit mode */
1736 net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) &
1737 ~(1 << DATA_WIDTH));
1738
1739 for (i = 0; i < sizeof(net2272_test_packet); ++i)
1740 net2272_write(dev, EP_DATA, net2272_test_packet[i]);
1741
1742 /* Validate test packet */
1743 net2272_write(dev, EP_TRANSFER0, 0);
1744 }
1745 }
1746
1747 static void
1748 net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat)
1749 {
1750 struct net2272_ep *ep;
1751 u8 num, scratch;
1752
1753 /* starting a control request? */
1754 if (unlikely(stat & (1 << SETUP_PACKET_INTERRUPT))) {
1755 union {
1756 u8 raw[8];
1757 struct usb_ctrlrequest r;
1758 } u;
1759 int tmp = 0;
1760 struct net2272_request *req;
1761
1762 if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
1763 if (net2272_read(dev, USBCTL1) & (1 << USB_HIGH_SPEED))
1764 dev->gadget.speed = USB_SPEED_HIGH;
1765 else
1766 dev->gadget.speed = USB_SPEED_FULL;
1767 dev_dbg(dev->dev, "%s speed\n",
1768 (dev->gadget.speed == USB_SPEED_HIGH) ? "high" : "full");
1769 }
1770
1771 ep = &dev->ep[0];
1772 ep->irqs++;
1773
1774 /* make sure any leftover interrupt state is cleared */
1775 stat &= ~(1 << ENDPOINT_0_INTERRUPT);
1776 while (!list_empty(&ep->queue)) {
1777 req = list_entry(ep->queue.next,
1778 struct net2272_request, queue);
1779 net2272_done(ep, req,
1780 (req->req.actual == req->req.length) ? 0 : -EPROTO);
1781 }
1782 ep->stopped = 0;
1783 dev->protocol_stall = 0;
1784 net2272_ep_write(ep, EP_STAT0,
1785 (1 << DATA_IN_TOKEN_INTERRUPT)
1786 | (1 << DATA_OUT_TOKEN_INTERRUPT)
1787 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
1788 | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
1789 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
1790 net2272_ep_write(ep, EP_STAT1,
1791 (1 << TIMEOUT)
1792 | (1 << USB_OUT_ACK_SENT)
1793 | (1 << USB_OUT_NAK_SENT)
1794 | (1 << USB_IN_ACK_RCVD)
1795 | (1 << USB_IN_NAK_SENT)
1796 | (1 << USB_STALL_SENT)
1797 | (1 << LOCAL_OUT_ZLP));
1798
1799 /*
1800 * Ensure Control Read pre-validation setting is beyond maximum size
1801 * - Control Writes can leave non-zero values in EP_TRANSFER. If
1802 * an EP0 transfer following the Control Write is a Control Read,
1803 * the NET2272 sees the non-zero EP_TRANSFER as an unexpected
1804 * pre-validation count.
1805 * - Setting EP_TRANSFER beyond the maximum EP0 transfer size ensures
1806             *   the pre-validation count cannot cause an unexpected validation
1807 */
1808 net2272_write(dev, PAGESEL, 0);
1809 net2272_write(dev, EP_TRANSFER2, 0xff);
1810 net2272_write(dev, EP_TRANSFER1, 0xff);
1811 net2272_write(dev, EP_TRANSFER0, 0xff);
1812
1813 u.raw[0] = net2272_read(dev, SETUP0);
1814 u.raw[1] = net2272_read(dev, SETUP1);
1815 u.raw[2] = net2272_read(dev, SETUP2);
1816 u.raw[3] = net2272_read(dev, SETUP3);
1817 u.raw[4] = net2272_read(dev, SETUP4);
1818 u.raw[5] = net2272_read(dev, SETUP5);
1819 u.raw[6] = net2272_read(dev, SETUP6);
1820 u.raw[7] = net2272_read(dev, SETUP7);
1821 /*
1822 * If you have a big endian cpu make sure le16_to_cpus
1823 * performs the proper byte swapping here...
1824 */
1825 le16_to_cpus(&u.r.wValue);
1826 le16_to_cpus(&u.r.wIndex);
1827 le16_to_cpus(&u.r.wLength);
1828
1829 /* ack the irq */
1830 net2272_write(dev, IRQSTAT0, 1 << SETUP_PACKET_INTERRUPT);
1831 stat ^= (1 << SETUP_PACKET_INTERRUPT);
1832
1833 /* watch control traffic at the token level, and force
1834 * synchronization before letting the status phase happen.
1835 */
1836 ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
1837 if (ep->is_in) {
1838 scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1839 | (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1840 | (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1841 stop_out_naking(ep);
1842 } else
1843 scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1844 | (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1845 | (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1846 net2272_ep_write(ep, EP_IRQENB, scratch);
1847
1848 if ((u.r.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
1849 goto delegate;
1850 switch (u.r.bRequest) {
1851 case USB_REQ_GET_STATUS: {
1852 struct net2272_ep *e;
1853 u16 status = 0;
1854
1855 switch (u.r.bRequestType & USB_RECIP_MASK) {
1856 case USB_RECIP_ENDPOINT:
1857 e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1858 if (!e || u.r.wLength > 2)
1859 goto do_stall;
1860 if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT))
1861 status = __constant_cpu_to_le16(1);
1862 else
1863 status = __constant_cpu_to_le16(0);
1864
1865 /* don't bother with a request object! */
1866 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1867 writew(status, net2272_reg_addr(dev, EP_DATA));
1868 set_fifo_bytecount(&dev->ep[0], 0);
1869 allow_status(ep);
1870 dev_vdbg(dev->dev, "%s stat %02x\n",
1871 ep->ep.name, status);
1872 goto next_endpoints;
1873 case USB_RECIP_DEVICE:
1874 if (u.r.wLength > 2)
1875 goto do_stall;
1876 if (dev->is_selfpowered)
1877 status = (1 << USB_DEVICE_SELF_POWERED);
1878
1879 /* don't bother with a request object! */
1880 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1881 writew(status, net2272_reg_addr(dev, EP_DATA));
1882 set_fifo_bytecount(&dev->ep[0], 0);
1883 allow_status(ep);
1884 dev_vdbg(dev->dev, "device stat %02x\n", status);
1885 goto next_endpoints;
1886 case USB_RECIP_INTERFACE:
1887 if (u.r.wLength > 2)
1888 goto do_stall;
1889
1890 /* don't bother with a request object! */
1891 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1892 writew(status, net2272_reg_addr(dev, EP_DATA));
1893 set_fifo_bytecount(&dev->ep[0], 0);
1894 allow_status(ep);
1895 dev_vdbg(dev->dev, "interface status %02x\n", status);
1896 goto next_endpoints;
1897 }
1898
1899 break;
1900 }
1901 case USB_REQ_CLEAR_FEATURE: {
1902 struct net2272_ep *e;
1903
1904 if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1905 goto delegate;
1906 if (u.r.wValue != USB_ENDPOINT_HALT ||
1907 u.r.wLength != 0)
1908 goto do_stall;
1909 e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1910 if (!e)
1911 goto do_stall;
1912 if (e->wedged) {
1913 dev_vdbg(dev->dev, "%s wedged, halt not cleared\n",
1914 ep->ep.name);
1915 } else {
1916 dev_vdbg(dev->dev, "%s clear halt\n", ep->ep.name);
1917 clear_halt(e);
1918 }
1919 allow_status(ep);
1920 goto next_endpoints;
1921 }
1922 case USB_REQ_SET_FEATURE: {
1923 struct net2272_ep *e;
1924
1925 if (u.r.bRequestType == USB_RECIP_DEVICE) {
1926 if (u.r.wIndex != NORMAL_OPERATION)
1927 net2272_set_test_mode(dev, (u.r.wIndex >> 8));
1928 allow_status(ep);
1929 dev_vdbg(dev->dev, "test mode: %d\n", u.r.wIndex);
1930 goto next_endpoints;
1931 } else if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1932 goto delegate;
1933 if (u.r.wValue != USB_ENDPOINT_HALT ||
1934 u.r.wLength != 0)
1935 goto do_stall;
1936 e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1937 if (!e)
1938 goto do_stall;
1939 set_halt(e);
1940 allow_status(ep);
1941 dev_vdbg(dev->dev, "%s set halt\n", ep->ep.name);
1942 goto next_endpoints;
1943 }
1944 case USB_REQ_SET_ADDRESS: {
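/* just latch the new USB address and ack the status stage */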
1945 net2272_write(dev, OURADDR, u.r.wValue & 0xff);
1946 allow_status(ep);
1947 break;
1948 }
1949 default:
1950 delegate:
1951 dev_vdbg(dev->dev, "setup %02x.%02x v%04x i%04x "
1952 "ep_cfg %08x\n",
1953 u.r.bRequestType, u.r.bRequest,
1954 u.r.wValue, u.r.wIndex,
1955 net2272_ep_read(ep, EP_CFG));
1956 spin_unlock(&dev->lock);
1957 tmp = dev->driver->setup(&dev->gadget, &u.r);
1958 spin_lock(&dev->lock);
1959 }
1960
1961 /* stall ep0 on error */
1962 if (tmp < 0) {
1963 do_stall:
1964 dev_vdbg(dev->dev, "req %02x.%02x protocol STALL; stat %d\n",
1965 u.r.bRequestType, u.r.bRequest, tmp);
1966 dev->protocol_stall = 1;
1967 }
1968 /* endpoint dma irq? */
1969 } else if (stat & (1 << DMA_DONE_INTERRUPT)) {
1970 net2272_cancel_dma(dev);
1971 net2272_write(dev, IRQSTAT0, 1 << DMA_DONE_INTERRUPT);
1972 stat &= ~(1 << DMA_DONE_INTERRUPT);
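/* DMA_ENDPOINT_SELECT says which endpoint owned the channel: set => ep-b (ep[2]), clear => ep-a (ep[1]) */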
1973 num = (net2272_read(dev, DMAREQ) & (1 << DMA_ENDPOINT_SELECT))
1974 ? 2 : 1;
1975
1976 ep = &dev->ep[num];
1977 net2272_handle_dma(ep);
1978 }
1979
1980 next_endpoints:
1981 /* endpoint data irq? */
1982 scratch = stat & 0x0f;
1983 stat &= ~0x0f;
1984 for (num = 0; scratch; num++) {
1985 u8 t;
1986
1987 /* does this endpoint's FIFO and queue need tending? */
1988 t = 1 << num;
1989 if ((scratch & t) == 0)
1990 continue;
1991 scratch ^= t;
1992
1993 ep = &dev->ep[num];
1994 net2272_handle_ep(ep);
1995 }
1996
1997 /* some interrupts we can just ignore */
1998 stat &= ~(1 << SOF_INTERRUPT);
1999
2000 if (stat)
2001 dev_dbg(dev->dev, "unhandled irqstat0 %02x\n", stat);
2002 }
2003
2004 static void
2005 net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat)
2006 {
2007 u8 tmp, mask;
2008
2009 /* after disconnect there's nothing else to do! */
2010 tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
2011 mask = (1 << USB_HIGH_SPEED) | (1 << USB_FULL_SPEED);
2012
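/* a root-port reset with neither speed bit set, or a deasserted VBUS pin, means we were disconnected (only interesting if we thought we were connected) */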
2013 if (stat & tmp) {
2014 net2272_write(dev, IRQSTAT1, tmp);
2015 if ((((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) &&
2016 ((net2272_read(dev, USBCTL1) & mask) == 0))
2017 || ((net2272_read(dev, USBCTL1) & (1 << VBUS_PIN))
2018 == 0))
2019 && (dev->gadget.speed != USB_SPEED_UNKNOWN)) {
2020 dev_dbg(dev->dev, "disconnect %s\n",
2021 dev->driver->driver.name);
2022 stop_activity(dev, dev->driver);
2023 net2272_ep0_start(dev);
2024 return;
2025 }
2026 stat &= ~tmp;
2027
2028 if (!stat)
2029 return;
2030 }
2031
2032 tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
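/* the change irq fires on both suspend entry and exit; the SUSPEND_REQUEST status bit says which edge this is */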
2033 if (stat & tmp) {
2034 net2272_write(dev, IRQSTAT1, tmp);
2035 if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
2036 if (dev->driver->suspend)
2037 dev->driver->suspend(&dev->gadget);
2038 if (!enable_suspend) {
2039 stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
2040 dev_dbg(dev->dev, "Suspend disabled, ignoring\n");
2041 }
2042 } else {
2043 if (dev->driver->resume)
2044 dev->driver->resume(&dev->gadget);
2045 }
2046 stat &= ~tmp;
2047 }
2048
2049 /* clear any other status/irqs */
2050 if (stat)
2051 net2272_write(dev, IRQSTAT1, stat);
2052
2053 /* some status we can just ignore */
2054 stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
2055 | (1 << SUSPEND_REQUEST_INTERRUPT)
2056 | (1 << RESUME_INTERRUPT));
2057 if (!stat)
2058 return;
2059 else
2060 dev_dbg(dev->dev, "unhandled irqstat1 %02x\n", stat);
2061 }
2062
2063 static irqreturn_t net2272_irq(int irq, void *_dev)
2064 {
2065 struct net2272 *dev = _dev;
2066 #if defined(PLX_PCI_RDK) || defined(PLX_PCI_RDK2)
2067 u32 intcsr;
2068 #endif
2069 #if defined(PLX_PCI_RDK)
2070 u8 dmareq;
2071 #endif
2072 spin_lock(&dev->lock);
2073 #if defined(PLX_PCI_RDK)
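/* on the RDK-1 card the NET2272 interrupt is routed through the PLX 9054 bridge: mask the PCI interrupt while servicing, then re-enable it */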
2074 intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2075
2076 if ((intcsr & LOCAL_INTERRUPT_TEST) == LOCAL_INTERRUPT_TEST) {
2077 writel(intcsr & ~(1 << PCI_INTERRUPT_ENABLE),
2078 dev->rdk1.plx9054_base_addr + INTCSR);
2079 net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2080 net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2081 intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2082 writel(intcsr | (1 << PCI_INTERRUPT_ENABLE),
2083 dev->rdk1.plx9054_base_addr + INTCSR);
2084 }
2085 if ((intcsr & DMA_CHANNEL_0_TEST) == DMA_CHANNEL_0_TEST) {
2086 writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2087 dev->rdk1.plx9054_base_addr + DMACSR0);
2088
2089 dmareq = net2272_read(dev, DMAREQ);
2090 if (dmareq & 0x01)
2091 net2272_handle_dma(&dev->ep[2]);
2092 else
2093 net2272_handle_dma(&dev->ep[1]);
2094 }
2095 #endif
2096 #if defined(PLX_PCI_RDK2)
2097 /* see if PCI int for us by checking irqstat */
2098 intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
2099 if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
spin_unlock(&dev->lock);
2100 return IRQ_NONE;
}
2101 /* check dma interrupts */
2102 #endif
2103 /* Platform/device interrupt handler */
2104 #if !defined(PLX_PCI_RDK)
2105 net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2106 net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2107 #endif
2108 spin_unlock(&dev->lock);
2109
2110 return IRQ_HANDLED;
2111 }
2112
2113 static int net2272_present(struct net2272 *dev)
2114 {
2115 /*
2116 * Quick test to see if CPU can communicate properly with the NET2272.
2117 * Verifies connection using writes and reads to write/read and
2118 * read-only registers.
2119 *
2120 * This routine is strongly recommended, especially during early bring-up
2121 * of new hardware; however, for designs that do not apply Power On System
2122 * Tests (POST) it may be discarded (or perhaps minimized).
2123 */
2124 unsigned int ii;
2125 u8 val, refval;
2126
2127 /* Verify NET2272 write/read SCRATCH register can write and read */
2128 refval = net2272_read(dev, SCRATCH);
2129 for (ii = 0; ii < 0x100; ii += 7) {
2130 net2272_write(dev, SCRATCH, ii);
2131 val = net2272_read(dev, SCRATCH);
2132 if (val != ii) {
2133 dev_dbg(dev->dev,
2134 "%s: write/read SCRATCH register test failed: "
2135 "wrote:0x%2.2x, read:0x%2.2x\n",
2136 __func__, ii, val);
2137 return -EINVAL;
2138 }
2139 }
2140 /* To be nice, we write the original SCRATCH value back: */
2141 net2272_write(dev, SCRATCH, refval);
2142
2143 /* Verify NET2272 CHIPREV register is read-only: */
2144 refval = net2272_read(dev, CHIPREV_2272);
2145 for (ii = 0; ii < 0x100; ii += 7) {
2146 net2272_write(dev, CHIPREV_2272, ii);
2147 val = net2272_read(dev, CHIPREV_2272);
2148 if (val != refval) {
2149 dev_dbg(dev->dev,
2150 "%s: write/read CHIPREV register test failed: "
2151 "wrote 0x%2.2x, read:0x%2.2x expected:0x%2.2x\n",
2152 __func__, ii, val, refval);
2153 return -EINVAL;
2154 }
2155 }
2156
2157 /*
2158 * Verify NET2272's "NET2270 legacy revision" register
2159 * - NET2272 has two revision registers. The NET2270 legacy revision
2160 * register should read the same value, regardless of the NET2272
2161 * silicon revision. The legacy register applies to NET2270
2162 * firmware being applied to the NET2272.
2163 */
2164 val = net2272_read(dev, CHIPREV_LEGACY);
2165 if (val != NET2270_LEGACY_REV) {
2166 /*
2167 * Unexpected legacy revision value
2168 * - Perhaps the chip is a NET2270?
2169 */
2170 dev_dbg(dev->dev,
2171 "%s: WARNING: UNEXPECTED NET2272 LEGACY REGISTER VALUE:\n"
2172 " - CHIPREV_LEGACY: expected 0x%2.2x, got:0x%2.2x. (Not NET2272?)\n",
2173 __func__, NET2270_LEGACY_REV, val);
2174 return -EINVAL;
2175 }
2176
2177 /*
2178 * Verify NET2272 silicon revision
2179 * - This revision register is appropriate for the silicon version
2180 * of the NET2272
2181 */
2182 val = net2272_read(dev, CHIPREV_2272);
2183 switch (val) {
2184 case CHIPREV_NET2272_R1:
2185 /*
2186 * NET2272 Rev 1 has DMA related errata:
2187 * - Newer silicon (Rev 1A or better) required
2188 */
2189 dev_dbg(dev->dev,
2190 "%s: Rev 1 detected: newer silicon recommended for DMA support\n",
2191 __func__);
2192 break;
2193 case CHIPREV_NET2272_R1A:
2194 break;
2195 default:
2196 /* NET2272 silicon version *may* not work with this firmware */
2197 dev_dbg(dev->dev,
2198 "%s: unexpected silicon revision register value: "
2199 " CHIPREV_2272: 0x%2.2x\n",
2200 __func__, val);
2201 /*
2202 * Return Success, even though the chip rev is not an expected value
2203 * - Older, pre-built firmware can attempt to operate on newer silicon
2204 * - Often, new silicon is perfectly compatible
2205 */
2206 }
2207
2208 /* Success: NET2272 checks out OK */
2209 return 0;
2210 }
2211
2212 static void
2213 net2272_gadget_release(struct device *_dev)
2214 {
2215 struct net2272 *dev = dev_get_drvdata(_dev);
2216 kfree(dev);
2217 }
2218
2219 /*---------------------------------------------------------------------------*/
2220
2221 static void __devexit
2222 net2272_remove(struct net2272 *dev)
2223 {
2224 usb_del_gadget_udc(&dev->gadget);
2225
2226 /* start with the driver above us */
2227 if (dev->driver) {
2228 /* should have been done already by driver model core */
2229 dev_warn(dev->dev, "pci remove, driver '%s' is still registered\n",
2230 dev->driver->driver.name);
2231 usb_gadget_unregister_driver(dev->driver);
2232 }
2233
2234 free_irq(dev->irq, dev);
2235 iounmap(dev->base_addr);
2236
2237 device_unregister(&dev->gadget.dev);
2238 device_remove_file(dev->dev, &dev_attr_registers);
2239
2240 dev_info(dev->dev, "unbind\n");
2241 the_controller = NULL;
2242 }
2243
2244 static struct net2272 * __devinit
2245 net2272_probe_init(struct device *dev, unsigned int irq)
2246 {
2247 struct net2272 *ret;
2248
2249 if (the_controller) {
2250 dev_warn(dev, "ignoring\n");
2251 return ERR_PTR(-EBUSY);
2252 }
2253
2254 if (!irq) {
2255 dev_dbg(dev, "No IRQ!\n");
2256 return ERR_PTR(-ENODEV);
2257 }
2258
2259 /* alloc, and start init */
2260 ret = kzalloc(sizeof(*ret), GFP_KERNEL);
2261 if (!ret)
2262 return ERR_PTR(-ENOMEM);
2263
2264 spin_lock_init(&ret->lock);
2265 ret->irq = irq;
2266 ret->dev = dev;
2267 ret->gadget.ops = &net2272_ops;
2268 ret->gadget.is_dualspeed = 1;
2269
2270 /* the "gadget" abstracts/virtualizes the controller */
2271 dev_set_name(&ret->gadget.dev, "gadget");
2272 ret->gadget.dev.parent = dev;
2273 ret->gadget.dev.dma_mask = dev->dma_mask;
2274 ret->gadget.dev.release = net2272_gadget_release;
2275 ret->gadget.name = driver_name;
2276
2277 return ret;
2278 }
2279
2280 static int __devinit
2281 net2272_probe_fin(struct net2272 *dev, unsigned int irqflags)
2282 {
2283 int ret;
2284
2285 /* See if the NET2272 is actually there */
2286 if (net2272_present(dev)) {
2287 dev_warn(dev->dev, "2272 not found!\n");
2288 ret = -ENODEV;
2289 goto err;
2290 }
2291
2292 net2272_usb_reset(dev);
2293 net2272_usb_reinit(dev);
2294
2295 ret = request_irq(dev->irq, net2272_irq, irqflags, driver_name, dev);
2296 if (ret) {
2297 dev_err(dev->dev, "request interrupt %i failed\n", dev->irq);
2298 goto err;
2299 }
2300
2301 dev->chiprev = net2272_read(dev, CHIPREV_2272);
2302
2303 /* done */
2304 dev_info(dev->dev, "%s\n", driver_desc);
2305 dev_info(dev->dev, "irq %i, mem %p, chip rev %04x, dma %s\n",
2306 dev->irq, dev->base_addr, dev->chiprev,
2307 dma_mode_string());
2308 dev_info(dev->dev, "version: %s\n", driver_vers);
2309
2310 the_controller = dev;
2311
2312 ret = device_register(&dev->gadget.dev);
2313 if (ret)
2314 goto err_irq;
2315 ret = device_create_file(dev->dev, &dev_attr_registers);
2316 if (ret)
2317 goto err_dev_reg;
2318
2319 ret = usb_add_gadget_udc(dev->dev, &dev->gadget);
2320 if (ret)
2321 goto err_add_udc;
2322
2323 return 0;
2324
2325 err_add_udc:
2326 device_remove_file(dev->dev, &dev_attr_registers);
2327 err_dev_reg:
2328 device_unregister(&dev->gadget.dev);
2329 err_irq:
2330 free_irq(dev->irq, dev);
2331 err:
2332 return ret;
2333 }
2334
2335 #ifdef CONFIG_PCI
2336
2337 /*
2338 * wrap this driver around the specified device, but
2339 * don't respond over USB until a gadget driver binds to us
2340 */
2341
2342 static int __devinit
2343 net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev)
2344 {
2345 unsigned long resource, len, tmp;
2346 void __iomem *mem_mapped_addr[4];
2347 int ret, i;
2348
2349 /*
2350 * BAR 0 holds PLX 9054 config registers
2351 * BAR 1 is i/o memory; unused here
2352 * BAR 2 holds EPLD config registers
2353 * BAR 3 holds NET2272 registers
2354 */
2355
2356 /* Find and map all address spaces */
2357 for (i = 0; i < 4; ++i) {
2358 if (i == 1)
2359 continue; /* BAR1 unused */
2360
2361 resource = pci_resource_start(pdev, i);
2362 len = pci_resource_len(pdev, i);
2363
2364 if (!request_mem_region(resource, len, driver_name)) {
2365 dev_dbg(dev->dev, "controller already in use\n");
2366 ret = -EBUSY;
2367 goto err;
2368 }
2369
2370 mem_mapped_addr[i] = ioremap_nocache(resource, len);
2371 if (mem_mapped_addr[i] == NULL) {
2372 release_mem_region(resource, len);
2373 dev_dbg(dev->dev, "can't map memory\n");
2374 ret = -EFAULT;
2375 goto err;
2376 }
2377 }
2378
2379 dev->rdk1.plx9054_base_addr = mem_mapped_addr[0];
2380 dev->rdk1.epld_base_addr = mem_mapped_addr[2];
2381 dev->base_addr = mem_mapped_addr[3];
2382
2383 /* Set PLX 9054 bus width (16 bits) */
2384 tmp = readl(dev->rdk1.plx9054_base_addr + LBRD1);
2385 writel((tmp & ~(3 << MEMORY_SPACE_LOCAL_BUS_WIDTH)) | W16_BIT,
2386 dev->rdk1.plx9054_base_addr + LBRD1);
2387
2388 /* Enable PLX 9054 Interrupts */
2389 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) |
2390 (1 << PCI_INTERRUPT_ENABLE) |
2391 (1 << LOCAL_INTERRUPT_INPUT_ENABLE),
2392 dev->rdk1.plx9054_base_addr + INTCSR);
2393
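/* clear any stale interrupt on PLX DMA channel 0 and leave the channel disabled */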
2394 writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2395 dev->rdk1.plx9054_base_addr + DMACSR0);
2396
2397 /* assert NET2272 reset via the EPLD I/O control register */
2398 writeb((1 << EPLD_DMA_ENABLE) |
2399 (1 << DMA_CTL_DACK) |
2400 (1 << DMA_TIMEOUT_ENABLE) |
2401 (1 << USER) |
2402 (0 << MPX_MODE) |
2403 (1 << BUSWIDTH) |
2404 (1 << NET2272_RESET),
2405 dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2406
2407 mb();
2408 writeb(readb(dev->base_addr + EPLD_IO_CONTROL_REGISTER) &
2409 ~(1 << NET2272_RESET),
2410 dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2411 udelay(200);
2412
2413 return 0;
2414
2415 err:
2416 while (--i >= 0) {
2417 iounmap(mem_mapped_addr[i]);
2418 release_mem_region(pci_resource_start(pdev, i),
2419 pci_resource_len(pdev, i));
2420 }
2421
2422 return ret;
2423 }
2424
2425 static int __devinit
2426 net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev)
2427 {
2428 unsigned long resource, len;
2429 void __iomem *mem_mapped_addr[2];
2430 int ret, i;
2431
2432 /*
2433 * BAR 0 holds FPGA config registers
2434 * BAR 1 holds NET2272 registers
2435 */
2436
2437 /* Find and map all address spaces, bar2-3 unused in rdk 2 */
2438 for (i = 0; i < 2; ++i) {
2439 resource = pci_resource_start(pdev, i);
2440 len = pci_resource_len(pdev, i);
2441
2442 if (!request_mem_region(resource, len, driver_name)) {
2443 dev_dbg(dev->dev, "controller already in use\n");
2444 ret = -EBUSY;
2445 goto err;
2446 }
2447
2448 mem_mapped_addr[i] = ioremap_nocache(resource, len);
2449 if (mem_mapped_addr[i] == NULL) {
2450 release_mem_region(resource, len);
2451 dev_dbg(dev->dev, "can't map memory\n");
2452 ret = -EFAULT;
2453 goto err;
2454 }
2455 }
2456
2457 dev->rdk2.fpga_base_addr = mem_mapped_addr[0];
2458 dev->base_addr = mem_mapped_addr[1];
2459
2460 mb();
2461 /* Set 2272 bus width (16 bits) and reset */
2462 writel((1 << CHIP_RESET), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2463 udelay(200);
2464 writel((1 << BUS_WIDTH), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2465 /* Print fpga version number */
2466 dev_info(dev->dev, "RDK2 FPGA version %08x\n",
2467 readl(dev->rdk2.fpga_base_addr + RDK2_FPGAREV));
2468 /* Enable FPGA Interrupts */
2469 writel((1 << NET2272_PCI_IRQ), dev->rdk2.fpga_base_addr + RDK2_IRQENB);
2470
2471 return 0;
2472
2473 err:
2474 while (--i >= 0) {
2475 iounmap(mem_mapped_addr[i]);
2476 release_mem_region(pci_resource_start(pdev, i),
2477 pci_resource_len(pdev, i));
2478 }
2479
2480 return ret;
2481 }
2482
2483 static int __devinit
2484 net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2485 {
2486 struct net2272 *dev;
2487 int ret;
2488
2489 dev = net2272_probe_init(&pdev->dev, pdev->irq);
2490 if (IS_ERR(dev))
2491 return PTR_ERR(dev);
2492 dev->dev_id = pdev->device;
2493
2494 if (pci_enable_device(pdev) < 0) {
2495 ret = -ENODEV;
2496 goto err_free;
2497 }
2498
2499 pci_set_master(pdev);
2500
2501 switch (pdev->device) {
2502 case PCI_DEVICE_ID_RDK1: ret = net2272_rdk1_probe(pdev, dev); break;
2503 case PCI_DEVICE_ID_RDK2: ret = net2272_rdk2_probe(pdev, dev); break;
2504 default: BUG();
2505 }
2506 if (ret)
2507 goto err_pci;
2508
2509 ret = net2272_probe_fin(dev, 0);
2510 if (ret)
2511 goto err_pci;
2512
2513 pci_set_drvdata(pdev, dev);
2514
2515 return 0;
2516
2517 err_pci:
2518 pci_disable_device(pdev);
2519 err_free:
2520 kfree(dev);
2521
2522 return ret;
2523 }
2524
2525 static void __devexit
2526 net2272_rdk1_remove(struct pci_dev *pdev, struct net2272 *dev)
2527 {
2528 int i;
2529
2530 /* disable PLX 9054 interrupts */
2531 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2532 ~(1 << PCI_INTERRUPT_ENABLE),
2533 dev->rdk1.plx9054_base_addr + INTCSR);
2534
2535 /* clean up resources allocated during probe() */
2536 iounmap(dev->rdk1.plx9054_base_addr);
2537 iounmap(dev->rdk1.epld_base_addr);
2538
2539 for (i = 0; i < 4; ++i) {
2540 if (i == 1)
2541 continue; /* BAR1 unused */
2542 release_mem_region(pci_resource_start(pdev, i),
2543 pci_resource_len(pdev, i));
2544 }
2545 }
2546
2547 static void __devexit
2548 net2272_rdk2_remove(struct pci_dev *pdev, struct net2272 *dev)
2549 {
2550 int i;
2551
2552 /* disable fpga interrupts
2553 writel(readl(dev->rdk2.fpga_base_addr + RDK2_IRQENB) &
2554 ~(1 << NET2272_PCI_IRQ),
2555 dev->rdk2.fpga_base_addr + RDK2_IRQENB);
2556 */
2557
2558 /* clean up resources allocated during probe() */
2559 iounmap(dev->rdk2.fpga_base_addr);
2560
2561 for (i = 0; i < 2; ++i)
2562 release_mem_region(pci_resource_start(pdev, i),
2563 pci_resource_len(pdev, i));
2564 }
2565
2566 static void __devexit
2567 net2272_pci_remove(struct pci_dev *pdev)
2568 {
2569 struct net2272 *dev = pci_get_drvdata(pdev);
2570
2571 net2272_remove(dev);
2572
2573 switch (pdev->device) {
2574 case PCI_DEVICE_ID_RDK1: net2272_rdk1_remove(pdev, dev); break;
2575 case PCI_DEVICE_ID_RDK2: net2272_rdk2_remove(pdev, dev); break;
2576 default: BUG();
2577 }
2578
2579 pci_disable_device(pdev);
2580
2581 kfree(dev);
2582 }
2583
2584 /* Table of matching PCI IDs */
2585 static struct pci_device_id __devinitdata pci_ids[] = {
2586 { /* RDK 1 card */
2587 .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2588 .class_mask = 0,
2589 .vendor = PCI_VENDOR_ID_PLX,
2590 .device = PCI_DEVICE_ID_RDK1,
2591 .subvendor = PCI_ANY_ID,
2592 .subdevice = PCI_ANY_ID,
2593 },
2594 { /* RDK 2 card */
2595 .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2596 .class_mask = 0,
2597 .vendor = PCI_VENDOR_ID_PLX,
2598 .device = PCI_DEVICE_ID_RDK2,
2599 .subvendor = PCI_ANY_ID,
2600 .subdevice = PCI_ANY_ID,
2601 },
2602 { }
2603 };
2604 MODULE_DEVICE_TABLE(pci, pci_ids);
2605
2606 static struct pci_driver net2272_pci_driver = {
2607 .name = driver_name,
2608 .id_table = pci_ids,
2609
2610 .probe = net2272_pci_probe,
2611 .remove = __devexit_p(net2272_pci_remove),
2612 };
2613
2614 static int net2272_pci_register(void)
2615 {
2616 return pci_register_driver(&net2272_pci_driver);
2617 }
2618
2619 static void net2272_pci_unregister(void)
2620 {
2621 pci_unregister_driver(&net2272_pci_driver);
2622 }
2623
2624 #else
2625 static inline int net2272_pci_register(void) { return 0; }
2626 static inline void net2272_pci_unregister(void) { }
2627 #endif
2628
2629 /*---------------------------------------------------------------------------*/
2630
2631 static int __devinit
2632 net2272_plat_probe(struct platform_device *pdev)
2633 {
2634 struct net2272 *dev;
2635 int ret;
2636 unsigned int irqflags;
2637 resource_size_t base, len;
2638 struct resource *iomem, *iomem_bus, *irq_res;
2639
2640 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2641 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2642 iomem_bus = platform_get_resource(pdev, IORESOURCE_BUS, 0);
2643 if (!irq_res || !iomem) {
2644 dev_err(&pdev->dev, "must provide irq/base addr\n");
2645 return -EINVAL;
2646 }
2647
2648 dev = net2272_probe_init(&pdev->dev, irq_res->start);
2649 if (IS_ERR(dev))
2650 return PTR_ERR(dev);
2651
2652 irqflags = 0;
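/* map the IORESOURCE_IRQ trigger flags onto the matching IRQF_TRIGGER_* flags for request_irq() */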
2653 if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
2654 irqflags |= IRQF_TRIGGER_RISING;
2655 if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
2656 irqflags |= IRQF_TRIGGER_FALLING;
2657 if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
2658 irqflags |= IRQF_TRIGGER_HIGH;
2659 if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
2660 irqflags |= IRQF_TRIGGER_LOW;
2661
2662 base = iomem->start;
2663 len = resource_size(iomem);
2664 if (iomem_bus)
2665 dev->base_shift = iomem_bus->start;
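/* optional bus resource gives the register address shift, i.e. how far apart the NET2272 registers appear on the local bus */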
2666
2667 if (!request_mem_region(base, len, driver_name)) {
2668 dev_dbg(dev->dev, "get request memory region!\n");
2669 ret = -EBUSY;
2670 goto err;
2671 }
2672 dev->base_addr = ioremap_nocache(base, len);
2673 if (!dev->base_addr) {
2674 dev_dbg(dev->dev, "can't map memory\n");
2675 ret = -EFAULT;
2676 goto err_req;
2677 }
2678
2679 ret = net2272_probe_fin(dev, irqflags);
2680 if (ret)
2681 goto err_io;
2682
2683 platform_set_drvdata(pdev, dev);
2684 dev_info(&pdev->dev, "running in 16-bit, %sbyte swap local bus mode\n",
2685 (net2272_read(dev, LOCCTL) & (1 << BYTE_SWAP)) ? "" : "no ");
2686
2687 the_controller = dev;
2688
2689 return 0;
2690
2691 err_io:
2692 iounmap(dev->base_addr);
2693 err_req:
2694 release_mem_region(base, len);
2695 err:
2696 return ret;
2697 }
2698
2699 static int __devexit
2700 net2272_plat_remove(struct platform_device *pdev)
2701 {
2702 struct net2272 *dev = platform_get_drvdata(pdev);
2703
2704 net2272_remove(dev);
2705
2706 release_mem_region(pdev->resource[0].start,
2707 resource_size(&pdev->resource[0]));
2708
2709 kfree(dev);
2710
2711 return 0;
2712 }
2713
2714 static struct platform_driver net2272_plat_driver = {
2715 .probe = net2272_plat_probe,
2716 .remove = __devexit_p(net2272_plat_remove),
2717 .driver = {
2718 .name = driver_name,
2719 .owner = THIS_MODULE,
2720 },
2721 /* FIXME .suspend, .resume */
2722 };
2723 MODULE_ALIAS("platform:net2272");
2724
2725 static int __init net2272_init(void)
2726 {
2727 int ret;
2728
2729 ret = net2272_pci_register();
2730 if (ret)
2731 return ret;
2732 ret = platform_driver_register(&net2272_plat_driver);
2733 if (ret)
2734 goto err_pci;
2735 return ret;
2736
2737 err_pci:
2738 net2272_pci_unregister();
2739 return ret;
2740 }
2741 module_init(net2272_init);
2742
2743 static void __exit net2272_cleanup(void)
2744 {
2745 net2272_pci_unregister();
2746 platform_driver_unregister(&net2272_plat_driver);
2747 }
2748 module_exit(net2272_cleanup);
2749
2750 MODULE_DESCRIPTION(DRIVER_DESC);
2751 MODULE_AUTHOR("PLX Technology, Inc.");
2752 MODULE_LICENSE("GPL");