/*
 * ohci1394.c - driver for OHCI 1394 boards
 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
 *                        Gord Peters <GordPeters@smarttech.com>
 *              2001      Ben Collins <bcollins@debian.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
 * Things known to be working:
 * . Async Request Transmit
 * . Async Response Receive
 * . Async Request Receive
 * . Async Response Transmit
 * . Iso Receive
 * . DMA mmap for iso receive
 * . Config ROM generation
 *
 * Things implemented, but still in test phase:
 * . Iso Transmit
 * . Async Stream Packets Transmit (Receive done via Iso interface)
 *
 * Things not implemented:
 * . DMA error recovery
 *
 * Known bugs:
 * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
 *   added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
 */

/*
 * Acknowledgments:
 *
 * Adam J Richter <adam@yggdrasil.com>
 *  . Use of pci_class to find device
 *
 * Emilie Chung <emilie.chung@axis.com>
 *  . Tip on Async Request Filter
 *
 * Pascal Drolet <pascal.drolet@informission.ca>
 *  . Various tips for optimization and functionalities
 *
 * Robert Ficklin <rficklin@westengineering.com>
 *  . Loop in irq_handler
 *
 * James Goodwin <jamesg@Filanet.com>
 *  . Various tips on initialization, self-id reception, etc.
 *
 * Albrecht Dress <ad@mpifr-bonn.mpg.de>
 *  . Apple PowerBook detection
 *
 * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
 *  . Reset the board properly before leaving + misc cleanups
 *
 * Leon van Stuivenberg <leonvs@iae.nl>
 *  . Bug fixes
 *
 * Ben Collins <bcollins@debian.org>
 *  . Working big-endian support
 *  . Updated to 2.4.x module scheme (PCI as well)
 *  . Config ROM generation
 *
 * Manfred Weihs <weihs@ict.tuwien.ac.at>
 *  . Reworked code for initiating bus resets
 *    (long, short, with or without hold-off)
 *
 * Nandu Santhi <contactnandu@users.sourceforge.net>
 *  . Added support for nVidia nForce2 onboard Firewire chipset
 *
 */

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <linux/delay.h>
#include <linux/spinlock.h>

#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/irq.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/init.h>

#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#endif

#include "csr1212.h"
#include "ieee1394.h"
#include "ieee1394_types.h"
#include "hosts.h"
#include "dma.h"
#include "iso.h"
#include "ieee1394_core.h"
#include "highlevel.h"
#include "ohci1394.h"

#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
#define OHCI1394_DEBUG
#endif

#ifdef DBGMSG
#undef DBGMSG
#endif

#ifdef OHCI1394_DEBUG
#define DBGMSG(fmt, args...) \
printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
#else
#define DBGMSG(fmt, args...) do {} while (0)
#endif

/* print general (card independent) information */
#define PRINT_G(level, fmt, args...) \
printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)

/* print card specific information */
#define PRINT(level, fmt, args...) \
printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)

/* Module Parameters */
static int phys_dma = 1;
module_param(phys_dma, int, 0444);
MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");

static void dma_trm_tasklet(unsigned long data);
static void dma_trm_reset(struct dma_trm_ctx *d);

static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
			     enum context_type type, int ctx, int num_desc,
			     int buf_size, int split_buf_size, int context_base);
static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);

static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
			     enum context_type type, int ctx, int num_desc,
			     int context_base);

static void ohci1394_pci_remove(struct pci_dev *pdev);

#ifndef __LITTLE_ENDIAN
static const size_t hdr_sizes[] = {
	3,	/* TCODE_WRITEQ */
	4,	/* TCODE_WRITEB */
	3,	/* TCODE_WRITE_RESPONSE */
	0,	/* reserved */
	3,	/* TCODE_READQ */
	4,	/* TCODE_READB */
	3,	/* TCODE_READQ_RESPONSE */
	4,	/* TCODE_READB_RESPONSE */
	1,	/* TCODE_CYCLE_START */
	4,	/* TCODE_LOCK_REQUEST */
	2,	/* TCODE_ISO_DATA */
	4,	/* TCODE_LOCK_RESPONSE */
	/* rest is reserved or link-internal */
};

static inline void header_le32_to_cpu(quadlet_t *data, unsigned char tcode)
{
	size_t size;

	if (unlikely(tcode >= ARRAY_SIZE(hdr_sizes)))
		return;

	size = hdr_sizes[tcode];
	while (size--)
		data[size] = le32_to_cpu(data[size]);
}
#else
#define header_le32_to_cpu(w,x) do {} while (0)
#endif /* !LITTLE_ENDIAN */

/***********************************
 * IEEE-1394 functionality section *
 ***********************************/

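/*
 * PHY register access, as used below (a summary of what this code does;
 * see the OHCI-1394 specification for the authoritative layout):
 * PhyControl bit 15 (0x00008000) starts a read of the register whose
 * address sits in bits 8-11, bit 31 (0x80000000) signals read completion,
 * and the returned value appears in bits 16-23.  Bit 14 (0x00004000)
 * starts a write of the data placed in bits 0-7.
 */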
static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
{
	int i;
	unsigned long flags;
	quadlet_t r;

	spin_lock_irqsave (&ohci->phy_reg_lock, flags);

	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
			break;

		mdelay(1);
	}

	r = reg_read(ohci, OHCI1394_PhyControl);

	if (i >= OHCI_LOOP_COUNT)
		PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
		       r, r & 0x80000000, i);

	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);

	return (r & 0x00ff0000) >> 16;
}

static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
{
	int i;
	unsigned long flags;
	u32 r = 0;

	spin_lock_irqsave (&ohci->phy_reg_lock, flags);

	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		r = reg_read(ohci, OHCI1394_PhyControl);
		if (!(r & 0x00004000))
			break;

		mdelay(1);
	}

	if (i == OHCI_LOOP_COUNT)
		PRINT (KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
		       r, r & 0x00004000, i);

	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);

	return;
}

/* ORs our value into the current value */
static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
{
	u8 old;

	old = get_phy_reg (ohci, addr);
	old |= data;
	set_phy_reg (ohci, addr, old);

	return;
}

static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
			  int phyid, int isroot)
{
	quadlet_t *q = ohci->selfid_buf_cpu;
	quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);
	size_t size;
	quadlet_t q0, q1;

	/* Check status of self-id reception */

	if (ohci->selfid_swap)
		q0 = le32_to_cpu(q[0]);
	else
		q0 = q[0];

	if ((self_id_count & 0x80000000) ||
	    ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
		PRINT(KERN_ERR,
		      "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
		      self_id_count, q0, ohci->self_id_errors);

		/* Tip by James Goodwin <jamesg@Filanet.com>:
		 * We had an error, generate another bus reset in response. */
		if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
			set_phy_reg_mask (ohci, 1, 0x40);
			ohci->self_id_errors++;
		} else {
			PRINT(KERN_ERR,
			      "Too many SelfID reception errors, giving up!");
		}
		return;
	}

	/* SelfID Ok, reset error counter. */
	ohci->self_id_errors = 0;

	size = ((self_id_count & 0x00001FFC) >> 2) - 1;
	q++;

	while (size > 0) {
		if (ohci->selfid_swap) {
			q0 = le32_to_cpu(q[0]);
			q1 = le32_to_cpu(q[1]);
		} else {
			q0 = q[0];
			q1 = q[1];
		}

		if (q0 == ~q1) {
			DBGMSG ("SelfID packet 0x%x received", q0);
			hpsb_selfid_received(host, cpu_to_be32(q0));
			if (((q0 & 0x3f000000) >> 24) == phyid)
				DBGMSG ("SelfID for this node is 0x%08x", q0);
		} else {
			PRINT(KERN_ERR,
			      "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
		}
		q += 2;
		size -= 2;
	}

	DBGMSG("SelfID complete");

	return;
}
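
/*
 * Note on the checks above (descriptive comment; the bit positions follow
 * from the code): each self-ID quadlet is followed by its bitwise
 * complement, hence the q0 == ~q1 consistency test, and the sender's
 * physical ID sits in bits 24-29 ((q0 & 0x3f000000) >> 24).  SelfIDCount
 * carries an error flag in bit 31 and a generation number in bits 16-23,
 * which must match the generation stored in the first quadlet of the
 * self-ID buffer.
 */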

static void ohci_soft_reset(struct ti_ohci *ohci)
{
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
			break;
		mdelay(1);
	}
	DBGMSG ("Soft reset finished");
}


/* Generate the dma receive prgs and start the context */
static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	int i;

	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	for (i=0; i<d->num_desc; i++) {
		u32 c;

		c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
		if (generate_irq)
			c |= DMA_CTL_IRQ;

		d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);

		/* End of descriptor list? */
		if (i + 1 < d->num_desc) {
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
		} else {
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
		}

		d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
		d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
	}

	d->buf_ind = 0;
	d->buf_offset = 0;

	if (d->type == DMA_CTX_ISO) {
		/* Clear contextControl */
		reg_write(ohci, d->ctrlClear, 0xffffffff);

		/* Set bufferFill, isochHeader, multichannel for IR context */
		reg_write(ohci, d->ctrlSet, 0xd0000000);

		/* Set the context match register to match on all tags */
		reg_write(ohci, d->ctxtMatch, 0xf0000000);

		/* Clear the multi channel mask high and low registers */
		reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);

		/* Set up isoRecvIntMask to generate interrupts */
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
	}

	/* Tell the controller where the first AR program is */
	reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);

	/* Run context */
	reg_write(ohci, d->ctrlSet, 0x00008000);

	DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
}
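
/*
 * A note on the receive program built above: the descriptors form a ring.
 * Each INPUT_MORE entry branches to the next one with Z=1 (the low bit of
 * branchAddress), while the last entry points back at the first with Z=0,
 * so the controller stalls there until software re-links a recycled
 * buffer.
 */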

/* Initialize the dma transmit context */
static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);

	/* Stop the context */
	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	d->prg_ind = 0;
	d->sent_ind = 0;
	d->free_prgs = d->num_desc;
	d->branchAddrPtr = NULL;
	INIT_LIST_HEAD(&d->fifo_list);
	INIT_LIST_HEAD(&d->pending_list);

	if (d->type == DMA_CTX_ISO) {
		/* enable interrupts */
		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
	}

	DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
}

/* Count the number of available iso contexts */
static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
{
	int i,ctx=0;
	u32 tmp;

	reg_write(ohci, reg, 0xffffffff);
	tmp = reg_read(ohci, reg);

	DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);

	/* Count the number of contexts */
	for (i=0; i<32; i++) {
		if (tmp & 1) ctx++;
		tmp >>= 1;
	}
	return ctx;
}
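
/*
 * The probe above relies on a property of the iso interrupt mask
 * registers (per the OHCI specification): writing all ones sets only the
 * bits backed by implemented contexts, so reading back yields a bitmap
 * of the available contexts.  A minimal equivalent sketch using the
 * kernel's popcount helper:
 *
 *	reg_write(ohci, reg, 0xffffffff);
 *	ctx = hweight32(reg_read(ohci, reg));
 */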

/* Global initialization */
static void ohci_initialize(struct ti_ohci *ohci)
{
	quadlet_t buf;
	int num_ports, i;

	spin_lock_init(&ohci->phy_reg_lock);

	/* Set some defaults for these undefined bus options */
	buf = reg_read(ohci, OHCI1394_BusOptions);
	buf |= 0x60000000; /* Enable CMC and ISC */
	if (hpsb_disable_irm)
		buf &= ~0x80000000;
	else
		buf |= 0x80000000; /* Enable IRMC */
	buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
	buf &= ~0x18000000; /* Disable PMC and BMC */
	reg_write(ohci, OHCI1394_BusOptions, buf);
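
	/* BusOptions bit assignments touched above (from IEEE 1394 / OHCI;
	 * listed here for reference): bit 31 irmc, bit 30 cmc, bit 29 isc,
	 * bit 28 bmc, bit 27 pmc, bits 23-16 cyc_clk_acc,
	 * bits 15-12 max_rec. */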

	/* Set the bus number */
	reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);

	/* Enable posted writes */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);

	/* Clear link control register */
	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

	/* Enable cycle timer and cycle master and set the IRM
	 * contender bit in our self ID packets if appropriate. */
	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_CycleTimerEnable |
		  OHCI1394_LinkControl_CycleMaster);
	i = get_phy_reg(ohci, 4) | PHY_04_LCTRL;
	if (hpsb_disable_irm)
		i &= ~PHY_04_CONTENDER;
	else
		i |= PHY_04_CONTENDER;
	set_phy_reg(ohci, 4, i);

	/* Set up self-id dma buffer */
	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);

	/* enable self-id */
	reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID);

	/* Set the Config ROM mapping register */
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);

	/* Now get our max packet size */
	ohci->max_packet_size =
		1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);
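
	/* For example, max_rec = 9 in BusOptions[15:12] yields
	 * 1 << (9 + 1) = 1024 bytes of maximum asynchronous payload. */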

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);

	/* Initialize AR dma */
	initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
	initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);

	/* Initialize AT dma */
	initialize_dma_trm_ctx(&ohci->at_req_context);
	initialize_dma_trm_ctx(&ohci->at_resp_context);

	/* Accept AR requests from all nodes */
	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	/* Set the address range of the physical response unit.
	 * Most controllers do not implement it as a writable register though.
	 * They will keep a hardwired offset of 0x00010000 and show 0x0 as
	 * register content.
	 * To actually enable physical responses is the job of our interrupt
	 * handler which programs the physical request filter. */
	reg_write(ohci, OHCI1394_PhyUpperBound,
		  OHCI1394_PHYS_UPPER_BOUND_PROGRAMMED >> 16);

	DBGMSG("physUpperBoundOffset=%08x",
	       reg_read(ohci, OHCI1394_PhyUpperBound));

	/* Specify AT retries */
	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));

	/* We don't want hardware swapping */
	reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);

	/* Enable interrupts */
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_unrecoverableError |
		  OHCI1394_masterIntEnable |
		  OHCI1394_busReset |
		  OHCI1394_selfIDComplete |
		  OHCI1394_RSPkt |
		  OHCI1394_RQPkt |
		  OHCI1394_respTxComplete |
		  OHCI1394_reqTxComplete |
		  OHCI1394_isochRx |
		  OHCI1394_isochTx |
		  OHCI1394_postedWriteErr |
		  OHCI1394_cycleTooLong |
		  OHCI1394_cycleInconsistent);

	/* Enable link */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);

	buf = reg_read(ohci, OHCI1394_Version);
	PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%d] "
	      "MMIO=[%llx-%llx] Max Packet=[%d] IR/IT contexts=[%d/%d]",
	      ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
	      ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), ohci->dev->irq,
	      (unsigned long long)pci_resource_start(ohci->dev, 0),
	      (unsigned long long)pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
	      ohci->max_packet_size,
	      ohci->nb_iso_rcv_ctx, ohci->nb_iso_xmit_ctx);

	/* Check all of our ports to make sure that if anything is
	 * connected, we enable that port. */
	num_ports = get_phy_reg(ohci, 2) & 0xf;
	for (i = 0; i < num_ports; i++) {
		unsigned int status;

		set_phy_reg(ohci, 7, i);
		status = get_phy_reg(ohci, 8);

		if (status & 0x20)
			set_phy_reg(ohci, 8, status & ~1);
	}

	/* Serial EEPROM Sanity check. */
	if ((ohci->max_packet_size < 512) ||
	    (ohci->max_packet_size > 4096)) {
		/* Serial EEPROM contents are suspect, set a sane max packet
		 * size and print the raw contents for bug reports if verbose
		 * debug is enabled. */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		int i;
#endif

		PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
		      "attempting to set max_packet_size to 512 bytes");
		reg_write(ohci, OHCI1394_BusOptions,
			  (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
		ohci->max_packet_size = 512;
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		PRINT(KERN_DEBUG, " EEPROM Present: %d",
		      (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
		reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);

		for (i = 0;
		     ((i < 1000) &&
		      (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
			udelay(10);

		for (i = 0; i < 0x20; i++) {
			reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
			PRINT(KERN_DEBUG, " EEPROM %02x: %02x", i,
			      (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
		}
#endif
	}
}

/*
 * Insert a packet in the DMA fifo and generate the DMA prg
 * FIXME: rewrite the program in order to accept packets crossing
 *        page boundaries.
 *        check also that a single dma descriptor doesn't cross a
 *        page boundary.
 */
static void insert_packet(struct ti_ohci *ohci,
			  struct dma_trm_ctx *d, struct hpsb_packet *packet)
{
	u32 cycleTimer;
	int idx = d->prg_ind;

	DBGMSG("Inserting packet for node " NODE_BUS_FMT
	       ", tlabel=%d, tcode=0x%x, speed=%d",
	       NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
	       packet->tcode, packet->speed_code);

	d->prg_cpu[idx]->begin.address = 0;
	d->prg_cpu[idx]->begin.branchAddress = 0;

	if (d->type == DMA_CTX_ASYNC_RESP) {
		/*
		 * For response packets, we need to put a timeout value in
		 * the 16 lower bits of the status... let's try 1 sec timeout
		 */
		cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		d->prg_cpu[idx]->begin.status = cpu_to_le32(
			(((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
			((cycleTimer&0x01fff000)>>12));
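		/* That is: take the low 3 bits of the cycleSeconds field
		 * (cycleTimer bits 31-25), add one second (mod 8), and pair
		 * it with the current 13-bit cycleCount (bits 24-12) - a
		 * timestamp roughly one second ahead. */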

		DBGMSG("cycleTimer: %08x timeStamp: %08x",
		       cycleTimer, d->prg_cpu[idx]->begin.status);
	} else
		d->prg_cpu[idx]->begin.status = 0;

	if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {

		if (packet->type == hpsb_raw) {
			d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
			d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
			d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
		} else {
			d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
				(packet->header[0] & 0xFFFF);

			if (packet->tcode == TCODE_ISO_DATA) {
				/* Sending an async stream packet */
				d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
			} else {
				/* Sending a normal async request or response */
				d->prg_cpu[idx]->data[1] =
					(packet->header[1] & 0xFFFF) |
					(packet->header[0] & 0xFFFF0000);
				d->prg_cpu[idx]->data[2] = packet->header[2];
				d->prg_cpu[idx]->data[3] = packet->header[3];
			}
			header_le32_to_cpu(d->prg_cpu[idx]->data, packet->tcode);
		}

		if (packet->data_size) { /* block transmit */
			if (packet->tcode == TCODE_STREAM_DATA){
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x8);
			} else {
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x10);
			}
			d->prg_cpu[idx]->end.control =
				cpu_to_le32(DMA_CTL_OUTPUT_LAST |
					    DMA_CTL_IRQ |
					    DMA_CTL_BRANCH |
					    packet->data_size);
			/*
			 * Check that the packet data buffer
			 * does not cross a page boundary.
			 *
			 * XXX Fix this some day. eth1394 seems to trigger
			 * it, but ignoring it doesn't seem to cause a
			 * problem.
			 */
#if 0
			if (cross_bound((unsigned long)packet->data,
					packet->data_size)>0) {
				/* FIXME: do something about it */
				PRINT(KERN_ERR,
				      "%s: packet data addr: %p size %Zd bytes "
				      "cross page boundary", __func__,
				      packet->data, packet->data_size);
			}
#endif
			d->prg_cpu[idx]->end.address = cpu_to_le32(
				pci_map_single(ohci->dev, packet->data,
					       packet->data_size,
					       PCI_DMA_TODEVICE));

			d->prg_cpu[idx]->end.branchAddress = 0;
			d->prg_cpu[idx]->end.status = 0;
			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x3);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->end.branchAddress);
		} else { /* quadlet transmit */
			if (packet->type == hpsb_raw)
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    (packet->header_size + 4));
			else
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    packet->header_size);

			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x2);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->begin.branchAddress);
		}

	} else { /* iso packet */
		d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
			(packet->header[0] & 0xFFFF);
		d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
		header_le32_to_cpu(d->prg_cpu[idx]->data, packet->tcode);

		d->prg_cpu[idx]->begin.control =
			cpu_to_le32(DMA_CTL_OUTPUT_MORE |
				    DMA_CTL_IMMEDIATE | 0x8);
		d->prg_cpu[idx]->end.control =
			cpu_to_le32(DMA_CTL_OUTPUT_LAST |
				    DMA_CTL_UPDATE |
				    DMA_CTL_IRQ |
				    DMA_CTL_BRANCH |
				    packet->data_size);
		d->prg_cpu[idx]->end.address = cpu_to_le32(
			pci_map_single(ohci->dev, packet->data,
				       packet->data_size, PCI_DMA_TODEVICE));

		d->prg_cpu[idx]->end.branchAddress = 0;
		d->prg_cpu[idx]->end.status = 0;
		DBGMSG("Iso xmit context info: header[%08x %08x]\n"
		       " begin=%08x %08x %08x %08x\n"
		       " %08x %08x %08x %08x\n"
		       " end =%08x %08x %08x %08x",
		       d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->begin.control,
		       d->prg_cpu[idx]->begin.address,
		       d->prg_cpu[idx]->begin.branchAddress,
		       d->prg_cpu[idx]->begin.status,
		       d->prg_cpu[idx]->data[0],
		       d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->data[2],
		       d->prg_cpu[idx]->data[3],
		       d->prg_cpu[idx]->end.control,
		       d->prg_cpu[idx]->end.address,
		       d->prg_cpu[idx]->end.branchAddress,
		       d->prg_cpu[idx]->end.status);
		if (d->branchAddrPtr)
			*(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
		d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
	}
	d->free_prgs--;

	/* queue the packet in the appropriate context queue */
	list_add_tail(&packet->driver_list, &d->fifo_list);
	d->prg_ind = (d->prg_ind + 1) % d->num_desc;
}
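
/*
 * Descriptive note on the program built by insert_packet(): the packet
 * header travels as immediate data in the 'begin' descriptor
 * (OUTPUT_MORE/OUTPUT_LAST + DMA_CTL_IMMEDIATE), and any payload goes out
 * through the 'end' OUTPUT_LAST descriptor.  The Z value stored into the
 * previous descriptor's branch address (0x2 or 0x3 above) tells the
 * controller how many descriptor blocks the next program occupies.
 */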

/*
 * This function fills the FIFO with any pending packets
 * and runs or wakes up the DMA prg if necessary.
 *
 * The function MUST be called with the d->lock held.
 */
static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
{
	struct hpsb_packet *packet, *ptmp;
	int idx = d->prg_ind;
	int z = 0;

	/* insert the packets into the dma fifo */
	list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
		if (!d->free_prgs)
			break;

		/* For the first packet only */
		if (!z)
			z = (packet->data_size) ? 3 : 2;

		/* Insert the packet */
		list_del_init(&packet->driver_list);
		insert_packet(ohci, d, packet);
	}

	/* Nothing was inserted: either no free prgs or no pending packets */
	if (z == 0)
		return;

	/* Is the context running? (it should be, unless this is
	   the first packet to be sent in this context) */
	if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
		u32 nodeId = reg_read(ohci, OHCI1394_NodeID);

		DBGMSG("Starting transmit DMA ctx=%d",d->ctx);
		reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);

		/* Check that the node id is valid, and not 63 */
		if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
			PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");
		else
			reg_write(ohci, d->ctrlSet, 0x8000);
	} else {
		/* Wake up the dma context if necessary */
		if (!(reg_read(ohci, d->ctrlSet) & 0x400))
			DBGMSG("Waking transmit DMA ctx=%d",d->ctx);

		/* do this always, to avoid race condition */
		reg_write(ohci, d->ctrlSet, 0x1000);
	}

	return;
}

/* Transmission of an async or iso packet */
static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
{
	struct ti_ohci *ohci = host->hostdata;
	struct dma_trm_ctx *d;
	unsigned long flags;

	if (packet->data_size > ohci->max_packet_size) {
		PRINT(KERN_ERR,
		      "Transmit packet size %Zd is too big",
		      packet->data_size);
		return -EOVERFLOW;
	}

	if (packet->type == hpsb_raw)
		d = &ohci->at_req_context;
	else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
		d = &ohci->at_resp_context;
	else
		d = &ohci->at_req_context;
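	/* (In IEEE 1394 tcode numbering, response codes have bit 1 set -
	 * write response 2, read quadlet/block responses 6/7, lock response
	 * 0xB - which is what the tcode & 0x02 test keys on; TCODE_ISO_DATA
	 * (0xA) also has that bit set, hence the explicit exception.) */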

	spin_lock_irqsave(&d->lock,flags);

	list_add_tail(&packet->driver_list, &d->pending_list);

	dma_trm_flush(ohci, d);

	spin_unlock_irqrestore(&d->lock,flags);

	return 0;
}

static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
{
	struct ti_ohci *ohci = host->hostdata;
	int retval = 0, phy_reg;

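	/* PHY register bits used by the bus-reset cases below (names as in
	 * the inline comments): register 1 carries RHB (root hold-off,
	 * bit 7, 0x80) and IBR (initiate bus reset, bit 6, 0x40); register 5
	 * carries ISBR (initiate short bus reset, 0x40). */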
	switch (cmd) {
	case RESET_BUS:
		switch (arg) {
		case SHORT_RESET:
			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* set IBR */
			break;
		case SHORT_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (phy_reg & 0x80) {
				phy_reg &= ~0x80;
				set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg &= ~0x80;
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
			break;
		case SHORT_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (!(phy_reg & 0x80)) {
				phy_reg |= 0x80;
				set_phy_reg(ohci, 1, phy_reg); /* set RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0xc0;
			set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
			break;
		default:
			retval = -1;
		}
		break;

	case GET_CYCLE_COUNTER:
		retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		break;

	case SET_CYCLE_COUNTER:
		reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
		break;

	case SET_BUS_ID:
		PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
		break;

	case ACT_CYCLE_MASTER:
		if (arg) {
			/* check if we are root and other nodes are present */
			u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
			if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
				/*
				 * enable cycleTimer, cycleMaster
				 */
				DBGMSG("Cycle master enabled");
				reg_write(ohci, OHCI1394_LinkControlSet,
					  OHCI1394_LinkControl_CycleTimerEnable |
					  OHCI1394_LinkControl_CycleMaster);
			}
		} else {
			/* disable cycleTimer, cycleMaster, cycleSource */
			reg_write(ohci, OHCI1394_LinkControlClear,
				  OHCI1394_LinkControl_CycleTimerEnable |
				  OHCI1394_LinkControl_CycleMaster |
				  OHCI1394_LinkControl_CycleSource);
		}
		break;

	case CANCEL_REQUESTS:
		DBGMSG("Cancel request received");
		dma_trm_reset(&ohci->at_req_context);
		dma_trm_reset(&ohci->at_resp_context);
		break;

	default:
		PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
			cmd);
		break;
	}
	return retval;
}

/***********************************
 * rawiso ISO reception            *
 ***********************************/

/*
  We use either buffer-fill or packet-per-buffer DMA mode. The DMA
  buffer is split into "blocks" (regions described by one DMA
  descriptor). Each block must be one page or less in size, and
  must not cross a page boundary.

  There is one little wrinkle with buffer-fill mode: a packet that
  starts in the final block may wrap around into the first block. But
  the user API expects all packets to be contiguous. Our solution is
  to keep the very last page of the DMA buffer in reserve - if a
  packet spans the gap, we copy its tail into this page.
*/
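
/*
 * Worked example (numbers are illustrative, not from the source): with a
 * 16-page DMA buffer, pages 0-14 become 15 one-page blocks for the ring
 * and page 15 is the reserved guard page; a packet that starts near the
 * end of block 14 and wraps into block 0 gets its tail copied into the
 * guard page so the reader sees one contiguous packet.
 */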

struct ohci_iso_recv {
	struct ti_ohci *ohci;

	struct ohci1394_iso_tasklet task;
	int task_active;

	enum { BUFFER_FILL_MODE = 0,
	       PACKET_PER_BUFFER_MODE = 1 } dma_mode;

	/* memory and PCI mapping for the DMA descriptors */
	struct dma_prog_region prog;
	struct dma_cmd *block; /* = (struct dma_cmd*) prog.kvirt */

	/* how many DMA blocks fit in the buffer */
	unsigned int nblocks;

	/* stride of DMA blocks */
	unsigned int buf_stride;

	/* number of blocks to batch between interrupts */
	int block_irq_interval;

	/* block that DMA will finish next */
	int block_dma;

	/* (buffer-fill only) block that the reader will release next */
	int block_reader;

	/* (buffer-fill only) bytes of buffer the reader has released,
	   less than one block */
	int released_bytes;

	/* (buffer-fill only) buffer offset at which the next packet will appear */
	int dma_offset;

	/* OHCI DMA context control registers */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
	u32 ContextMatch;
};

static void ohci_iso_recv_task(unsigned long data);
static void ohci_iso_recv_stop(struct hpsb_iso *iso);
static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
static int  ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
static void ohci_iso_recv_program(struct hpsb_iso *iso);

static int ohci_iso_recv_init(struct hpsb_iso *iso)
{
	struct ti_ohci *ohci = iso->host->hostdata;
	struct ohci_iso_recv *recv;
	int ctx;
	int ret = -ENOMEM;

	recv = kmalloc(sizeof(*recv), GFP_KERNEL);
	if (!recv)
		return -ENOMEM;

	iso->hostdata = recv;
	recv->ohci = ohci;
	recv->task_active = 0;
	dma_prog_region_init(&recv->prog);
	recv->block = NULL;

	/* use buffer-fill mode, unless irq_interval is 1
	   (note: multichannel requires buffer-fill) */

	if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
	     iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
		recv->dma_mode = PACKET_PER_BUFFER_MODE;
	} else {
		recv->dma_mode = BUFFER_FILL_MODE;
	}

	/* set nblocks, buf_stride, block_irq_interval */

	if (recv->dma_mode == BUFFER_FILL_MODE) {
		recv->buf_stride = PAGE_SIZE;

		/* one block per page of data in the DMA buffer, minus the final guard page */
		recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
		if (recv->nblocks < 3) {
			DBGMSG("ohci_iso_recv_init: DMA buffer too small");
			goto err;
		}

		/* iso->irq_interval is in packets - translate that to blocks */
		if (iso->irq_interval == 1)
			recv->block_irq_interval = 1;
		else
			recv->block_irq_interval = iso->irq_interval *
						   ((recv->nblocks+1)/iso->buf_packets);
		if (recv->block_irq_interval*4 > recv->nblocks)
			recv->block_irq_interval = recv->nblocks/4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

	} else {
		int max_packet_size;

		recv->nblocks = iso->buf_packets;
		recv->block_irq_interval = iso->irq_interval;
		if (recv->block_irq_interval * 4 > iso->buf_packets)
			recv->block_irq_interval = iso->buf_packets / 4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

		/* choose a buffer stride */
		/* must be a power of 2, and <= PAGE_SIZE */

		max_packet_size = iso->buf_size / iso->buf_packets;

		for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
		     recv->buf_stride *= 2);

		if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
		    recv->buf_stride > PAGE_SIZE) {
			/* this shouldn't happen, but anyway... */
			DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
			goto err;
		}
	}

	recv->block_reader = 0;
	recv->released_bytes = 0;
	recv->block_dma = 0;
	recv->dma_offset = 0;

	/* size of DMA program = one descriptor per block */
	if (dma_prog_region_alloc(&recv->prog,
				  sizeof(struct dma_cmd) * recv->nblocks,
				  recv->ohci->dev))
		goto err;

	recv->block = (struct dma_cmd*) recv->prog.kvirt;

	ohci1394_init_iso_tasklet(&recv->task,
				  iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
						       OHCI_ISO_RECEIVE,
				  ohci_iso_recv_task, (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0) {
		ret = -EBUSY;
		goto err;
	}

	recv->task_active = 1;

	/* recv context registers are spaced 32 bytes apart */
	ctx = recv->task.context;
	recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
	recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
	recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
	recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;

	if (iso->channel == -1) {
		/* clear multi-channel selection mask */
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
	}

	/* write the DMA program */
	ohci_iso_recv_program(iso);

	DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
	       " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
	       recv->dma_mode == BUFFER_FILL_MODE ?
	       "buffer-fill" : "packet-per-buffer",
	       iso->buf_size/PAGE_SIZE, iso->buf_size,
	       recv->nblocks, recv->buf_stride, recv->block_irq_interval);

	return 0;

err:
	ohci_iso_recv_shutdown(iso);
	return ret;
}

static void ohci_iso_recv_stop(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	/* disable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);

	/* halt DMA */
	ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
}

static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	if (recv->task_active) {
		ohci_iso_recv_stop(iso);
		ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
		recv->task_active = 0;
	}

	dma_prog_region_free(&recv->prog);
	kfree(recv);
	iso->hostdata = NULL;
}

/* set up a "gapped" ring buffer DMA program */
static void ohci_iso_recv_program(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int blk;

	/* address of 'branch' field in previous DMA descriptor */
	u32 *prev_branch = NULL;

	for (blk = 0; blk < recv->nblocks; blk++) {
		u32 control;

		/* the DMA descriptor */
		struct dma_cmd *cmd = &recv->block[blk];

		/* offset of the DMA descriptor relative to the DMA prog buffer */
		unsigned long prog_offset = blk * sizeof(struct dma_cmd);

		/* offset of this packet's data within the DMA buffer */
		unsigned long buf_offset = blk * recv->buf_stride;

		if (recv->dma_mode == BUFFER_FILL_MODE) {
			control = 2 << 28; /* INPUT_MORE */
		} else {
			control = 3 << 28; /* INPUT_LAST */
		}

		control |= 8 << 24; /* s = 1, update xferStatus and resCount */

		/* interrupt on last block, and at intervals */
		if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
			control |= 3 << 20; /* want interrupt */
		}

		control |= 3 << 18; /* enable branch to address */
		control |= recv->buf_stride;

		cmd->control = cpu_to_le32(control);
		cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
		cmd->branchAddress = 0; /* filled in on next loop */
		cmd->status = cpu_to_le32(recv->buf_stride);

		/* link the previous descriptor to this one */
		if (prev_branch) {
			*prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
		}

		prev_branch = &cmd->branchAddress;
	}

	/* the final descriptor's branch address and Z should be left at 0 */
}
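
/*
 * Summary of the control word assembled above (field meanings as noted in
 * the inline comments): bits 31-28 select the command (2 = INPUT_MORE,
 * 3 = INPUT_LAST), 8 << 24 requests xferStatus/resCount writeback,
 * 3 << 20 requests an interrupt, 3 << 18 enables branching, and the low
 * 16 bits hold the request count (the block size, buf_stride).
 */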

/* listen or unlisten to a specific channel (multi-channel mode only) */
static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int reg, i;

	if (channel < 32) {
		reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
		i = channel;
	} else {
		reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
		i = channel - 32;
	}

	reg_write(recv->ohci, reg, (1 << i));

	/* issue a dummy read to force all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
}

static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i)) {
			if (i < 32)
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
			else
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
		} else {
			if (i < 32)
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
			else
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
		}
	}

	/* issue a dummy read to force all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
}

static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	struct ti_ohci *ohci = recv->ohci;
	u32 command, contextMatch;

	reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* always keep ISO headers */
	command = (1 << 30);

	if (recv->dma_mode == BUFFER_FILL_MODE)
		command |= (1 << 31);

	reg_write(recv->ohci, recv->ContextControlSet, command);

	/* match on specified tags */
	contextMatch = tag_mask << 28;

	if (iso->channel == -1) {
		/* enable multichannel reception */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
	} else {
		/* listen on channel */
		contextMatch |= iso->channel;
	}

	if (cycle != -1) {
		u32 seconds;

		/* enable cycleMatch */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));

		/* set starting cycle */
		cycle &= 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		cycle |= (seconds & 3) << 13;

		contextMatch |= cycle << 12;
	}

	if (sync != -1) {
		/* set sync flag on first DMA descriptor */
		struct dma_cmd *cmd = &recv->block[recv->block_dma];
		cmd->control |= cpu_to_le32(DMA_CTL_WAIT);

		/* match sync field */
		contextMatch |= (sync&0xf)<<8;
	}

	reg_write(recv->ohci, recv->ContextMatch, contextMatch);

	/* address of first descriptor block */
	command = dma_prog_region_offset_to_bus(&recv->prog,
						recv->block_dma * sizeof(struct dma_cmd));
	command |= 1; /* Z=1 */

	reg_write(recv->ohci, recv->CommandPtr, command);

	/* enable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);

	wmb();

	/* run */
	reg_write(recv->ohci, recv->ContextControlSet, 0x8000);

	/* issue a dummy read of the cycle timer register to force
	   all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);

	/* check RUN */
	if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR,
		      "Error starting IR DMA (ContextControl 0x%08x)\n",
		      reg_read(recv->ohci, recv->ContextControlSet));
		return -1;
	}

	return 0;
}

static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
{
	/* re-use the DMA descriptor for the block */
	/* by linking the previous descriptor to it */

	int next_i = block;
	int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);

	struct dma_cmd *next = &recv->block[next_i];
	struct dma_cmd *prev = &recv->block[prev_i];

	/* ignore out-of-range requests */
	if ((block < 0) || (block >= recv->nblocks))
		return;

	/* 'next' becomes the new end of the DMA chain,
	   so disable branch and enable interrupt */
	next->branchAddress = 0;
	next->control |= cpu_to_le32(3 << 20);
	next->status = cpu_to_le32(recv->buf_stride);

	/* link prev to next */
	prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
									sizeof(struct dma_cmd) * next_i)
					  | 1); /* Z=1 */

	/* disable interrupt on previous DMA descriptor, except at intervals */
	if ((prev_i % recv->block_irq_interval) == 0) {
		prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
	} else {
		prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
	}
	wmb();

	/* wake up DMA in case it fell asleep */
	reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
}

static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
					     struct hpsb_iso_packet_info *info)
{
	/* release the memory where the packet was */
	recv->released_bytes += info->total_len;

	/* have we released enough memory for one block? */
	while (recv->released_bytes > recv->buf_stride) {
		ohci_iso_recv_release_block(recv, recv->block_reader);
		recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
		recv->released_bytes -= recv->buf_stride;
	}
}

static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	if (recv->dma_mode == BUFFER_FILL_MODE) {
		ohci_iso_recv_bufferfill_release(recv, info);
	} else {
		ohci_iso_recv_release_block(recv, info - iso->infos);
	}
}

/* parse all packets from blocks that have been fully received */
static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int wake = 0;
	int runaway = 0;
	struct ti_ohci *ohci = recv->ohci;

	while (1) {
		/* we expect the next parsable packet to begin at recv->dma_offset */
		/* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */

		unsigned int offset;
		unsigned short len, cycle, total_len;
		unsigned char channel, tag, sy;

		unsigned char *p = iso->data_buf.kvirt;

		unsigned int this_block = recv->dma_offset/recv->buf_stride;

		/* don't loop indefinitely */
		if (runaway++ > 100000) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR,
			      "IR DMA error - Runaway during buffer parsing!\n");
			break;
		}

		/* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
		if (this_block == recv->block_dma)
			break;

		wake = 1;

		/* parse data length, tag, channel, and sy */

		/* note: we keep our own local copies of 'len' and 'offset'
		   so the user can't mess with them by poking in the mmap area */

		len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);

		if (len > 4096) {
			PRINT(KERN_ERR,
			      "IR DMA error - bogus 'len' value %u\n", len);
		}

		channel = p[recv->dma_offset+1] & 0x3F;
		tag = p[recv->dma_offset+1] >> 6;
		sy = p[recv->dma_offset+0] & 0xF;

		/* advance to data payload */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* dma_offset now points to the first byte of the data payload */
		offset = recv->dma_offset;

		/* advance to xferStatus/timeStamp */
		recv->dma_offset += len;

		total_len = len + 8; /* 8 bytes header+trailer in OHCI packet */
		/* payload is padded to 4 bytes */
		if (len % 4) {
			recv->dma_offset += 4 - (len%4);
			total_len += 4 - (len%4);
		}

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			/* uh oh, the packet data wraps from the last
			   to the first DMA block - make the packet
			   contiguous by copying its "tail" into the
			   guard page */

			int guard_off = recv->buf_stride*recv->nblocks;
			int tail_len = len - (guard_off - offset);

			if (tail_len > 0 && tail_len < recv->buf_stride) {
				memcpy(iso->data_buf.kvirt + guard_off,
				       iso->data_buf.kvirt,
				       tail_len);
			}

			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* parse timestamp */
		cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
		cycle &= 0x1FFF;

		/* advance to next packet */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		hpsb_iso_packet_received(iso, offset, len, total_len, cycle, channel, tag, sy);
	}

	if (wake)
		hpsb_iso_wake(iso);
}
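
/*
 * For reference, the buffer-fill packet layout being parsed above (per
 * section 10.6.1.1 of the OHCI spec, as cited in the code): a 4-byte
 * header quadlet (sy in byte 0, tag/channel in byte 1, length in bytes
 * 2-3, little-endian), then the payload padded to a quadlet boundary,
 * then a 4-byte trailer whose first two bytes carry the 13-bit timeStamp;
 * hence total_len = len + 8 plus padding.
 */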

static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int loop;
	struct ti_ohci *ohci = recv->ohci;

	/* loop over all blocks */
	for (loop = 0; loop < recv->nblocks; loop++) {

		/* check block_dma to see if it's done */
		struct dma_cmd *im = &recv->block[recv->block_dma];

		/* check the DMA descriptor for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(im->status) >> 16;

		/* rescount is the number of bytes *remaining to be written* in the block */
		u16 rescount = le32_to_cpu(im->status) & 0xFFFF;

		unsigned char event = xferstatus & 0x1F;

		if (!event) {
			/* nothing has happened to this block yet */
			break;
		}

		if (event != 0x11) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR,
			      "IR DMA error - OHCI error code 0x%02x\n", event);
		}

		if (rescount != 0) {
			/* the card is still writing to this block;
			   we can't touch it until it's done */
			break;
		}

		/* OK, the block is finished... */

		/* sync our view of the block */
		dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);

		/* reset the DMA descriptor */
		im->status = cpu_to_le32(recv->buf_stride);

		/* advance block_dma */
		recv->block_dma = (recv->block_dma + 1) % recv->nblocks;

		if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
			atomic_inc(&iso->overflows);
			DBGMSG("ISO reception overflow - "
			       "ran out of DMA blocks");
		}
	}

	/* parse any packets that have arrived */
	ohci_iso_recv_bufferfill_parse(iso, recv);
}

static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int count;
	int wake = 0;
	struct ti_ohci *ohci = recv->ohci;

	/* loop over the entire buffer */
	for (count = 0; count < recv->nblocks; count++) {
		u32 packet_len = 0;

		/* pointer to the DMA descriptor */
		struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;

		/* check the DMA descriptor for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(il->status) >> 16;
		u16 rescount = le32_to_cpu(il->status) & 0xFFFF;

		unsigned char event = xferstatus & 0x1F;

		if (!event) {
			/* this packet hasn't come in yet; we are done for now */
			goto out;
		}

		if (event == 0x11) {
			/* packet received successfully! */

			/* rescount is the number of bytes *remaining* in the packet buffer,
			   after the packet was written */
			packet_len = recv->buf_stride - rescount;

		} else if (event == 0x02) {
			PRINT(KERN_ERR, "IR DMA error - packet too long for buffer\n");
		} else if (event) {
			PRINT(KERN_ERR, "IR DMA error - OHCI error code 0x%02x\n", event);
		}

		/* sync our view of the buffer */
		dma_region_sync_for_cpu(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);

		/* record the per-packet info */
		{
			/* iso header is 8 bytes ahead of the data payload */
			unsigned char *hdr;

			unsigned int offset;
			unsigned short cycle;
			unsigned char channel, tag, sy;

			offset = iso->pkt_dma * recv->buf_stride;
			hdr = iso->data_buf.kvirt + offset;

			/* skip iso header */
			offset += 8;
			packet_len -= 8;

			cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
			channel = hdr[5] & 0x3F;
			tag = hdr[5] >> 6;
			sy = hdr[4] & 0xF;

			hpsb_iso_packet_received(iso, offset, packet_len,
						 recv->buf_stride, cycle, channel, tag, sy);
		}

		/* reset the DMA descriptor */
		il->status = cpu_to_le32(recv->buf_stride);

		wake = 1;
		recv->block_dma = iso->pkt_dma;
	}

out:
	if (wake)
		hpsb_iso_wake(iso);
}

static void ohci_iso_recv_task(unsigned long data)
{
	struct hpsb_iso *iso = (struct hpsb_iso*) data;
	struct ohci_iso_recv *recv = iso->hostdata;

	if (recv->dma_mode == BUFFER_FILL_MODE)
		ohci_iso_recv_bufferfill_task(iso, recv);
	else
		ohci_iso_recv_packetperbuf_task(iso, recv);
}

/***********************************
 * rawiso ISO transmission         *
 ***********************************/

struct ohci_iso_xmit {
	struct ti_ohci *ohci;
	struct dma_prog_region prog;
	struct ohci1394_iso_tasklet task;
	int task_active;
	int last_cycle;
	atomic_t skips;

	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
};

/* transmission DMA program:
   one OUTPUT_MORE_IMMEDIATE for the IT header
   one OUTPUT_LAST for the buffer data */

struct iso_xmit_cmd {
	struct dma_cmd output_more_immediate;
	u8 iso_hdr[8];
	u32 unused[2];
	struct dma_cmd output_last;
};
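
/*
 * Layout note: OUTPUT_MORE_IMMEDIATE carries its 8-byte IT header (plus
 * 8 bytes of padding) inline, so it occupies two 16-byte descriptor
 * slots; together with the OUTPUT_LAST that makes three slots, matching
 * the Z=3 encoded into the branch address in ohci_iso_xmit_queue() below.
 */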

static int ohci_iso_xmit_init(struct hpsb_iso *iso);
static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
static void ohci_iso_xmit_task(unsigned long data);

static int ohci_iso_xmit_init(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit;
	unsigned int prog_size;
	int ctx;
	int ret = -ENOMEM;

	xmit = kmalloc(sizeof(*xmit), GFP_KERNEL);
	if (!xmit)
		return -ENOMEM;

	iso->hostdata = xmit;
	xmit->ohci = iso->host->hostdata;
	xmit->task_active = 0;
	xmit->last_cycle = -1;
	atomic_set(&iso->skips, 0);

	dma_prog_region_init(&xmit->prog);

	prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;

	if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
		goto err;

	ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
				  ohci_iso_xmit_task, (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0) {
		ret = -EBUSY;
		goto err;
	}

	xmit->task_active = 1;

	/* xmit context registers are spaced 16 bytes apart */
	ctx = xmit->task.context;
	xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
	xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
	xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;

	return 0;

err:
	ohci_iso_xmit_shutdown(iso);
	return ret;
}

static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;

	/* disable interrupts */
	reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);

	/* halt DMA */
	if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
		/* XXX the DMA context will lock up if you try to send too much data! */
		PRINT(KERN_ERR,
		      "you probably exceeded the OHCI card's bandwidth limit - "
		      "reload the module and reduce xmit bandwidth");
	}
}

static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;

	if (xmit->task_active) {
		ohci_iso_xmit_stop(iso);
		ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
		xmit->task_active = 0;
	}

	dma_prog_region_free(&xmit->prog);
	kfree(xmit);
	iso->hostdata = NULL;
}

static void ohci_iso_xmit_task(unsigned long data)
{
	struct hpsb_iso *iso = (struct hpsb_iso*) data;
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;
	int wake = 0;
	int count;

	/* check the whole buffer if necessary, starting at pkt_dma */
	for (count = 0; count < iso->buf_packets; count++) {
		int cycle;

		/* DMA descriptor */
		struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);

		/* check for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
		u8 event = xferstatus & 0x1F;

		if (!event) {
			/* packet hasn't been sent yet; we are done for now */
			break;
		}

		if (event != 0x11)
			PRINT(KERN_ERR,
			      "IT DMA error - OHCI error code 0x%02x\n", event);

		/* at least one packet went out, so wake up the writer */
		wake = 1;

		/* parse cycle */
		cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;

		if (xmit->last_cycle > -1) {
			int cycle_diff = cycle - xmit->last_cycle;
			int skip;

			/* unwrap */
			if (cycle_diff < 0) {
				cycle_diff += 8000;
				if (cycle_diff < 0)
					PRINT(KERN_ERR, "bogus cycle diff %d\n",
					      cycle_diff);
			}

			skip = cycle_diff - 1;
			if (skip > 0) {
				DBGMSG("skipped %d cycles without packet loss", skip);
				atomic_add(skip, &iso->skips);
			}
		}
		xmit->last_cycle = cycle;

		/* tell the subsystem the packet has gone out */
		hpsb_iso_packet_sent(iso, cycle, event != 0x11);

		/* reset the DMA descriptor for next time */
		cmd->output_last.status = 0;
	}

	if (wake)
		hpsb_iso_wake(iso);
}
1893
1894 static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
1895 {
1896 struct ohci_iso_xmit *xmit = iso->hostdata;
1897 struct ti_ohci *ohci = xmit->ohci;
1898
1899 int next_i, prev_i;
1900 struct iso_xmit_cmd *next, *prev;
1901
1902 unsigned int offset;
1903 unsigned short len;
1904 unsigned char tag, sy;
1905
1906 /* check that the packet doesn't cross a page boundary
1907 (we could allow this if we added OUTPUT_MORE descriptor support) */
1908 if (cross_bound(info->offset, info->len)) {
1909 PRINT(KERN_ERR,
1910 "rawiso xmit: packet %u crosses a page boundary",
1911 iso->first_packet);
1912 return -EINVAL;
1913 }
1914
1915 offset = info->offset;
1916 len = info->len;
1917 tag = info->tag;
1918 sy = info->sy;
1919
1920 /* sync up the card's view of the buffer */
1921 dma_region_sync_for_device(&iso->data_buf, offset, len);
1922
1923 	/* append first_packet to the DMA chain by linking the
1924 	 * previous descriptor to it; next will then become the
1925 	 * new end of the DMA chain */
1926
1927 next_i = iso->first_packet;
1928 prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);
1929
1930 next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
1931 prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);
1932
1933 /* set up the OUTPUT_MORE_IMMEDIATE descriptor */
1934 memset(next, 0, sizeof(struct iso_xmit_cmd));
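	/* 0x02000008 decodes, per the OHCI 1.1 descriptor layout, as
	 * cmd = OUTPUT_MORE, key = 2 (immediate data), reqCount = 8:
	 * the two iso header quadlets below travel inline with the
	 * descriptor instead of being fetched from the data buffer */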
1935 next->output_more_immediate.control = cpu_to_le32(0x02000008);
1936
1937 /* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */
1938
1939 /* tcode = 0xA, and sy */
1940 next->iso_hdr[0] = 0xA0 | (sy & 0xF);
1941
1942 /* tag and channel number */
1943 next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);
1944
1945 /* transmission speed */
1946 next->iso_hdr[2] = iso->speed & 0x7;
1947
1948 /* payload size */
1949 next->iso_hdr[6] = len & 0xFF;
1950 next->iso_hdr[7] = len >> 8;
1951
1952 /* set up the OUTPUT_LAST */
1953 next->output_last.control = cpu_to_le32(1 << 28);
1954 next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
1955 next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
1956 next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
1957 next->output_last.control |= cpu_to_le32(len);
1958
1959 /* payload bus address */
1960 next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));
1961
1962 /* leave branchAddress at zero for now */
1963
1964 /* re-write the previous DMA descriptor to chain to this one */
1965
1966 /* set prev branch address to point to next (Z=3) */
1967 prev->output_last.branchAddress = cpu_to_le32(
1968 dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);
1969
1970 /*
1971 * Link the skip address to this descriptor itself. This causes a
1972 * context to skip a cycle whenever lost cycles or FIFO overruns occur,
1973 	 * without dropping the data at that point. The application should then
1974 * decide whether this is an error condition or not. Some protocols
1975 * can deal with this by dropping some rate-matching padding packets.
1976 */
1977 next->output_more_immediate.branchAddress =
1978 prev->output_last.branchAddress;
1979
1980 /* disable interrupt, unless required by the IRQ interval */
1981 if (prev_i % iso->irq_interval) {
1982 prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
1983 } else {
1984 prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
1985 }
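	/* with an interrupt raised only every irq_interval packets, the
	 * xmit tasklet reclaims completed descriptors in batches rather
	 * than taking one IRQ per packet */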
1986
1987 wmb();
1988
1989 /* wake DMA in case it is sleeping */
1990 reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);
1991
1992 /* issue a dummy read of the cycle timer to force all PCI
1993 writes to be posted immediately */
1994 mb();
1995 reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);
1996
1997 return 0;
1998 }
1999
2000 static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
2001 {
2002 struct ohci_iso_xmit *xmit = iso->hostdata;
2003 struct ti_ohci *ohci = xmit->ohci;
2004
2005 /* clear out the control register */
2006 reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
2007 wmb();
2008
2009 /* address and length of first descriptor block (Z=3) */
2010 reg_write(xmit->ohci, xmit->CommandPtr,
2011 dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);
2012
2013 /* cycle match */
2014 if (cycle != -1) {
2015 u32 start = cycle & 0x1FFF;
2016
2017 /* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
2018 just snarf them from the current time */
2019 u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
2020
2021 /* advance one second to give some extra time for DMA to start */
2022 seconds += 1;
2023
2024 start |= (seconds & 3) << 13;
2025
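		/* ContextControl: bit 31 is cycleMatchEnable, bits 30-16
		 * carry the 15-bit match value (two seconds bits plus the
		 * 13-bit cycle count assembled above) */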
2026 reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
2027 }
2028
2029 /* enable interrupts */
2030 reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);
2031
2032 /* run */
2033 reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
2034 mb();
2035
2036 /* wait 100 usec to give the card time to go active */
2037 udelay(100);
2038
2039 /* check the RUN bit */
2040 if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
2041 		PRINT(KERN_ERR, "Error starting IT DMA (ContextControl 0x%08x)",
2042 reg_read(xmit->ohci, xmit->ContextControlSet));
2043 return -1;
2044 }
2045
2046 return 0;
2047 }
2048
2049 static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
2050 {
2051
2052 	switch (cmd) {
2053 case XMIT_INIT:
2054 return ohci_iso_xmit_init(iso);
2055 case XMIT_START:
2056 return ohci_iso_xmit_start(iso, arg);
2057 case XMIT_STOP:
2058 ohci_iso_xmit_stop(iso);
2059 return 0;
2060 case XMIT_QUEUE:
2061 return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
2062 case XMIT_SHUTDOWN:
2063 ohci_iso_xmit_shutdown(iso);
2064 return 0;
2065
2066 case RECV_INIT:
2067 return ohci_iso_recv_init(iso);
2068 case RECV_START: {
2069 int *args = (int*) arg;
2070 return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
2071 }
2072 case RECV_STOP:
2073 ohci_iso_recv_stop(iso);
2074 return 0;
2075 case RECV_RELEASE:
2076 ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
2077 return 0;
2078 case RECV_FLUSH:
2079 ohci_iso_recv_task((unsigned long) iso);
2080 return 0;
2081 case RECV_SHUTDOWN:
2082 ohci_iso_recv_shutdown(iso);
2083 return 0;
2084 case RECV_LISTEN_CHANNEL:
2085 ohci_iso_recv_change_channel(iso, arg, 1);
2086 return 0;
2087 case RECV_UNLISTEN_CHANNEL:
2088 ohci_iso_recv_change_channel(iso, arg, 0);
2089 return 0;
2090 case RECV_SET_CHANNEL_MASK:
2091 ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
2092 return 0;
2093
2094 default:
2095 PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
2096 cmd);
2097 break;
2098 }
2099 return -EINVAL;
2100 }
2101
2102 /***************************************
2103 * IEEE-1394 functionality section END *
2104 ***************************************/
2105
2106
2107 /********************************************************
2108 * Global stuff (interrupt handler, init/shutdown code) *
2109 ********************************************************/
2110
2111 static void dma_trm_reset(struct dma_trm_ctx *d)
2112 {
2113 unsigned long flags;
2114 LIST_HEAD(packet_list);
2115 struct ti_ohci *ohci = d->ohci;
2116 struct hpsb_packet *packet, *ptmp;
2117
2118 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
2119
2120 /* Lock the context, reset it and release it. Move the packets
2121 * that were pending in the context to packet_list and free
2122 * them after releasing the lock. */
2123
2124 spin_lock_irqsave(&d->lock, flags);
2125
2126 list_splice_init(&d->fifo_list, &packet_list);
2127 list_splice_init(&d->pending_list, &packet_list);
2128
2129 d->branchAddrPtr = NULL;
2130 d->sent_ind = d->prg_ind;
2131 d->free_prgs = d->num_desc;
2132
2133 spin_unlock_irqrestore(&d->lock, flags);
2134
2135 if (list_empty(&packet_list))
2136 return;
2137
2138 PRINT(KERN_INFO, "AT dma reset ctx=%d, aborting transmission", d->ctx);
2139
2140 /* Now process subsystem callbacks for the packets from this
2141 * context. */
2142 list_for_each_entry_safe(packet, ptmp, &packet_list, driver_list) {
2143 list_del_init(&packet->driver_list);
2144 hpsb_packet_sent(ohci->host, packet, ACKX_ABORTED);
2145 }
2146 }
2147
2148 static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
2149 quadlet_t rx_event,
2150 quadlet_t tx_event)
2151 {
2152 struct ohci1394_iso_tasklet *t;
2153 unsigned long mask;
2154 unsigned long flags;
2155
2156 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
2157
2158 list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
2159 mask = 1 << t->context;
2160
2161 if (t->type == OHCI_ISO_TRANSMIT) {
2162 if (tx_event & mask)
2163 tasklet_schedule(&t->tasklet);
2164 } else {
2165 /* OHCI_ISO_RECEIVE or OHCI_ISO_MULTICHANNEL_RECEIVE */
2166 if (rx_event & mask)
2167 tasklet_schedule(&t->tasklet);
2168 }
2169 }
2170
2171 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
2172 }
2173
2174 static irqreturn_t ohci_irq_handler(int irq, void *dev_id)
2175 {
2176 quadlet_t event, node_id;
2177 struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
2178 struct hpsb_host *host = ohci->host;
2179 int phyid = -1, isroot = 0;
2180 unsigned long flags;
2181
2182 /* Read and clear the interrupt event register. Don't clear
2183 * the busReset event, though. This is done when we get the
2184 * selfIDComplete interrupt. */
2185 spin_lock_irqsave(&ohci->event_lock, flags);
2186 event = reg_read(ohci, OHCI1394_IntEventClear);
2187 reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
2188 spin_unlock_irqrestore(&ohci->event_lock, flags);
2189
2190 if (!event)
2191 return IRQ_NONE;
2192
2193 	/* If event is ~(u32)0, the CardBus card was ejected. In this
2194 	 * case we just return and clean up in the ohci1394_pci_remove()
2195 	 * function. */
2196 if (event == ~(u32) 0) {
2197 DBGMSG("Device removed.");
2198 return IRQ_NONE;
2199 }
2200
2201 DBGMSG("IntEvent: %08x", event);
2202
2203 if (event & OHCI1394_unrecoverableError) {
2204 int ctx;
2205 PRINT(KERN_ERR, "Unrecoverable error!");
2206
2207 if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
2208 PRINT(KERN_ERR, "Async Req Tx Context died: "
2209 "ctrl[%08x] cmdptr[%08x]",
2210 reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
2211 reg_read(ohci, OHCI1394_AsReqTrCommandPtr));
2212
2213 if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
2214 PRINT(KERN_ERR, "Async Rsp Tx Context died: "
2215 "ctrl[%08x] cmdptr[%08x]",
2216 reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
2217 reg_read(ohci, OHCI1394_AsRspTrCommandPtr));
2218
2219 if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
2220 PRINT(KERN_ERR, "Async Req Rcv Context died: "
2221 "ctrl[%08x] cmdptr[%08x]",
2222 reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
2223 reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));
2224
2225 if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
2226 PRINT(KERN_ERR, "Async Rsp Rcv Context died: "
2227 "ctrl[%08x] cmdptr[%08x]",
2228 reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
2229 reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));
2230
2231 for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
2232 if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
2233 PRINT(KERN_ERR, "Iso Xmit %d Context died: "
2234 "ctrl[%08x] cmdptr[%08x]", ctx,
2235 reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
2236 reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
2237 }
2238
2239 for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
2240 if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
2241 PRINT(KERN_ERR, "Iso Recv %d Context died: "
2242 "ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
2243 reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
2244 reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
2245 reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
2246 }
2247
2248 event &= ~OHCI1394_unrecoverableError;
2249 }
2250 if (event & OHCI1394_postedWriteErr) {
2251 PRINT(KERN_ERR, "physical posted write error");
2252 		/* no recovery strategy yet; it would have to involve the protocol drivers */
2253 event &= ~OHCI1394_postedWriteErr;
2254 }
2255 if (event & OHCI1394_cycleTooLong) {
2256 		if (printk_ratelimit())
2257 PRINT(KERN_WARNING, "isochronous cycle too long");
2258 else
2259 DBGMSG("OHCI1394_cycleTooLong");
2260 reg_write(ohci, OHCI1394_LinkControlSet,
2261 OHCI1394_LinkControl_CycleMaster);
2262 event &= ~OHCI1394_cycleTooLong;
2263 }
2264 if (event & OHCI1394_cycleInconsistent) {
2265 /* We subscribe to the cycleInconsistent event only to
2266 * clear the corresponding event bit... otherwise,
2267 * isochronous cycleMatch DMA won't work. */
2268 DBGMSG("OHCI1394_cycleInconsistent");
2269 event &= ~OHCI1394_cycleInconsistent;
2270 }
2271 if (event & OHCI1394_busReset) {
2272 /* The busReset event bit can't be cleared during the
2273 * selfID phase, so we disable busReset interrupts, to
2274 * avoid burying the cpu in interrupt requests. */
2275 spin_lock_irqsave(&ohci->event_lock, flags);
2276 reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
2277
2278 if (ohci->check_busreset) {
2279 int loop_count = 0;
2280
2281 udelay(10);
2282
2283 while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
2284 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2285
2286 spin_unlock_irqrestore(&ohci->event_lock, flags);
2287 udelay(10);
2288 spin_lock_irqsave(&ohci->event_lock, flags);
2289
2290 				/* The loop counter check prevents the driver from
2291 				 * remaining in this state forever. On the initial
2292 				 * bus reset the loop would otherwise run forever,
2293 				 * hanging the system until some device is manually
2294 				 * plugged into or out of a port. The forced reset
2295 				 * seems to solve this problem; it mainly affects nForce2. */
2296 if (loop_count > 10000) {
2297 ohci_devctl(host, RESET_BUS, LONG_RESET);
2298 DBGMSG("Detected bus-reset loop. Forced a bus reset!");
2299 loop_count = 0;
2300 }
2301
2302 loop_count++;
2303 }
2304 }
2305 spin_unlock_irqrestore(&ohci->event_lock, flags);
2306 if (!host->in_bus_reset) {
2307 DBGMSG("irq_handler: Bus reset requested");
2308
2309 /* Subsystem call */
2310 hpsb_bus_reset(ohci->host);
2311 }
2312 event &= ~OHCI1394_busReset;
2313 }
2314 if (event & OHCI1394_reqTxComplete) {
2315 struct dma_trm_ctx *d = &ohci->at_req_context;
2316 DBGMSG("Got reqTxComplete interrupt "
2317 "status=0x%08X", reg_read(ohci, d->ctrlSet));
2318 if (reg_read(ohci, d->ctrlSet) & 0x800)
2319 ohci1394_stop_context(ohci, d->ctrlClear,
2320 "reqTxComplete");
2321 else
2322 dma_trm_tasklet((unsigned long)d);
2323 //tasklet_schedule(&d->task);
2324 event &= ~OHCI1394_reqTxComplete;
2325 }
2326 if (event & OHCI1394_respTxComplete) {
2327 struct dma_trm_ctx *d = &ohci->at_resp_context;
2328 DBGMSG("Got respTxComplete interrupt "
2329 "status=0x%08X", reg_read(ohci, d->ctrlSet));
2330 if (reg_read(ohci, d->ctrlSet) & 0x800)
2331 ohci1394_stop_context(ohci, d->ctrlClear,
2332 "respTxComplete");
2333 else
2334 tasklet_schedule(&d->task);
2335 event &= ~OHCI1394_respTxComplete;
2336 }
2337 if (event & OHCI1394_RQPkt) {
2338 struct dma_rcv_ctx *d = &ohci->ar_req_context;
2339 DBGMSG("Got RQPkt interrupt status=0x%08X",
2340 reg_read(ohci, d->ctrlSet));
2341 if (reg_read(ohci, d->ctrlSet) & 0x800)
2342 ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
2343 else
2344 tasklet_schedule(&d->task);
2345 event &= ~OHCI1394_RQPkt;
2346 }
2347 if (event & OHCI1394_RSPkt) {
2348 struct dma_rcv_ctx *d = &ohci->ar_resp_context;
2349 DBGMSG("Got RSPkt interrupt status=0x%08X",
2350 reg_read(ohci, d->ctrlSet));
2351 if (reg_read(ohci, d->ctrlSet) & 0x800)
2352 ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
2353 else
2354 tasklet_schedule(&d->task);
2355 event &= ~OHCI1394_RSPkt;
2356 }
2357 if (event & OHCI1394_isochRx) {
2358 quadlet_t rx_event;
2359
2360 rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
2361 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
2362 ohci_schedule_iso_tasklets(ohci, rx_event, 0);
2363 event &= ~OHCI1394_isochRx;
2364 }
2365 if (event & OHCI1394_isochTx) {
2366 quadlet_t tx_event;
2367
2368 tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
2369 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
2370 ohci_schedule_iso_tasklets(ohci, 0, tx_event);
2371 event &= ~OHCI1394_isochTx;
2372 }
2373 if (event & OHCI1394_selfIDComplete) {
2374 if (host->in_bus_reset) {
2375 node_id = reg_read(ohci, OHCI1394_NodeID);
2376
2377 if (!(node_id & 0x80000000)) {
2378 PRINT(KERN_ERR,
2379 "SelfID received, but NodeID invalid "
2380 "(probably new bus reset occurred): %08X",
2381 node_id);
2382 goto selfid_not_valid;
2383 }
2384
2385 phyid = node_id & 0x0000003f;
2386 isroot = (node_id & 0x40000000) != 0;
2387
2388 DBGMSG("SelfID interrupt received "
2389 "(phyid %d, %s)", phyid,
2390 (isroot ? "root" : "not root"));
2391
2392 handle_selfid(ohci, host, phyid, isroot);
2393
2394 /* Clear the bus reset event and re-enable the
2395 * busReset interrupt. */
2396 spin_lock_irqsave(&ohci->event_lock, flags);
2397 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2398 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
2399 spin_unlock_irqrestore(&ohci->event_lock, flags);
2400
2401 /* Turn on phys dma reception.
2402 *
2403 * TODO: Enable some sort of filtering management.
2404 */
2405 if (phys_dma) {
2406 reg_write(ohci, OHCI1394_PhyReqFilterHiSet,
2407 0xffffffff);
2408 reg_write(ohci, OHCI1394_PhyReqFilterLoSet,
2409 0xffffffff);
2410 }
2411
2412 DBGMSG("PhyReqFilter=%08x%08x",
2413 reg_read(ohci, OHCI1394_PhyReqFilterHiSet),
2414 reg_read(ohci, OHCI1394_PhyReqFilterLoSet));
2415
2416 hpsb_selfid_complete(host, phyid, isroot);
2417 } else
2418 PRINT(KERN_ERR,
2419 "SelfID received outside of bus reset sequence");
2420
2421 selfid_not_valid:
2422 event &= ~OHCI1394_selfIDComplete;
2423 }
2424
2425 /* Make sure we handle everything, just in case we accidentally
2426 * enabled an interrupt that we didn't write a handler for. */
2427 if (event)
2428 PRINT(KERN_ERR, "Unhandled interrupt(s) 0x%08x",
2429 event);
2430
2431 return IRQ_HANDLED;
2432 }
2433
2434 /* Put the buffer back into the dma context */
2435 static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
2436 {
2437 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2438 DBGMSG("Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
2439
2440 d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
2441 	d->prg_cpu[idx]->branchAddress &= cpu_to_le32(0xfffffff0);
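	/* step back to the previous descriptor and set Z = 1 in the low
	 * nibble of its branchAddress, so the context will chain back
	 * into the buffer we just re-armed */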
2442 idx = (idx + d->num_desc - 1 ) % d->num_desc;
2443 	d->prg_cpu[idx]->branchAddress |= cpu_to_le32(0x00000001);
2444
2445 /* To avoid a race, ensure 1394 interface hardware sees the inserted
2446 * context program descriptors before it sees the wakeup bit set. */
2447 wmb();
2448
2449 /* wake up the dma context if necessary */
2450 if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
2451 PRINT(KERN_INFO,
2452 "Waking dma ctx=%d ... processing is probably too slow",
2453 d->ctx);
2454 }
2455
2456 	/* always do this, to avoid a race condition */
2457 reg_write(ohci, d->ctrlSet, 0x1000);
2458 }
2459
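/* Swap helper for incoming data: 'noswap' is set (see the Apple
 * UniNorth handling in ohci1394_pci_probe) when the controller
 * delivers packets that must not be byte-swapped again in software. */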
2460 #define cond_le32_to_cpu(data, noswap) \
2461 	((noswap) ? (data) : le32_to_cpu(data))
2462
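/* Bytes each tcode occupies in the receive buffer, including the
 * 4-byte trailer quadlet the controller appends. 0 means a block
 * payload follows (the real length is read from header quadlet 3);
 * -1 marks tcodes never expected in an AR context. 0xE is the pseudo
 * tcode OHCI uses for phy packets. */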
2463 static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
2464 -1, 0, -1, 0, -1, -1, 16, -1};
2465
2466 /*
2467 * Determine the length of a packet in the buffer
2468 * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
2469 */
2470 static inline int packet_length(struct dma_rcv_ctx *d, int idx,
2471 quadlet_t *buf_ptr, int offset,
2472 unsigned char tcode, int noswap)
2473 {
2474 int length = -1;
2475
2476 if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
2477 length = TCODE_SIZE[tcode];
2478 if (length == 0) {
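			/* block payload: data_length sits in header
			 * quadlet 3, which may have wrapped around
			 * into the start of the next buffer */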
2479 if (offset + 12 >= d->buf_size) {
2480 length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
2481 [3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
2482 } else {
2483 length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
2484 }
2485 length += 20;
2486 }
2487 } else if (d->type == DMA_CTX_ISO) {
2488 /* Assumption: buffer fill mode with header/trailer */
2489 length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
2490 }
2491
2492 if (length > 0 && length % 4)
2493 length += 4 - (length % 4);
2494
2495 return length;
2496 }
2497
2498 /* Tasklet that processes dma receive buffers */
2499 static void dma_rcv_tasklet (unsigned long data)
2500 {
2501 struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
2502 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2503 unsigned int split_left, idx, offset, rescount;
2504 unsigned char tcode;
2505 int length, bytes_left, ack;
2506 unsigned long flags;
2507 quadlet_t *buf_ptr;
2508 char *split_ptr;
2509 char msg[256];
2510
2511 spin_lock_irqsave(&d->lock, flags);
2512
2513 idx = d->buf_ind;
2514 offset = d->buf_offset;
2515 buf_ptr = d->buf_cpu[idx] + offset/4;
2516
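	/* resCount is the number of bytes the controller has not yet
	 * filled in this buffer, so everything between our offset and
	 * buf_size - resCount is valid received data */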
2517 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2518 bytes_left = d->buf_size - rescount - offset;
2519
2520 while (bytes_left > 0) {
2521 tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;
2522
2523 /* packet_length() will return < 4 for an error */
2524 length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);
2525
2526 if (length < 4) { /* something is wrong */
2527 sprintf(msg,"Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
2528 tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
2529 d->ctx, length);
2530 ohci1394_stop_context(ohci, d->ctrlClear, msg);
2531 spin_unlock_irqrestore(&d->lock, flags);
2532 return;
2533 }
2534
2535 /* The first case is where we have a packet that crosses
2536 * over more than one descriptor. The next case is where
2537 * it's all in the first descriptor. */
2538 if ((offset + length) > d->buf_size) {
2539 DBGMSG("Split packet rcv'd");
2540 if (length > d->split_buf_size) {
2541 ohci1394_stop_context(ohci, d->ctrlClear,
2542 "Split packet size exceeded");
2543 d->buf_ind = idx;
2544 d->buf_offset = offset;
2545 spin_unlock_irqrestore(&d->lock, flags);
2546 return;
2547 }
2548
2549 if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
2550 == d->buf_size) {
2551 				/* The other part of the packet has not been
2552 				 * written yet; this should never happen. In any
2553 				 * case, we will pick it up on the next call. */
2554 PRINT(KERN_INFO,
2555 "Got only half a packet!");
2556 d->buf_ind = idx;
2557 d->buf_offset = offset;
2558 spin_unlock_irqrestore(&d->lock, flags);
2559 return;
2560 }
2561
2562 split_left = length;
2563 split_ptr = (char *)d->spb;
2564 memcpy(split_ptr,buf_ptr,d->buf_size-offset);
2565 split_left -= d->buf_size-offset;
2566 split_ptr += d->buf_size-offset;
2567 insert_dma_buffer(d, idx);
2568 idx = (idx+1) % d->num_desc;
2569 buf_ptr = d->buf_cpu[idx];
2570 offset=0;
2571
2572 while (split_left >= d->buf_size) {
2573 memcpy(split_ptr,buf_ptr,d->buf_size);
2574 split_ptr += d->buf_size;
2575 split_left -= d->buf_size;
2576 insert_dma_buffer(d, idx);
2577 idx = (idx+1) % d->num_desc;
2578 buf_ptr = d->buf_cpu[idx];
2579 }
2580
2581 if (split_left > 0) {
2582 memcpy(split_ptr, buf_ptr, split_left);
2583 offset = split_left;
2584 buf_ptr += offset/4;
2585 }
2586 } else {
2587 DBGMSG("Single packet rcv'd");
2588 memcpy(d->spb, buf_ptr, length);
2589 offset += length;
2590 buf_ptr += length/4;
2591 if (offset==d->buf_size) {
2592 insert_dma_buffer(d, idx);
2593 idx = (idx+1) % d->num_desc;
2594 buf_ptr = d->buf_cpu[idx];
2595 offset=0;
2596 }
2597 }
2598
2599 		/* We get one phy packet in the async request context for
2600 		 * each bus reset; we always ignore it. */
2601 if (tcode != OHCI1394_TCODE_PHY) {
2602 if (!ohci->no_swap_incoming)
2603 header_le32_to_cpu(d->spb, tcode);
2604 DBGMSG("Packet received from node"
2605 " %d ack=0x%02X spd=%d tcode=0x%X"
2606 " length=%d ctx=%d tlabel=%d",
2607 (d->spb[1]>>16)&0x3f,
2608 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
2609 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
2610 tcode, length, d->ctx,
2611 (d->spb[0]>>10)&0x3f);
2612
2613 ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
2614 == 0x11) ? 1 : 0;
2615
2616 hpsb_packet_received(ohci->host, d->spb,
2617 length-4, ack);
2618 }
2619 #ifdef OHCI1394_DEBUG
2620 else
2621 PRINT (KERN_DEBUG, "Got phy packet ctx=%d ... discarded",
2622 d->ctx);
2623 #endif
2624
2625 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2626
2627 bytes_left = d->buf_size - rescount - offset;
2628
2629 }
2630
2631 d->buf_ind = idx;
2632 d->buf_offset = offset;
2633
2634 spin_unlock_irqrestore(&d->lock, flags);
2635 }
2636
2637 /* Bottom half that processes sent packets */
2638 static void dma_trm_tasklet (unsigned long data)
2639 {
2640 struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
2641 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2642 struct hpsb_packet *packet, *ptmp;
2643 unsigned long flags;
2644 u32 status, ack;
2645 size_t datasize;
2646
2647 spin_lock_irqsave(&d->lock, flags);
2648
2649 list_for_each_entry_safe(packet, ptmp, &d->fifo_list, driver_list) {
2650 datasize = packet->data_size;
2651 if (datasize && packet->type != hpsb_raw)
2652 status = le32_to_cpu(
2653 d->prg_cpu[d->sent_ind]->end.status) >> 16;
2654 else
2655 status = le32_to_cpu(
2656 d->prg_cpu[d->sent_ind]->begin.status) >> 16;
2657
2658 if (status == 0)
2659 			/* this packet hasn't been sent yet */
2660 break;
2661
2662 #ifdef OHCI1394_DEBUG
2663 if (datasize)
2664 if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
2665 DBGMSG("Stream packet sent to channel %d tcode=0x%X "
2666 "ack=0x%X spd=%d dataLength=%d ctx=%d",
2667 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
2668 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2669 status&0x1f, (status>>5)&0x3,
2670 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
2671 d->ctx);
2672 else
2673 DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2674 "%d ack=0x%X spd=%d dataLength=%d ctx=%d",
2675 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
2676 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2677 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
2678 status&0x1f, (status>>5)&0x3,
2679 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
2680 d->ctx);
2681 else
2682 DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2683 "%d ack=0x%X spd=%d data=0x%08X ctx=%d",
2684 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
2685 >>16)&0x3f,
2686 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2687 >>4)&0xf,
2688 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2689 >>10)&0x3f,
2690 status&0x1f, (status>>5)&0x3,
2691 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
2692 d->ctx);
2693 #endif
2694
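		/* the controller reports real 1394 ack codes as
		 * 0x10 | ack; anything below 0x10 is an evt_* error
		 * that we map onto an ACKX_* value for the subsystem */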
2695 if (status & 0x10) {
2696 ack = status & 0xf;
2697 } else {
2698 switch (status & 0x1f) {
2699 case EVT_NO_STATUS: /* that should never happen */
2700 case EVT_RESERVED_A: /* that should never happen */
2701 case EVT_LONG_PACKET: /* that should never happen */
2702 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2703 ack = ACKX_SEND_ERROR;
2704 break;
2705 case EVT_MISSING_ACK:
2706 ack = ACKX_TIMEOUT;
2707 break;
2708 case EVT_UNDERRUN:
2709 ack = ACKX_SEND_ERROR;
2710 break;
2711 case EVT_OVERRUN: /* that should never happen */
2712 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2713 ack = ACKX_SEND_ERROR;
2714 break;
2715 case EVT_DESCRIPTOR_READ:
2716 case EVT_DATA_READ:
2717 case EVT_DATA_WRITE:
2718 ack = ACKX_SEND_ERROR;
2719 break;
2720 case EVT_BUS_RESET: /* that should never happen */
2721 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2722 ack = ACKX_SEND_ERROR;
2723 break;
2724 case EVT_TIMEOUT:
2725 ack = ACKX_TIMEOUT;
2726 break;
2727 case EVT_TCODE_ERR:
2728 ack = ACKX_SEND_ERROR;
2729 break;
2730 case EVT_RESERVED_B: /* that should never happen */
2731 case EVT_RESERVED_C: /* that should never happen */
2732 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2733 ack = ACKX_SEND_ERROR;
2734 break;
2735 case EVT_UNKNOWN:
2736 case EVT_FLUSHED:
2737 ack = ACKX_SEND_ERROR;
2738 break;
2739 default:
2740 PRINT(KERN_ERR, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
2741 ack = ACKX_SEND_ERROR;
2742 BUG();
2743 }
2744 }
2745
2746 list_del_init(&packet->driver_list);
2747 hpsb_packet_sent(ohci->host, packet, ack);
2748
2749 if (datasize)
2750 pci_unmap_single(ohci->dev,
2751 cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address),
2752 datasize, PCI_DMA_TODEVICE);
2753
2754 d->sent_ind = (d->sent_ind+1)%d->num_desc;
2755 d->free_prgs++;
2756 }
2757
2758 dma_trm_flush(ohci, d);
2759
2760 spin_unlock_irqrestore(&d->lock, flags);
2761 }
2762
2763 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
2764 {
2765 int i;
2766 struct ti_ohci *ohci = d->ohci;
2767
2768 if (ohci == NULL)
2769 return;
2770
2771 DBGMSG("Freeing dma_rcv_ctx %d", d->ctx);
2772
2773 if (d->buf_cpu) {
2774 for (i=0; i<d->num_desc; i++)
2775 if (d->buf_cpu[i] && d->buf_bus[i])
2776 pci_free_consistent(
2777 ohci->dev, d->buf_size,
2778 d->buf_cpu[i], d->buf_bus[i]);
2779 kfree(d->buf_cpu);
2780 kfree(d->buf_bus);
2781 }
2782 if (d->prg_cpu) {
2783 for (i=0; i<d->num_desc; i++)
2784 if (d->prg_cpu[i] && d->prg_bus[i])
2785 pci_pool_free(d->prg_pool, d->prg_cpu[i],
2786 d->prg_bus[i]);
2787 pci_pool_destroy(d->prg_pool);
2788 kfree(d->prg_cpu);
2789 kfree(d->prg_bus);
2790 }
2791 kfree(d->spb);
2792
2793 /* Mark this context as freed. */
2794 d->ohci = NULL;
2795 }
2796
2797 static int
2798 alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2799 enum context_type type, int ctx, int num_desc,
2800 int buf_size, int split_buf_size, int context_base)
2801 {
2802 	int i;
2803 static int num_allocs;
2804 static char pool_name[20];
2805
2806 d->ohci = ohci;
2807 d->type = type;
2808 d->ctx = ctx;
2809
2810 d->num_desc = num_desc;
2811 d->buf_size = buf_size;
2812 d->split_buf_size = split_buf_size;
2813
2814 d->ctrlSet = 0;
2815 d->ctrlClear = 0;
2816 d->cmdPtr = 0;
2817
2818 d->buf_cpu = kzalloc(d->num_desc * sizeof(*d->buf_cpu), GFP_ATOMIC);
2819 d->buf_bus = kzalloc(d->num_desc * sizeof(*d->buf_bus), GFP_ATOMIC);
2820
2821 if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2822 PRINT(KERN_ERR, "Failed to allocate %s", "DMA buffer");
2823 free_dma_rcv_ctx(d);
2824 return -ENOMEM;
2825 }
2826
2827 d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_ATOMIC);
2828 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_ATOMIC);
2829
2830 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2831 PRINT(KERN_ERR, "Failed to allocate %s", "DMA prg");
2832 free_dma_rcv_ctx(d);
2833 return -ENOMEM;
2834 }
2835
2836 d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
2837
2838 if (d->spb == NULL) {
2839 PRINT(KERN_ERR, "Failed to allocate %s", "split buffer");
2840 free_dma_rcv_ctx(d);
2841 return -ENOMEM;
2842 }
2843
2844 	snprintf(pool_name, sizeof(pool_name), "ohci1394_rcv_prg%d",
2845 		 num_allocs);
2846 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
2847 sizeof(struct dma_cmd), 4, 0);
2848 	if (d->prg_pool == NULL) {
2850 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
2851 free_dma_rcv_ctx(d);
2852 return -ENOMEM;
2853 }
2854 num_allocs++;
2855
2856 for (i=0; i<d->num_desc; i++) {
2857 d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
2858 d->buf_size,
2859 d->buf_bus+i);
2860
2861 if (d->buf_cpu[i] != NULL) {
2862 memset(d->buf_cpu[i], 0, d->buf_size);
2863 } else {
2864 PRINT(KERN_ERR,
2865 "Failed to allocate %s", "DMA buffer");
2866 free_dma_rcv_ctx(d);
2867 return -ENOMEM;
2868 }
2869
2870 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
2871
2872 if (d->prg_cpu[i] != NULL) {
2873 memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
2874 } else {
2875 PRINT(KERN_ERR,
2876 "Failed to allocate %s", "DMA prg");
2877 free_dma_rcv_ctx(d);
2878 return -ENOMEM;
2879 }
2880 }
2881
2882 spin_lock_init(&d->lock);
2883
2884 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
2885 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
2886 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
2887
2888 tasklet_init(&d->task, dma_rcv_tasklet, (unsigned long) d);
2889 return 0;
2890 }
2891
2892 static void free_dma_trm_ctx(struct dma_trm_ctx *d)
2893 {
2894 int i;
2895 struct ti_ohci *ohci = d->ohci;
2896
2897 if (ohci == NULL)
2898 return;
2899
2900 DBGMSG("Freeing dma_trm_ctx %d", d->ctx);
2901
2902 if (d->prg_cpu) {
2903 for (i=0; i<d->num_desc; i++)
2904 if (d->prg_cpu[i] && d->prg_bus[i])
2905 pci_pool_free(d->prg_pool, d->prg_cpu[i],
2906 d->prg_bus[i]);
2907 pci_pool_destroy(d->prg_pool);
2908 kfree(d->prg_cpu);
2909 kfree(d->prg_bus);
2910 }
2911
2912 /* Mark this context as freed. */
2913 d->ohci = NULL;
2914 }
2915
2916 static int
2917 alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
2918 enum context_type type, int ctx, int num_desc,
2919 int context_base)
2920 {
2921 	int i;
2922 static char pool_name[20];
2923 static int num_allocs=0;
2924
2925 d->ohci = ohci;
2926 d->type = type;
2927 d->ctx = ctx;
2928 d->num_desc = num_desc;
2929 d->ctrlSet = 0;
2930 d->ctrlClear = 0;
2931 d->cmdPtr = 0;
2932
2933 d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_KERNEL);
2934 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_KERNEL);
2935
2936 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2937 PRINT(KERN_ERR, "Failed to allocate %s", "AT DMA prg");
2938 free_dma_trm_ctx(d);
2939 return -ENOMEM;
2940 }
2941
2942 	snprintf(pool_name, sizeof(pool_name), "ohci1394_trm_prg%d",
2943 		 num_allocs);
2944 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
2945 sizeof(struct at_dma_prg), 4, 0);
2946 if (d->prg_pool == NULL) {
2947 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
2948 free_dma_trm_ctx(d);
2949 return -ENOMEM;
2950 }
2951 num_allocs++;
2952
2953 for (i = 0; i < d->num_desc; i++) {
2954 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
2955
2956 if (d->prg_cpu[i] != NULL) {
2957 memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
2958 } else {
2959 PRINT(KERN_ERR,
2960 "Failed to allocate %s", "AT DMA prg");
2961 free_dma_trm_ctx(d);
2962 return -ENOMEM;
2963 }
2964 }
2965
2966 spin_lock_init(&d->lock);
2967
2968 /* initialize tasklet */
2969 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
2970 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
2971 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
2972 tasklet_init(&d->task, dma_trm_tasklet, (unsigned long)d);
2973 return 0;
2974 }
2975
2976 static void ohci_set_hw_config_rom(struct hpsb_host *host, quadlet_t *config_rom)
2977 {
2978 struct ti_ohci *ohci = host->hostdata;
2979
2980 reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(config_rom[0]));
2981 reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(config_rom[2]));
2982
2983 memcpy(ohci->csr_config_rom_cpu, config_rom, OHCI_CONFIG_ROM_LEN);
2984 }
2985
2986
2987 static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
2988 quadlet_t data, quadlet_t compare)
2989 {
2990 struct ti_ohci *ohci = host->hostdata;
2991 int i;
2992
2993 reg_write(ohci, OHCI1394_CSRData, data);
2994 reg_write(ohci, OHCI1394_CSRCompareData, compare);
2995 reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);
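	/* the low two bits select the compare-swap register
	 * (BUS_MANAGER_ID, BANDWIDTH_AVAILABLE or CHANNELS_AVAILABLE
	 * hi/lo); the loop below polls csrDone (bit 31) while the
	 * controller performs the compare-swap */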
2996
2997 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
2998 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
2999 break;
3000
3001 mdelay(1);
3002 }
3003
3004 return reg_read(ohci, OHCI1394_CSRData);
3005 }
3006
3007 static struct hpsb_host_driver ohci1394_driver = {
3008 .owner = THIS_MODULE,
3009 .name = OHCI1394_DRIVER_NAME,
3010 .set_hw_config_rom = ohci_set_hw_config_rom,
3011 .transmit_packet = ohci_transmit,
3012 .devctl = ohci_devctl,
3013 .isoctl = ohci_isoctl,
3014 .hw_csr_reg = ohci_hw_csr_reg,
3015 };
3016
3017 /***********************************
3018 * PCI Driver Interface functions *
3019 ***********************************/
3020
3021 #ifdef CONFIG_PPC_PMAC
3022 static void ohci1394_pmac_on(struct pci_dev *dev)
3023 {
3024 if (machine_is(powermac)) {
3025 struct device_node *ofn = pci_device_to_OF_node(dev);
3026
3027 if (ofn) {
3028 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
3029 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
3030 }
3031 }
3032 }
3033
3034 static void ohci1394_pmac_off(struct pci_dev *dev)
3035 {
3036 if (machine_is(powermac)) {
3037 struct device_node *ofn = pci_device_to_OF_node(dev);
3038
3039 if (ofn) {
3040 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
3041 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
3042 }
3043 }
3044 }
3045 #else
3046 #define ohci1394_pmac_on(dev)
3047 #define ohci1394_pmac_off(dev)
3048 #endif /* CONFIG_PPC_PMAC */
3049
3050 static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
3051 const struct pci_device_id *ent)
3052 {
3053 struct hpsb_host *host;
3054 struct ti_ohci *ohci; /* shortcut to currently handled device */
3055 resource_size_t ohci_base;
3056 int err = -ENOMEM;
3057
3058 ohci1394_pmac_on(dev);
3059 if (pci_enable_device(dev)) {
3060 PRINT_G(KERN_ERR, "Failed to enable OHCI hardware");
3061 err = -ENXIO;
3062 goto err;
3063 }
3064 pci_set_master(dev);
3065
3066 host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
3067 if (!host) {
3068 PRINT_G(KERN_ERR, "Failed to allocate %s", "host structure");
3069 goto err;
3070 }
3071 ohci = host->hostdata;
3072 ohci->dev = dev;
3073 ohci->host = host;
3074 ohci->init_state = OHCI_INIT_ALLOC_HOST;
3075 host->pdev = dev;
3076 pci_set_drvdata(dev, ohci);
3077
3078 /* We don't want hardware swapping */
3079 pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
3080
3081 /* Some oddball Apple controllers do not order the selfid
3082 * properly, so we make up for it here. */
3083 #ifndef __LITTLE_ENDIAN
3084 /* XXX: Need a better way to check this. I'm wondering if we can
3085 * read the values of the OHCI1394_PCI_HCI_Control and the
3086 * noByteSwapData registers to see if they were not cleared to
3087 * zero. Should this work? Obviously it's not defined what these
3088 * registers will read when they aren't supported. Bleh! */
3089 if (dev->vendor == PCI_VENDOR_ID_APPLE &&
3090 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
3091 ohci->no_swap_incoming = 1;
3092 ohci->selfid_swap = 0;
3093 } else
3094 ohci->selfid_swap = 1;
3095 #endif
3096
3097
3098 #ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
3099 #define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
3100 #endif
3101
3102 /* These chipsets require a bit of extra care when checking after
3103 * a busreset. */
3104 if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
3105 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
3106 (dev->vendor == PCI_VENDOR_ID_NVIDIA &&
3107 dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
3108 ohci->check_busreset = 1;
3109
3110 /* We hardwire the MMIO length, since some CardBus adaptors
3111 * fail to report the right length. Anyway, the ohci spec
3112 * clearly says it's 2kb, so this shouldn't be a problem. */
3113 ohci_base = pci_resource_start(dev, 0);
3114 if (pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE)
3115 PRINT(KERN_WARNING, "PCI resource length of 0x%llx too small!",
3116 (unsigned long long)pci_resource_len(dev, 0));
3117
3118 if (!request_mem_region(ohci_base, OHCI1394_REGISTER_SIZE,
3119 OHCI1394_DRIVER_NAME)) {
3120 PRINT_G(KERN_ERR, "MMIO resource (0x%llx - 0x%llx) unavailable",
3121 (unsigned long long)ohci_base,
3122 (unsigned long long)ohci_base + OHCI1394_REGISTER_SIZE);
3123 goto err;
3124 }
3125 ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;
3126
3127 ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
3128 if (ohci->registers == NULL) {
3129 PRINT_G(KERN_ERR, "Failed to remap registers");
3130 err = -ENXIO;
3131 goto err;
3132 }
3133 ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
3134 DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);
3135
3136 /* csr_config rom allocation */
3137 ohci->csr_config_rom_cpu =
3138 pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3139 &ohci->csr_config_rom_bus);
3140 if (ohci->csr_config_rom_cpu == NULL) {
3141 PRINT_G(KERN_ERR, "Failed to allocate %s", "buffer config rom");
3142 goto err;
3143 }
3144 ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;
3145
3146 /* self-id dma buffer allocation */
3147 ohci->selfid_buf_cpu =
3148 pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3149 &ohci->selfid_buf_bus);
3150 if (ohci->selfid_buf_cpu == NULL) {
3151 PRINT_G(KERN_ERR, "Failed to allocate %s", "self-ID buffer");
3152 goto err;
3153 }
3154 ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;
3155
3156 if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
3157 		PRINT(KERN_INFO, "SelfID buffer %p is not aligned on an "
3158 		      "8KB boundary; this may cause problems on some CXD3222 chips",
3159 ohci->selfid_buf_cpu);
3160
3161 /* No self-id errors at startup */
3162 ohci->self_id_errors = 0;
3163
3164 ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
3165 /* AR DMA request context allocation */
3166 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
3167 DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
3168 AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
3169 OHCI1394_AsReqRcvContextBase) < 0) {
3170 PRINT_G(KERN_ERR, "Failed to allocate %s", "AR Req context");
3171 goto err;
3172 }
3173 /* AR DMA response context allocation */
3174 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
3175 DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
3176 AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
3177 OHCI1394_AsRspRcvContextBase) < 0) {
3178 PRINT_G(KERN_ERR, "Failed to allocate %s", "AR Resp context");
3179 goto err;
3180 }
3181 /* AT DMA request context */
3182 if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
3183 DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
3184 OHCI1394_AsReqTrContextBase) < 0) {
3185 PRINT_G(KERN_ERR, "Failed to allocate %s", "AT Req context");
3186 goto err;
3187 }
3188 /* AT DMA response context */
3189 if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
3190 DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
3191 OHCI1394_AsRspTrContextBase) < 0) {
3192 PRINT_G(KERN_ERR, "Failed to allocate %s", "AT Resp context");
3193 goto err;
3194 }
3195 /* Start off with a soft reset, to clear everything to a sane
3196 * state. */
3197 ohci_soft_reset(ohci);
3198
3199 /* Now enable LPS, which we need in order to start accessing
3200 * most of the registers. In fact, on some cards (ALI M5251),
3201 * accessing registers in the SClk domain without LPS enabled
3202 * will lock up the machine. Wait 50msec to make sure we have
3203 * full link enabled. */
3204 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
3205
3206 /* Disable and clear interrupts */
3207 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3208 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3209
3210 mdelay(50);
3211
3212 /* Determine the number of available IR and IT contexts. */
3213 ohci->nb_iso_rcv_ctx =
3214 get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
3215 ohci->nb_iso_xmit_ctx =
3216 get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
3217
3218 /* Set the usage bits for non-existent contexts so they can't
3219 * be allocated */
3220 ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
3221 ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;
3222
3223 INIT_LIST_HEAD(&ohci->iso_tasklet_list);
3224 spin_lock_init(&ohci->iso_tasklet_list_lock);
3225 ohci->ISO_channel_usage = 0;
3226 spin_lock_init(&ohci->IR_channel_lock);
3227
3228 spin_lock_init(&ohci->event_lock);
3229
3230 /*
3231 * interrupts are disabled, all right, but... due to IRQF_SHARED we
3232 * might get called anyway. We'll see no event, of course, but
3233 * we need to get to that "no event", so enough should be initialized
3234 * by that point.
3235 */
3236 if (request_irq(dev->irq, ohci_irq_handler, IRQF_SHARED,
3237 OHCI1394_DRIVER_NAME, ohci)) {
3238 PRINT_G(KERN_ERR, "Failed to allocate interrupt %d", dev->irq);
3239 goto err;
3240 }
3241 ohci->init_state = OHCI_INIT_HAVE_IRQ;
3242 ohci_initialize(ohci);
3243
3244 /* Set certain csr values */
3245 host->csr.guid_hi = reg_read(ohci, OHCI1394_GUIDHi);
3246 host->csr.guid_lo = reg_read(ohci, OHCI1394_GUIDLo);
3247 host->csr.cyc_clk_acc = 100; /* how do we determine clk accuracy? */
3248 host->csr.max_rec = (reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf;
3249 host->csr.lnk_spd = reg_read(ohci, OHCI1394_BusOptions) & 0x7;
3250
3251 if (phys_dma) {
3252 host->low_addr_space =
3253 (u64) reg_read(ohci, OHCI1394_PhyUpperBound) << 16;
3254 if (!host->low_addr_space)
3255 host->low_addr_space = OHCI1394_PHYS_UPPER_BOUND_FIXED;
3256 }
3257 host->middle_addr_space = OHCI1394_MIDDLE_ADDRESS_SPACE;
3258
3259 /* Tell the highlevel this host is ready */
3260 if (hpsb_add_host(host)) {
3261 PRINT_G(KERN_ERR, "Failed to register host with highlevel");
3262 goto err;
3263 }
3264 ohci->init_state = OHCI_INIT_DONE;
3265
3266 return 0;
3267 err:
3268 ohci1394_pci_remove(dev);
3269 return err;
3270 }
3271
3272 static void ohci1394_pci_remove(struct pci_dev *dev)
3273 {
3274 struct ti_ohci *ohci;
3275 struct device *device;
3276
3277 ohci = pci_get_drvdata(dev);
3278 if (!ohci)
3279 goto out;
3280
3281 device = get_device(&ohci->host->device);
3282
3283 switch (ohci->init_state) {
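	/* no break statements: each case falls through, so teardown
	 * runs from the deepest initialization stage reached back
	 * down to the first */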
3284 case OHCI_INIT_DONE:
3285 hpsb_remove_host(ohci->host);
3286
3287 /* Clear out BUS Options */
3288 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
3289 reg_write(ohci, OHCI1394_BusOptions,
3290 (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
3291 0x00ff0000);
3292 memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);
3293
3294 case OHCI_INIT_HAVE_IRQ:
3295 /* Clear interrupt registers */
3296 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3297 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3298 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
3299 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
3300 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
3301 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
3302
3303 /* Disable IRM Contender */
3304 set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
3305
3306 /* Clear link control register */
3307 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
3308
3309 /* Let all other nodes know to ignore us */
3310 ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
3311
3312 /* Soft reset before we start - this disables
3313 * interrupts and clears linkEnable and LPS. */
3314 ohci_soft_reset(ohci);
3315 free_irq(dev->irq, ohci);
3316
3317 case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
3318 		/* The ohci_soft_reset() above stopped all DMA contexts,
3319 		 * so we don't need to stop them individually here. */
3320 free_dma_rcv_ctx(&ohci->ar_req_context);
3321 free_dma_rcv_ctx(&ohci->ar_resp_context);
3322 free_dma_trm_ctx(&ohci->at_req_context);
3323 free_dma_trm_ctx(&ohci->at_resp_context);
3324
3325 case OHCI_INIT_HAVE_SELFID_BUFFER:
3326 pci_free_consistent(dev, OHCI1394_SI_DMA_BUF_SIZE,
3327 ohci->selfid_buf_cpu,
3328 ohci->selfid_buf_bus);
3329
3330 case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
3331 pci_free_consistent(dev, OHCI_CONFIG_ROM_LEN,
3332 ohci->csr_config_rom_cpu,
3333 ohci->csr_config_rom_bus);
3334
3335 case OHCI_INIT_HAVE_IOMAPPING:
3336 iounmap(ohci->registers);
3337
3338 case OHCI_INIT_HAVE_MEM_REGION:
3339 release_mem_region(pci_resource_start(dev, 0),
3340 OHCI1394_REGISTER_SIZE);
3341
3342 case OHCI_INIT_ALLOC_HOST:
3343 pci_set_drvdata(dev, NULL);
3344 }
3345
3346 if (device)
3347 put_device(device);
3348 out:
3349 ohci1394_pmac_off(dev);
3350 }
3351
3352 #ifdef CONFIG_PM
3353 static int ohci1394_pci_suspend(struct pci_dev *dev, pm_message_t state)
3354 {
3355 int err;
3356 struct ti_ohci *ohci = pci_get_drvdata(dev);
3357
3358 if (!ohci) {
3359 		printk(KERN_ERR "%s: tried to suspend nonexistent host\n",
3360 OHCI1394_DRIVER_NAME);
3361 return -ENXIO;
3362 }
3363 DBGMSG("suspend called");
3364
3365 /* Clear the async DMA contexts and stop using the controller */
3366 hpsb_bus_reset(ohci->host);
3367
3368 /* See ohci1394_pci_remove() for comments on this sequence */
3369 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
3370 reg_write(ohci, OHCI1394_BusOptions,
3371 (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
3372 0x00ff0000);
3373 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3374 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3375 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
3376 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
3377 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
3378 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
3379 set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
3380 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
3381 ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
3382 ohci_soft_reset(ohci);
3383
3384 err = pci_save_state(dev);
3385 if (err) {
3386 PRINT(KERN_ERR, "pci_save_state failed with %d", err);
3387 return err;
3388 }
3389 err = pci_set_power_state(dev, pci_choose_state(dev, state));
3390 if (err)
3391 DBGMSG("pci_set_power_state failed with %d", err);
3392 ohci1394_pmac_off(dev);
3393
3394 return 0;
3395 }
3396
3397 static int ohci1394_pci_resume(struct pci_dev *dev)
3398 {
3399 int err;
3400 struct ti_ohci *ohci = pci_get_drvdata(dev);
3401
3402 if (!ohci) {
3403 		printk(KERN_ERR "%s: tried to resume nonexistent host\n",
3404 OHCI1394_DRIVER_NAME);
3405 return -ENXIO;
3406 }
3407 DBGMSG("resume called");
3408
3409 ohci1394_pmac_on(dev);
3410 pci_set_power_state(dev, PCI_D0);
3411 pci_restore_state(dev);
3412 err = pci_enable_device(dev);
3413 if (err) {
3414 PRINT(KERN_ERR, "pci_enable_device failed with %d", err);
3415 return err;
3416 }
3417
3418 /* See ohci1394_pci_probe() for comments on this sequence */
3419 ohci_soft_reset(ohci);
3420 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
3421 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3422 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3423 mdelay(50);
3424 ohci_initialize(ohci);
3425
3426 hpsb_resume_host(ohci->host);
3427 return 0;
3428 }
3429 #endif /* CONFIG_PM */
3430
3431 static struct pci_device_id ohci1394_pci_tbl[] = {
3432 {
3433 .class = PCI_CLASS_SERIAL_FIREWIRE_OHCI,
3434 .class_mask = PCI_ANY_ID,
3435 .vendor = PCI_ANY_ID,
3436 .device = PCI_ANY_ID,
3437 .subvendor = PCI_ANY_ID,
3438 .subdevice = PCI_ANY_ID,
3439 },
3440 { 0, },
3441 };
3442
3443 MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
3444
3445 static struct pci_driver ohci1394_pci_driver = {
3446 .name = OHCI1394_DRIVER_NAME,
3447 .id_table = ohci1394_pci_tbl,
3448 .probe = ohci1394_pci_probe,
3449 .remove = ohci1394_pci_remove,
3450 #ifdef CONFIG_PM
3451 .resume = ohci1394_pci_resume,
3452 .suspend = ohci1394_pci_suspend,
3453 #endif
3454 };
3455
3456 /***********************************
3457 * OHCI1394 Video Interface *
3458 ***********************************/
3459
3460 /* essentially the only purpose of this code is to allow another
3461 module to hook into ohci's interrupt handler */
3462
3463 /* returns zero if successful, one if DMA context is locked up */
3464 int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
3465 {
3466 int i=0;
3467
3468 /* stop the channel program if it's still running */
3469 reg_write(ohci, reg, 0x8000);
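	/* 0x8000 is the run bit; writing it to a ContextControlClear
	 * register clears it. The loop below then polls the active
	 * bit (0x400) until the controller really lets go */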
3470
3471 /* Wait until it effectively stops */
3472 while (reg_read(ohci, reg) & 0x400) {
3473 i++;
3474 if (i>5000) {
3475 PRINT(KERN_ERR,
3476 "Runaway loop while stopping context: %s...", msg ? msg : "");
3477 return 1;
3478 }
3479
3480 mb();
3481 udelay(10);
3482 }
3483 if (msg) PRINT(KERN_ERR, "%s: dma prg stopped", msg);
3484 return 0;
3485 }
3486
3487 void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
3488 void (*func)(unsigned long), unsigned long data)
3489 {
3490 tasklet_init(&tasklet->tasklet, func, data);
3491 tasklet->type = type;
3492 /* We init the tasklet->link field, so we can list_del() it
3493 * without worrying whether it was added to the list or not. */
3494 INIT_LIST_HEAD(&tasklet->link);
3495 }
3496
3497 int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
3498 struct ohci1394_iso_tasklet *tasklet)
3499 {
3500 unsigned long flags, *usage;
3501 int n, i, r = -EBUSY;
3502
3503 if (tasklet->type == OHCI_ISO_TRANSMIT) {
3504 n = ohci->nb_iso_xmit_ctx;
3505 usage = &ohci->it_ctx_usage;
3506 	} else {
3508 n = ohci->nb_iso_rcv_ctx;
3509 usage = &ohci->ir_ctx_usage;
3510
3511 /* only one receive context can be multichannel (OHCI sec 10.4.1) */
3512 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3513 if (test_and_set_bit(0, &ohci->ir_multichannel_used)) {
3514 return r;
3515 }
3516 }
3517 }
3518
3519 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3520
3521 for (i = 0; i < n; i++)
3522 if (!test_and_set_bit(i, usage)) {
3523 tasklet->context = i;
3524 list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
3525 r = 0;
3526 break;
3527 }
3528
3529 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3530
3531 return r;
3532 }
3533
3534 void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
3535 struct ohci1394_iso_tasklet *tasklet)
3536 {
3537 unsigned long flags;
3538
3539 tasklet_kill(&tasklet->tasklet);
3540
3541 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3542
3543 if (tasklet->type == OHCI_ISO_TRANSMIT)
3544 clear_bit(tasklet->context, &ohci->it_ctx_usage);
3545 else {
3546 clear_bit(tasklet->context, &ohci->ir_ctx_usage);
3547
3548 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3549 clear_bit(0, &ohci->ir_multichannel_used);
3550 }
3551 }
3552
3553 list_del(&tasklet->link);
3554
3555 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3556 }
3557
3558 EXPORT_SYMBOL(ohci1394_stop_context);
3559 EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
3560 EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
3561 EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
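/*
 * Usage sketch (illustrative only, not taken from a real client
 * module): a module such as video1394 hooks into the iso interrupt
 * path roughly like this, where my_handler and my_data are
 * hypothetical names a client would supply:
 *
 *	struct ohci1394_iso_tasklet t;
 *
 *	ohci1394_init_iso_tasklet(&t, OHCI_ISO_RECEIVE, my_handler,
 *				  (unsigned long)my_data);
 *	if (ohci1394_register_iso_tasklet(ohci, &t) < 0)
 *		return -EBUSY;		(no free IR context)
 *	... program the IR context selected by t.context ...
 *	ohci1394_unregister_iso_tasklet(ohci, &t);
 */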
3562
3563 /***********************************
3564 * General module initialization *
3565 ***********************************/
3566
3567 MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
3568 MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
3569 MODULE_LICENSE("GPL");
3570
3571 static void __exit ohci1394_cleanup (void)
3572 {
3573 pci_unregister_driver(&ohci1394_pci_driver);
3574 }
3575
3576 static int __init ohci1394_init(void)
3577 {
3578 return pci_register_driver(&ohci1394_pci_driver);
3579 }
3580
3581 module_init(ohci1394_init);
3582 module_exit(ohci1394_cleanup);