/*
 * Driver for OHCI 1394 controllers
 *
 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>

#include <asm/system.h>

#include "fw-transaction.h"
#define DESCRIPTOR_OUTPUT_MORE		0
#define DESCRIPTOR_OUTPUT_LAST		(1 << 12)
#define DESCRIPTOR_INPUT_MORE		(2 << 12)
#define DESCRIPTOR_INPUT_LAST		(3 << 12)
#define DESCRIPTOR_STATUS		(1 << 11)
#define DESCRIPTOR_KEY_IMMEDIATE	(2 << 8)
#define DESCRIPTOR_PING			(1 << 7)
#define DESCRIPTOR_YY			(1 << 6)
#define DESCRIPTOR_NO_IRQ		(0 << 4)
#define DESCRIPTOR_IRQ_ERROR		(1 << 4)
#define DESCRIPTOR_IRQ_ALWAYS		(3 << 4)
#define DESCRIPTOR_BRANCH_ALWAYS	(3 << 2)
#define DESCRIPTOR_WAIT			(3 << 0)
struct descriptor {
	__le16 req_count;
	__le16 control;
	__le32 data_address;
	__le32 branch_address;
	__le16 res_count;
	__le16 transfer_status;
} __attribute__((aligned(16)));
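
/*
 * Illustration only (a sketch, not driver logic): a z = 3 output
 * program, as built later in at_context_queue_packet(), would be
 * filled roughly like this, assuming d points at 16-byte aligned
 * descriptors and payload_bus/len describe a mapped payload:
 *
 *	d[0].control      = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
 *	d[1]              = ...immediate header quadlets...
 *	d[2].req_count    = cpu_to_le16(len);
 *	d[2].data_address = cpu_to_le32(payload_bus);
 *	d[2].control     |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
 *					DESCRIPTOR_IRQ_ALWAYS |
 *					DESCRIPTOR_BRANCH_ALWAYS);
 *
 * The controller writes transfer_status and res_count back into the
 * last descriptor when the program completes.
 */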
struct db_descriptor {
	__le16 first_size;
	__le16 control;
	__le16 second_req_count;
	__le16 first_req_count;
	__le32 branch_address;
	__le16 second_res_count;
	__le16 first_res_count;

	__le32 reserved0;
	__le32 second_buffer;
	__le32 first_buffer;
	__le32 reserved1;
} __attribute__((aligned(16)));
#define CONTROL_SET(regs)	(regs)
#define CONTROL_CLEAR(regs)	((regs) + 4)
#define COMMAND_PTR(regs)	((regs) + 12)
#define CONTEXT_MATCH(regs)	((regs) + 16)
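
/*
 * Illustration (a sketch, not additional driver code): each DMA
 * context exposes a small register block at ctx->regs, and the macros
 * above compute the offsets within it.  Starting a context typically
 * looks like
 *
 *	reg_write(ohci, COMMAND_PTR(ctx->regs), program_bus | z);
 *	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
 *	flush_writes(ohci);
 *
 * where program_bus is the bus address of the first descriptor block
 * and z its descriptor count (see context_run() and ar_context_run()
 * below).
 */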
struct ar_buffer {
	struct descriptor descriptor;
	struct ar_buffer *next;
	__le32 data[0];
};

struct ar_context {
	struct fw_ohci *ohci;
	struct ar_buffer *current_buffer;
	struct ar_buffer *last_buffer;
	void *pointer;
	u32 regs;
	struct tasklet_struct tasklet;
};

struct context;

typedef int (*descriptor_callback_t)(struct context *ctx,
				     struct descriptor *d,
				     struct descriptor *last);

struct context {
	struct fw_ohci *ohci;
	u32 regs;

	struct descriptor *buffer;
	dma_addr_t buffer_bus;
	size_t buffer_size;
	struct descriptor *head_descriptor;
	struct descriptor *tail_descriptor;
	struct descriptor *tail_descriptor_last;
	struct descriptor *prev_descriptor;

	descriptor_callback_t callback;

	struct tasklet_struct tasklet;
};
#define IT_HEADER_SY(v)          ((v) << 0)
#define IT_HEADER_TCODE(v)       ((v) << 4)
#define IT_HEADER_CHANNEL(v)     ((v) << 8)
#define IT_HEADER_TAG(v)         ((v) << 14)
#define IT_HEADER_SPEED(v)       ((v) << 16)
#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
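
/*
 * Illustration only (not driver logic): ohci_queue_iso_transmit()
 * below combines these shifts to build the immediate header quadlets
 * of an isochronous transmit packet.  For a hypothetical packet on
 * channel 63, tag 1, sy 0 and a payload of data_length bytes:
 *
 *	header[0] = cpu_to_le32(IT_HEADER_SY(0) |
 *				IT_HEADER_TAG(1) |
 *				IT_HEADER_TCODE(TCODE_STREAM_DATA) |
 *				IT_HEADER_CHANNEL(63) |
 *				IT_HEADER_SPEED(speed));
 *	header[1] = cpu_to_le32(IT_HEADER_DATA_LENGTH(data_length));
 *
 * channel, speed, sy, tag and data_length are placeholders here; the
 * real values come from the fw_iso_context and fw_iso_packet.
 */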
struct iso_context {
	struct fw_iso_context base;
	struct context context;
	void *header;
	size_t header_length;
};

#define CONFIG_ROM_SIZE 1024
struct fw_ohci {
	struct fw_card card;

	u32 version;
	__iomem char *registers;
	dma_addr_t self_id_bus;
	__le32 *self_id_cpu;
	struct tasklet_struct bus_reset_tasklet;
	int node_id;
	int generation;
	int request_generation;
	u32 bus_seconds;

	/*
	 * Spinlock for accessing fw_ohci data.  Never call out of
	 * this driver with this lock held.
	 */
	spinlock_t lock;
	u32 self_id_buffer[512];

	/* Config rom buffers */
	__be32 *config_rom;
	dma_addr_t config_rom_bus;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;
	u32 next_header;

	struct ar_context ar_request_ctx;
	struct ar_context ar_response_ctx;
	struct context at_request_ctx;
	struct context at_response_ctx;

	u32 it_context_mask;
	struct iso_context *it_context_list;
	u32 ir_context_mask;
	struct iso_context *ir_context_list;
};
static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
	return container_of(card, struct fw_ohci, card);
}

#define IT_CONTEXT_CYCLE_MATCH_ENABLE	0x80000000
#define IR_CONTEXT_BUFFER_FILL		0x80000000
#define IR_CONTEXT_ISOCH_HEADER		0x40000000
#define IR_CONTEXT_CYCLE_MATCH_ENABLE	0x20000000
#define IR_CONTEXT_MULTI_CHANNEL_MODE	0x10000000
#define IR_CONTEXT_DUAL_BUFFER_MODE	0x08000000

#define CONTEXT_RUN	0x8000
#define CONTEXT_WAKE	0x1000
#define CONTEXT_DEAD	0x0800
#define CONTEXT_ACTIVE	0x0400

#define OHCI1394_MAX_AT_REQ_RETRIES	0x2
#define OHCI1394_MAX_AT_RESP_RETRIES	0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8

#define FW_OHCI_MAJOR			240
#define OHCI1394_REGISTER_SIZE		0x800
#define OHCI_LOOP_COUNT			500
#define OHCI1394_PCI_HCI_Control	0x40
#define SELF_ID_BUF_SIZE		0x800
#define OHCI_TCODE_PHY_PACKET		0x0e
#define OHCI_VERSION_1_1		0x010010
#define ISO_BUFFER_SIZE			(64 * 1024)
#define AT_BUFFER_SIZE			4096

static char ohci_driver_name[] = KBUILD_MODNAME;
static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
	writel(data, ohci->registers + offset);
}

static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
{
	return readl(ohci->registers + offset);
}

static inline void flush_writes(const struct fw_ohci *ohci)
{
	/* Do a dummy read to flush writes. */
	reg_read(ohci, OHCI1394_Version);
}
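
/*
 * Usage sketch (illustration only): MMIO writes to the controller are
 * posted, so code that needs a write to reach the chip before it
 * continues follows the write with flush_writes(), e.g.
 *
 *	reg_write(ohci, OHCI1394_IntMaskClear, ~0);
 *	flush_writes(ohci);
 *
 * The read of OHCI1394_Version has no side effects and simply forces
 * the preceding posted writes out.
 */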
static int
ohci_update_phy_reg(struct fw_card *card, int addr,
		    int clear_bits, int set_bits)
{
	struct fw_ohci *ohci = fw_ohci(card);
	u32 val, old;

	reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
	flush_writes(ohci);
	msleep(2);
	val = reg_read(ohci, OHCI1394_PhyControl);
	if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
		fw_error("failed to set phy reg bits.\n");
		return -EBUSY;
	}

	old = OHCI1394_PhyControl_ReadData(val);
	old = (old & ~clear_bits) | set_bits;
	reg_write(ohci, OHCI1394_PhyControl,
		  OHCI1394_PhyControl_Write(addr, old));

	return 0;
}
static int ar_context_add_page(struct ar_context *ctx)
{
	struct device *dev = ctx->ohci->card.device;
	struct ar_buffer *ab;
	dma_addr_t ab_bus;
	size_t offset;

	ab = (struct ar_buffer *) __get_free_page(GFP_ATOMIC);
	if (ab == NULL)
		return -ENOMEM;

	ab_bus = dma_map_single(dev, ab, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(ab_bus)) {
		free_page((unsigned long) ab);
		return -ENOMEM;
	}

	memset(&ab->descriptor, 0, sizeof(ab->descriptor));
	ab->descriptor.control        = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
						    DESCRIPTOR_STATUS |
						    DESCRIPTOR_BRANCH_ALWAYS);
	offset = offsetof(struct ar_buffer, data);
	ab->descriptor.req_count      = cpu_to_le16(PAGE_SIZE - offset);
	ab->descriptor.data_address   = cpu_to_le32(ab_bus + offset);
	ab->descriptor.res_count      = cpu_to_le16(PAGE_SIZE - offset);
	ab->descriptor.branch_address = 0;

	dma_sync_single_for_device(dev, ab_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);

	ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
	ctx->last_buffer->next = ab;
	ctx->last_buffer = ab;

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);

	return 0;
}
static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
	struct fw_ohci *ohci = ctx->ohci;
	struct fw_packet p;
	u32 status, length, tcode;

	p.header[0] = le32_to_cpu(buffer[0]);
	p.header[1] = le32_to_cpu(buffer[1]);
	p.header[2] = le32_to_cpu(buffer[2]);

	tcode = (p.header[0] >> 4) & 0x0f;
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
		p.header[3] = (__force __u32) buffer[3];
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		p.header[3] = le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		p.header[3] = le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = p.header[3] >> 16;
		break;

	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case OHCI_TCODE_PHY_PACKET:
		p.header_length = 12;
		p.payload_length = 0;
		break;
	}

	p.payload = (void *) buffer + p.header_length;

	/* FIXME: What to do about evt_* errors? */
	length = (p.header_length + p.payload_length + 3) / 4;
	status = le32_to_cpu(buffer[length]);

	p.ack        = ((status >> 16) & 0x1f) - 16;
	p.speed      = (status >> 21) & 0x7;
	p.timestamp  = status & 0xffff;
	p.generation = ohci->request_generation;

	/*
	 * The OHCI bus reset handler synthesizes a phy packet with
	 * the new generation number when a bus reset happens (see
	 * section 8.4.2.3).  This helps us determine when a request
	 * was received and make sure we send the response in the same
	 * generation.  We only need this for requests; for responses
	 * we use the unique tlabel for finding the matching
	 * request.
	 */
	if (p.ack + 16 == 0x09)
		ohci->request_generation = (buffer[2] >> 16) & 0xff;
	else if (ctx == &ohci->ar_request_ctx)
		fw_core_handle_request(&ohci->card, &p);
	else
		fw_core_handle_response(&ohci->card, &p);

	return buffer + length + 1;
}
static void ar_context_tasklet(unsigned long data)
{
	struct ar_context *ctx = (struct ar_context *)data;
	struct fw_ohci *ohci = ctx->ohci;
	struct ar_buffer *ab;
	struct descriptor *d;
	void *buffer, *end;

	ab = ctx->current_buffer;
	d = &ab->descriptor;

	if (d->res_count == 0) {
		size_t size, rest, offset;

		/*
		 * This descriptor is finished and we may have a
		 * packet split across this and the next buffer.  We
		 * reuse the page for reassembling the split packet.
		 */
		offset = offsetof(struct ar_buffer, data);
		dma_unmap_single(ohci->card.device,
				 le32_to_cpu(ab->descriptor.data_address) - offset,
				 PAGE_SIZE, DMA_BIDIRECTIONAL);

		buffer = ab;
		ab = ab->next;
		d = &ab->descriptor;
		size = buffer + PAGE_SIZE - ctx->pointer;
		rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
		memmove(buffer, ctx->pointer, size);
		memcpy(buffer + size, ab->data, rest);
		ctx->current_buffer = ab;
		ctx->pointer = (void *) ab->data + rest;
		end = buffer + size + rest;

		while (buffer < end)
			buffer = handle_ar_packet(ctx, buffer);

		free_page((unsigned long)buffer);
		ar_context_add_page(ctx);
	} else {
		buffer = ctx->pointer;
		ctx->pointer = end =
			(void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);

		while (buffer < end)
			buffer = handle_ar_packet(ctx, buffer);
	}
}

static int
ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs)
{
	struct ar_buffer ab;

	ctx->regs        = regs;
	ctx->ohci        = ohci;
	ctx->last_buffer = &ab;
	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);

	ar_context_add_page(ctx);
	ar_context_add_page(ctx);
	ctx->current_buffer = ab.next;
	ctx->pointer = ctx->current_buffer->data;

	return 0;
}

static void ar_context_run(struct ar_context *ctx)
{
	struct ar_buffer *ab = ctx->current_buffer;
	dma_addr_t ab_bus;
	size_t offset;

	offset = offsetof(struct ar_buffer, data);
	ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;

	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab_bus | 1);
	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
	flush_writes(ctx->ohci);
}
static void context_tasklet(unsigned long data)
{
	struct context *ctx = (struct context *) data;
	struct fw_ohci *ohci = ctx->ohci;
	struct descriptor *d, *last;
	u32 address;
	int z;

	dma_sync_single_for_cpu(ohci->card.device, ctx->buffer_bus,
				ctx->buffer_size, DMA_TO_DEVICE);

	d = ctx->tail_descriptor;
	last = ctx->tail_descriptor_last;

	while (last->branch_address != 0) {
		address = le32_to_cpu(last->branch_address);
		z = address & 0xf;
		d = ctx->buffer + (address - ctx->buffer_bus) / sizeof(*d);
		last = (z == 2) ? d : d + z - 1;

		if (!ctx->callback(ctx, d, last))
			break;

		ctx->tail_descriptor = d;
		ctx->tail_descriptor_last = last;
	}
}

static int
context_init(struct context *ctx, struct fw_ohci *ohci,
	     size_t buffer_size, u32 regs,
	     descriptor_callback_t callback)
{
	ctx->ohci = ohci;
	ctx->regs = regs;
	ctx->buffer_size = buffer_size;
	ctx->buffer = kmalloc(buffer_size, GFP_KERNEL);
	if (ctx->buffer == NULL)
		return -ENOMEM;

	tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
	ctx->callback = callback;

	ctx->buffer_bus =
		dma_map_single(ohci->card.device, ctx->buffer,
			       buffer_size, DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->buffer_bus)) {
		kfree(ctx->buffer);
		return -ENOMEM;
	}

	ctx->head_descriptor      = ctx->buffer;
	ctx->prev_descriptor      = ctx->buffer;
	ctx->tail_descriptor      = ctx->buffer;
	ctx->tail_descriptor_last = ctx->buffer;

	/*
	 * We put a dummy descriptor in the buffer that has a NULL
	 * branch address and looks like it's been sent.  That way we
	 * have a descriptor to append DMA programs to.  Also, the
	 * ring buffer invariant is that it always has at least one
	 * element so that head == tail means buffer full.
	 */
	memset(ctx->head_descriptor, 0, sizeof(*ctx->head_descriptor));
	ctx->head_descriptor->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
	ctx->head_descriptor->transfer_status = cpu_to_le16(0x8011);
	ctx->head_descriptor++;

	return 0;
}
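
/*
 * Illustration of the ring invariant set up above (not driver code):
 * after context_init() the buffer contains one dummy descriptor that
 * already looks completed, so the first DMA program appended with
 * context_get_descriptors()/context_append() produces a list like
 *
 *	[dummy, OUTPUT_LAST, status 0x8011] -> [new program ...]
 *
 * head_descriptor points past the newest program, tail_descriptor at
 * the oldest one not yet retired by context_tasklet(), and head ==
 * tail therefore means the ring is full rather than empty.
 */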
static void
context_release(struct context *ctx)
{
	struct fw_card *card = &ctx->ohci->card;

	dma_unmap_single(card->device, ctx->buffer_bus,
			 ctx->buffer_size, DMA_TO_DEVICE);
	kfree(ctx->buffer);
}

static struct descriptor *
context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus)
{
	struct descriptor *d, *tail, *end;

	d = ctx->head_descriptor;
	tail = ctx->tail_descriptor;
	end = ctx->buffer + ctx->buffer_size / sizeof(*d);

	if (d + z <= tail) {
		goto has_space;
	} else if (d > tail && d + z <= end) {
		goto has_space;
	} else if (d > tail && ctx->buffer + z <= tail) {
		d = ctx->buffer;
		goto has_space;
	}

	return NULL;

 has_space:
	memset(d, 0, z * sizeof(*d));
	*d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof(*d);

	return d;
}

static void context_run(struct context *ctx, u32 extra)
{
	struct fw_ohci *ohci = ctx->ohci;

	reg_write(ohci, COMMAND_PTR(ctx->regs),
		  le32_to_cpu(ctx->tail_descriptor_last->branch_address));
	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
	flush_writes(ohci);
}

static void context_append(struct context *ctx,
			   struct descriptor *d, int z, int extra)
{
	dma_addr_t d_bus;

	d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof(*d);

	ctx->head_descriptor = d + z + extra;
	ctx->prev_descriptor->branch_address = cpu_to_le32(d_bus | z);
	ctx->prev_descriptor = z == 2 ? d : d + z - 1;

	dma_sync_single_for_device(ctx->ohci->card.device, ctx->buffer_bus,
				   ctx->buffer_size, DMA_TO_DEVICE);

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);
}

static void context_stop(struct context *ctx)
{
	u32 reg;
	int i;

	reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
	flush_writes(ctx->ohci);

	for (i = 0; i < 10; i++) {
		reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
		if ((reg & CONTEXT_ACTIVE) == 0)
			break;

		fw_notify("context_stop: still active (0x%08x)\n", reg);
		mdelay(1);
	}
}

struct driver_data {
	struct fw_packet *packet;
};
/*
 * This function appends a packet to the DMA queue for transmission.
 * Must always be called with the ohci->lock held to ensure proper
 * generation handling and locking around packet queue manipulation.
 */
static int
at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
{
	struct fw_ohci *ohci = ctx->ohci;
	dma_addr_t d_bus, uninitialized_var(payload_bus);
	struct driver_data *driver_data;
	struct descriptor *d, *last;
	__le32 *header;
	int z, tcode;
	u32 reg;

	d = context_get_descriptors(ctx, 4, &d_bus);
	if (d == NULL) {
		packet->ack = RCODE_SEND_ERROR;
		return -1;
	}

	d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
	d[0].res_count = cpu_to_le16(packet->timestamp);

	/*
	 * The DMA format for asynchronous link packets is different
	 * from the IEEE1394 layout, so shift the fields around
	 * accordingly.  If header_length is 8, it's a PHY packet, to
	 * which we need to prepend an extra quadlet.
	 */
	header = (__le32 *) &d[1];
	if (packet->header_length > 8) {
		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
					(packet->speed << 16));
		header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
					(packet->header[0] & 0xffff0000));
		header[2] = cpu_to_le32(packet->header[2]);

		tcode = (packet->header[0] >> 4) & 0x0f;
		if (TCODE_IS_BLOCK_PACKET(tcode))
			header[3] = cpu_to_le32(packet->header[3]);
		else
			header[3] = (__force __le32) packet->header[3];

		d[0].req_count = cpu_to_le16(packet->header_length);
	} else {
		header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
					(packet->speed << 16));
		header[1] = cpu_to_le32(packet->header[0]);
		header[2] = cpu_to_le32(packet->header[1]);
		d[0].req_count = cpu_to_le16(12);
	}

	driver_data = (struct driver_data *) &d[3];
	driver_data->packet = packet;
	packet->driver_data = driver_data;

	if (packet->payload_length > 0) {
		payload_bus =
			dma_map_single(ohci->card.device, packet->payload,
				       packet->payload_length, DMA_TO_DEVICE);
		if (dma_mapping_error(payload_bus)) {
			packet->ack = RCODE_SEND_ERROR;
			return -1;
		}

		d[2].req_count    = cpu_to_le16(packet->payload_length);
		d[2].data_address = cpu_to_le32(payload_bus);
		last = &d[2];
		z = 3;
	} else {
		last = &d[0];
		z = 2;
	}

	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_IRQ_ALWAYS |
				     DESCRIPTOR_BRANCH_ALWAYS);

	/* FIXME: Document how the locking works. */
	if (ohci->generation != packet->generation) {
		if (packet->payload_length > 0)
			dma_unmap_single(ohci->card.device, payload_bus,
					 packet->payload_length, DMA_TO_DEVICE);
		packet->ack = RCODE_GENERATION;
		return -1;
	}

	context_append(ctx, d, z, 4 - z);

	/* If the context isn't already running, start it up. */
	reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
	if ((reg & CONTEXT_RUN) == 0)
		context_run(ctx, 0);

	return 0;
}

static int handle_at_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct driver_data *driver_data;
	struct fw_packet *packet;
	struct fw_ohci *ohci = context->ohci;
	dma_addr_t payload_bus;
	int evt;

	if (last->transfer_status == 0)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	driver_data = (struct driver_data *) &d[3];
	packet = driver_data->packet;
	if (packet == NULL)
		/* This packet was cancelled, just continue. */
		return 1;

	payload_bus = le32_to_cpu(last->data_address);
	if (payload_bus != 0)
		dma_unmap_single(ohci->card.device, payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);

	evt = le16_to_cpu(last->transfer_status) & 0x1f;
	packet->timestamp = le16_to_cpu(last->res_count);

	switch (evt) {
	case OHCI1394_evt_timeout:
		/* Async response transmit timed out. */
		packet->ack = RCODE_CANCELLED;
		break;

	case OHCI1394_evt_flushed:
		/*
		 * The packet was flushed; this should give the same
		 * error as when we try to use a stale generation count.
		 */
		packet->ack = RCODE_GENERATION;
		break;

	case OHCI1394_evt_missing_ack:
		/*
		 * Using a valid (current) generation count, but the
		 * node is not on the bus or not sending acks.
		 */
		packet->ack = RCODE_NO_ACK;
		break;

	case ACK_COMPLETE + 0x10:
	case ACK_PENDING + 0x10:
	case ACK_BUSY_X + 0x10:
	case ACK_BUSY_A + 0x10:
	case ACK_BUSY_B + 0x10:
	case ACK_DATA_ERROR + 0x10:
	case ACK_TYPE_ERROR + 0x10:
		packet->ack = evt - 0x10;
		break;

	default:
		packet->ack = RCODE_SEND_ERROR;
		break;
	}

	packet->callback(packet, &ohci->card, packet->ack);

	return 1;
}
#define HEADER_GET_DESTINATION(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_TCODE(q)		(((q) >> 4) & 0x0f)
#define HEADER_GET_OFFSET_HIGH(q)	(((q) >> 0) & 0xffff)
#define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q)	(((q) >> 0) & 0xffff)
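
/*
 * Example with illustrative values (not driver logic): for a block
 * read request with header quadlets q0..q3, the local-request path
 * below extracts
 *
 *	tcode  = HEADER_GET_TCODE(q0);		  bits 7..4 of q0
 *	length = HEADER_GET_DATA_LENGTH(q3);	  upper 16 bits of q3
 *	offset = ((u64) HEADER_GET_OFFSET_HIGH(q1) << 32) | q2;
 *
 * and then dispatches on offset - CSR_REGISTER_BASE.
 */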
static void
handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, i;

	tcode = HEADER_GET_TCODE(packet->header[0]);
	if (TCODE_IS_BLOCK_PACKET(tcode))
		length = HEADER_GET_DATA_LENGTH(packet->header[3]);
	else
		length = 4;

	i = csr - CSR_CONFIG_ROM;
	if (i + length > CONFIG_ROM_SIZE) {
		fw_fill_response(&response, packet->header,
				 RCODE_ADDRESS_ERROR, NULL, 0);
	} else if (!TCODE_IS_READ_REQUEST(tcode)) {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
	} else {
		fw_fill_response(&response, packet->header, RCODE_COMPLETE,
				 (void *) ohci->config_rom + i, length);
	}

	fw_core_handle_response(&ohci->card, &response);
}

static void
handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, ext_tcode, sel;
	__be32 *payload, lock_old;
	u32 lock_arg, lock_data;

	tcode = HEADER_GET_TCODE(packet->header[0]);
	length = HEADER_GET_DATA_LENGTH(packet->header[3]);
	payload = packet->payload;
	ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);

	if (tcode == TCODE_LOCK_REQUEST &&
	    ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
		lock_arg = be32_to_cpu(payload[0]);
		lock_data = be32_to_cpu(payload[1]);
	} else if (tcode == TCODE_READ_QUADLET_REQUEST) {
		lock_arg = 0;
		lock_data = 0;
	} else {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
		goto out;
	}

	sel = (csr - CSR_BUS_MANAGER_ID) / 4;
	reg_write(ohci, OHCI1394_CSRData, lock_data);
	reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
	reg_write(ohci, OHCI1394_CSRControl, sel);

	if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
		lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
	else
		fw_notify("swap not done yet\n");

	fw_fill_response(&response, packet->header,
			 RCODE_COMPLETE, &lock_old, sizeof(lock_old));
 out:
	fw_core_handle_response(&ohci->card, &response);
}

static void
handle_local_request(struct context *ctx, struct fw_packet *packet)
{
	u64 offset;
	u32 csr;

	if (ctx == &ctx->ohci->at_request_ctx) {
		packet->ack = ACK_PENDING;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}

	offset =
		((unsigned long long)
		 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
		packet->header[2];
	csr = offset - CSR_REGISTER_BASE;

	/* Handle config rom reads. */
	if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
		handle_local_rom(ctx->ohci, packet, csr);
	else switch (csr) {
	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		handle_local_lock(ctx->ohci, packet, csr);
		break;
	default:
		if (ctx == &ctx->ohci->at_request_ctx)
			fw_core_handle_request(&ctx->ohci->card, packet);
		else
			fw_core_handle_response(&ctx->ohci->card, packet);
		break;
	}

	if (ctx == &ctx->ohci->at_response_ctx) {
		packet->ack = ACK_COMPLETE;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}
}

static void
at_context_transmit(struct context *ctx, struct fw_packet *packet)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&ctx->ohci->lock, flags);

	if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
	    ctx->ohci->generation == packet->generation) {
		spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		handle_local_request(ctx, packet);
		return;
	}

	retval = at_context_queue_packet(ctx, packet);
	spin_unlock_irqrestore(&ctx->ohci->lock, flags);

	if (retval < 0)
		packet->callback(packet, &ctx->ohci->card, packet->ack);
}
static void bus_reset_tasklet(unsigned long data)
{
	struct fw_ohci *ohci = (struct fw_ohci *)data;
	int self_id_count, i, j, reg;
	int generation, new_generation;
	unsigned long flags;
	void *free_rom = NULL;
	dma_addr_t free_rom_bus = 0;

	reg = reg_read(ohci, OHCI1394_NodeID);
	if (!(reg & OHCI1394_NodeID_idValid)) {
		fw_notify("node ID not valid, new bus reset in progress\n");
		return;
	}
	if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
		fw_notify("misconfigured bus\n");
		return;
	}
	ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
			       OHCI1394_NodeID_nodeNumber);

	/*
	 * The count in the SelfIDCount register is the number of
	 * bytes in the self ID receive buffer.  Since we also receive
	 * the inverted quadlets and a header quadlet, we shift one
	 * bit extra to get the actual number of self IDs.
	 */
	self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff;
	generation = (le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;

	for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
		if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1])
			fw_error("inconsistent self IDs\n");
		ohci->self_id_buffer[j] = le32_to_cpu(ohci->self_id_cpu[i]);
	}

	/*
	 * Check the consistency of the self IDs we just read.  The
	 * problem we face is that a new bus reset can start while we
	 * read out the self IDs from the DMA buffer.  If this happens,
	 * the DMA buffer will be overwritten with new self IDs and we
	 * will read out inconsistent data.  The OHCI specification
	 * (section 11.2) recommends a technique similar to
	 * linux/seqlock.h, where we remember the generation of the
	 * self IDs in the buffer before reading them out and compare
	 * it to the current generation after reading them out.  If
	 * the two generations match we know we have a consistent set
	 * of self IDs.
	 */
	new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
	if (new_generation != generation) {
		fw_notify("recursive bus reset detected, "
			  "discarding self ids\n");
		return;
	}

	/* FIXME: Document how the locking works. */
	spin_lock_irqsave(&ohci->lock, flags);

	ohci->generation = generation;
	context_stop(&ohci->at_request_ctx);
	context_stop(&ohci->at_response_ctx);
	reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);

	/*
	 * This next bit is unrelated to the AT context stuff but we
	 * have to do it under the spinlock also.  If a new config rom
	 * was set up before this reset, the old one is now no longer
	 * in use and we can free it.  Update the config rom pointers
	 * to point to the current config rom and clear the
	 * next_config_rom pointer so a new update can take place.
	 */
	if (ohci->next_config_rom != NULL) {
		if (ohci->next_config_rom != ohci->config_rom) {
			free_rom     = ohci->config_rom;
			free_rom_bus = ohci->config_rom_bus;
		}
		ohci->config_rom      = ohci->next_config_rom;
		ohci->config_rom_bus  = ohci->next_config_rom_bus;
		ohci->next_config_rom = NULL;

		/*
		 * Restore config_rom image and manually update
		 * config_rom registers.  Writing the header quadlet
		 * will indicate that the config rom is ready, so we
		 * do that last.
		 */
		reg_write(ohci, OHCI1394_BusOptions,
			  be32_to_cpu(ohci->config_rom[2]));
		ohci->config_rom[0] = cpu_to_be32(ohci->next_header);
		reg_write(ohci, OHCI1394_ConfigROMhdr, ohci->next_header);
	}

	spin_unlock_irqrestore(&ohci->lock, flags);

	if (free_rom)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  free_rom, free_rom_bus);

	fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
				 self_id_count, ohci->self_id_buffer);
}
static irqreturn_t irq_handler(int irq, void *data)
{
	struct fw_ohci *ohci = data;
	u32 event, iso_event, cycle_time;
	int i;

	event = reg_read(ohci, OHCI1394_IntEventClear);

	if (!event || !~event)
		return IRQ_NONE;

	reg_write(ohci, OHCI1394_IntEventClear, event);

	if (event & OHCI1394_selfIDComplete)
		tasklet_schedule(&ohci->bus_reset_tasklet);

	if (event & OHCI1394_RQPkt)
		tasklet_schedule(&ohci->ar_request_ctx.tasklet);

	if (event & OHCI1394_RSPkt)
		tasklet_schedule(&ohci->ar_response_ctx.tasklet);

	if (event & OHCI1394_reqTxComplete)
		tasklet_schedule(&ohci->at_request_ctx.tasklet);

	if (event & OHCI1394_respTxComplete)
		tasklet_schedule(&ohci->at_response_ctx.tasklet);

	iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);

	while (iso_event) {
		i = ffs(iso_event) - 1;
		tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
		iso_event &= ~(1 << i);
	}

	iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);

	while (iso_event) {
		i = ffs(iso_event) - 1;
		tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
		iso_event &= ~(1 << i);
	}

	if (unlikely(event & OHCI1394_postedWriteErr))
		fw_error("PCI posted write error\n");

	if (event & OHCI1394_cycle64Seconds) {
		cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		if ((cycle_time & 0x80000000) == 0)
			ohci->bus_seconds++;
	}

	return IRQ_HANDLED;
}

static int software_reset(struct fw_ohci *ohci)
{
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if ((reg_read(ohci, OHCI1394_HCControlSet) &
		     OHCI1394_HCControl_softReset) == 0)
			return 0;
		msleep(1);
	}

	return -EBUSY;
}
*card
, u32
*config_rom
, size_t length
)
1093 struct fw_ohci
*ohci
= fw_ohci(card
);
1094 struct pci_dev
*dev
= to_pci_dev(card
->device
);
1096 if (software_reset(ohci
)) {
1097 fw_error("Failed to reset ohci card.\n");
1102 * Now enable LPS, which we need in order to start accessing
1103 * most of the registers. In fact, on some cards (ALI M5251),
1104 * accessing registers in the SClk domain without LPS enabled
1105 * will lock up the machine. Wait 50msec to make sure we have
1106 * full link enabled.
1108 reg_write(ohci
, OHCI1394_HCControlSet
,
1109 OHCI1394_HCControl_LPS
|
1110 OHCI1394_HCControl_postedWriteEnable
);
1114 reg_write(ohci
, OHCI1394_HCControlClear
,
1115 OHCI1394_HCControl_noByteSwapData
);
1117 reg_write(ohci
, OHCI1394_LinkControlSet
,
1118 OHCI1394_LinkControl_rcvSelfID
|
1119 OHCI1394_LinkControl_cycleTimerEnable
|
1120 OHCI1394_LinkControl_cycleMaster
);
1122 reg_write(ohci
, OHCI1394_ATRetries
,
1123 OHCI1394_MAX_AT_REQ_RETRIES
|
1124 (OHCI1394_MAX_AT_RESP_RETRIES
<< 4) |
1125 (OHCI1394_MAX_PHYS_RESP_RETRIES
<< 8));
1127 ar_context_run(&ohci
->ar_request_ctx
);
1128 ar_context_run(&ohci
->ar_response_ctx
);
1130 reg_write(ohci
, OHCI1394_SelfIDBuffer
, ohci
->self_id_bus
);
1131 reg_write(ohci
, OHCI1394_PhyUpperBound
, 0x00010000);
1132 reg_write(ohci
, OHCI1394_IntEventClear
, ~0);
1133 reg_write(ohci
, OHCI1394_IntMaskClear
, ~0);
1134 reg_write(ohci
, OHCI1394_IntMaskSet
,
1135 OHCI1394_selfIDComplete
|
1136 OHCI1394_RQPkt
| OHCI1394_RSPkt
|
1137 OHCI1394_reqTxComplete
| OHCI1394_respTxComplete
|
1138 OHCI1394_isochRx
| OHCI1394_isochTx
|
1139 OHCI1394_postedWriteErr
| OHCI1394_cycle64Seconds
|
1140 OHCI1394_masterIntEnable
);
1142 /* Activate link_on bit and contender bit in our self ID packets.*/
1143 if (ohci_update_phy_reg(card
, 4, 0,
1144 PHY_LINK_ACTIVE
| PHY_CONTENDER
) < 0)
1148 * When the link is not yet enabled, the atomic config rom
1149 * update mechanism described below in ohci_set_config_rom()
1150 * is not active. We have to update ConfigRomHeader and
1151 * BusOptions manually, and the write to ConfigROMmap takes
1152 * effect immediately. We tie this to the enabling of the
1153 * link, so we have a valid config rom before enabling - the
1154 * OHCI requires that ConfigROMhdr and BusOptions have valid
1155 * values before enabling.
1157 * However, when the ConfigROMmap is written, some controllers
1158 * always read back quadlets 0 and 2 from the config rom to
1159 * the ConfigRomHeader and BusOptions registers on bus reset.
1160 * They shouldn't do that in this initial case where the link
1161 * isn't enabled. This means we have to use the same
1162 * workaround here, setting the bus header to 0 and then write
1163 * the right values in the bus reset tasklet.
1167 ohci
->next_config_rom
=
1168 dma_alloc_coherent(ohci
->card
.device
, CONFIG_ROM_SIZE
,
1169 &ohci
->next_config_rom_bus
,
1171 if (ohci
->next_config_rom
== NULL
)
1174 memset(ohci
->next_config_rom
, 0, CONFIG_ROM_SIZE
);
1175 fw_memcpy_to_be32(ohci
->next_config_rom
, config_rom
, length
* 4);
1178 * In the suspend case, config_rom is NULL, which
1179 * means that we just reuse the old config rom.
1181 ohci
->next_config_rom
= ohci
->config_rom
;
1182 ohci
->next_config_rom_bus
= ohci
->config_rom_bus
;
1185 ohci
->next_header
= be32_to_cpu(ohci
->next_config_rom
[0]);
1186 ohci
->next_config_rom
[0] = 0;
1187 reg_write(ohci
, OHCI1394_ConfigROMhdr
, 0);
1188 reg_write(ohci
, OHCI1394_BusOptions
,
1189 be32_to_cpu(ohci
->next_config_rom
[2]));
1190 reg_write(ohci
, OHCI1394_ConfigROMmap
, ohci
->next_config_rom_bus
);
1192 reg_write(ohci
, OHCI1394_AsReqFilterHiSet
, 0x80000000);
1194 if (request_irq(dev
->irq
, irq_handler
,
1195 IRQF_SHARED
, ohci_driver_name
, ohci
)) {
1196 fw_error("Failed to allocate shared interrupt %d.\n",
1198 dma_free_coherent(ohci
->card
.device
, CONFIG_ROM_SIZE
,
1199 ohci
->config_rom
, ohci
->config_rom_bus
);
1203 reg_write(ohci
, OHCI1394_HCControlSet
,
1204 OHCI1394_HCControl_linkEnable
|
1205 OHCI1394_HCControl_BIBimageValid
);
1209 * We are ready to go, initiate bus reset to finish the
1213 fw_core_initiate_bus_reset(&ohci
->card
, 1);
static int
ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
{
	struct fw_ohci *ohci;
	unsigned long flags;
	int retval = -EBUSY;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;

	ohci = fw_ohci(card);

	/*
	 * When the OHCI controller is enabled, the config rom update
	 * mechanism is a bit tricky, but easy enough to use.  See
	 * section 5.5.6 in the OHCI specification.
	 *
	 * The OHCI controller caches the new config rom address in a
	 * shadow register (ConfigROMmapNext) and needs a bus reset
	 * for the changes to take place.  When the bus reset is
	 * detected, the controller loads the new values for the
	 * ConfigRomHeader and BusOptions registers from the specified
	 * config rom and loads ConfigROMmap from the ConfigROMmapNext
	 * shadow register.  All automatically and atomically.
	 *
	 * Now, there's a twist to this story.  The automatic load of
	 * ConfigRomHeader and BusOptions doesn't honor the
	 * noByteSwapData bit, so with a be32 config rom, the
	 * controller will load be32 values in to these registers
	 * during the atomic update, even on little endian
	 * architectures.  The workaround we use is to put a 0 in the
	 * header quadlet; 0 is endian agnostic and means that the
	 * config rom isn't ready yet.  In the bus reset tasklet we
	 * then set up the real values for the two registers.
	 *
	 * We use ohci->lock to avoid racing with the code that sets
	 * ohci->next_config_rom to NULL (see bus_reset_tasklet).
	 */

	next_config_rom =
		dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				   &next_config_rom_bus, GFP_KERNEL);
	if (next_config_rom == NULL)
		return -ENOMEM;

	spin_lock_irqsave(&ohci->lock, flags);

	if (ohci->next_config_rom == NULL) {
		ohci->next_config_rom = next_config_rom;
		ohci->next_config_rom_bus = next_config_rom_bus;

		memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
		fw_memcpy_to_be32(ohci->next_config_rom, config_rom,
				  length * 4);

		ohci->next_header = config_rom[0];
		ohci->next_config_rom[0] = 0;

		reg_write(ohci, OHCI1394_ConfigROMmap,
			  ohci->next_config_rom_bus);
		retval = 0;
	}

	spin_unlock_irqrestore(&ohci->lock, flags);

	/*
	 * Now initiate a bus reset to have the changes take
	 * effect.  We clean up the old config rom memory and DMA
	 * mappings in the bus reset tasklet, since the OHCI
	 * controller could need to access it before the bus reset
	 * takes effect.
	 */
	if (retval == 0)
		fw_core_initiate_bus_reset(&ohci->card, 1);
	else
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  next_config_rom, next_config_rom_bus);

	return retval;
}
static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_request_ctx, packet);
}

static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_response_ctx, packet);
}

static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct context *ctx = &ohci->at_request_ctx;
	struct driver_data *driver_data = packet->driver_data;
	int retval = -ENOENT;

	tasklet_disable(&ctx->tasklet);

	if (packet->ack != 0)
		goto out;

	driver_data->packet = NULL;
	packet->ack = RCODE_CANCELLED;
	packet->callback(packet, &ohci->card, packet->ack);
	retval = 0;

 out:
	tasklet_enable(&ctx->tasklet);

	return retval;
}

static int
ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
{
	struct fw_ohci *ohci = fw_ohci(card);
	unsigned long flags;
	int n, retval = 0;

	/*
	 * FIXME:  Make sure this bitmask is cleared when we clear the busReset
	 * interrupt bit.  Clear physReqResourceAllBuses on bus reset.
	 */

	spin_lock_irqsave(&ohci->lock, flags);

	if (ohci->generation != generation) {
		retval = -ESTALE;
		goto out;
	}

	/*
	 * Note, if the node ID contains a non-local bus ID, physical DMA is
	 * enabled for _all_ nodes on remote buses.
	 */
	n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
	if (n < 32)
		reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
	else
		reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));

	flush_writes(ohci);
 out:
	spin_unlock_irqrestore(&ohci->lock, flags);

	return retval;
}

static u64
ohci_get_bus_time(struct fw_card *card)
{
	struct fw_ohci *ohci = fw_ohci(card);
	u32 cycle_time;
	u64 bus_time;

	cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
	bus_time = ((u64) ohci->bus_seconds << 32) | cycle_time;

	return bus_time;
}
static int handle_ir_dualbuffer_packet(struct context *context,
				       struct descriptor *d,
				       struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
	struct db_descriptor *db = (struct db_descriptor *) d;
	__le32 *ir_header;
	size_t header_length;
	void *p, *end;
	int i;

	if (db->first_res_count > 0 && db->second_res_count > 0)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	header_length = le16_to_cpu(db->first_req_count) -
		le16_to_cpu(db->first_res_count);

	i = ctx->header_length;
	p = db + 1;
	end = p + header_length;
	while (p < end && i + ctx->base.header_size <= PAGE_SIZE) {
		/*
		 * The iso header is byteswapped to little endian by
		 * the controller, but the remaining header quadlets
		 * are big endian.  We want to present all the headers
		 * as big endian, so we have to swap the first
		 * quadlet.
		 */
		*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
		memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
		i += ctx->base.header_size;
		p += ctx->base.header_size + 4;
	}

	ctx->header_length = i;

	if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
		ir_header = (__le32 *) (db + 1);
		ctx->base.callback(&ctx->base,
				   le32_to_cpu(ir_header[0]) & 0xffff,
				   ctx->header_length, ctx->header,
				   ctx->base.callback_data);
		ctx->header_length = 0;
	}

	return 1;
}

static int handle_it_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);

	if (last->transfer_status == 0)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
		ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
				   0, NULL, ctx->base.callback_data);

	return 1;
}
*
1453 ohci_allocate_iso_context(struct fw_card
*card
, int type
, size_t header_size
)
1455 struct fw_ohci
*ohci
= fw_ohci(card
);
1456 struct iso_context
*ctx
, *list
;
1457 descriptor_callback_t callback
;
1459 unsigned long flags
;
1460 int index
, retval
= -ENOMEM
;
1462 if (type
== FW_ISO_CONTEXT_TRANSMIT
) {
1463 mask
= &ohci
->it_context_mask
;
1464 list
= ohci
->it_context_list
;
1465 callback
= handle_it_packet
;
1467 mask
= &ohci
->ir_context_mask
;
1468 list
= ohci
->ir_context_list
;
1469 callback
= handle_ir_dualbuffer_packet
;
1472 /* FIXME: We need a fallback for pre 1.1 OHCI. */
1473 if (callback
== handle_ir_dualbuffer_packet
&&
1474 ohci
->version
< OHCI_VERSION_1_1
)
1475 return ERR_PTR(-ENOSYS
);
1477 spin_lock_irqsave(&ohci
->lock
, flags
);
1478 index
= ffs(*mask
) - 1;
1480 *mask
&= ~(1 << index
);
1481 spin_unlock_irqrestore(&ohci
->lock
, flags
);
1484 return ERR_PTR(-EBUSY
);
1486 if (type
== FW_ISO_CONTEXT_TRANSMIT
)
1487 regs
= OHCI1394_IsoXmitContextBase(index
);
1489 regs
= OHCI1394_IsoRcvContextBase(index
);
1492 memset(ctx
, 0, sizeof(*ctx
));
1493 ctx
->header_length
= 0;
1494 ctx
->header
= (void *) __get_free_page(GFP_KERNEL
);
1495 if (ctx
->header
== NULL
)
1498 retval
= context_init(&ctx
->context
, ohci
, ISO_BUFFER_SIZE
,
1501 goto out_with_header
;
1506 free_page((unsigned long)ctx
->header
);
1508 spin_lock_irqsave(&ohci
->lock
, flags
);
1509 *mask
|= 1 << index
;
1510 spin_unlock_irqrestore(&ohci
->lock
, flags
);
1512 return ERR_PTR(retval
);
1515 static int ohci_start_iso(struct fw_iso_context
*base
,
1516 s32 cycle
, u32 sync
, u32 tags
)
1518 struct iso_context
*ctx
= container_of(base
, struct iso_context
, base
);
1519 struct fw_ohci
*ohci
= ctx
->context
.ohci
;
1523 if (ctx
->base
.type
== FW_ISO_CONTEXT_TRANSMIT
) {
1524 index
= ctx
- ohci
->it_context_list
;
1527 match
= IT_CONTEXT_CYCLE_MATCH_ENABLE
|
1528 (cycle
& 0x7fff) << 16;
1530 reg_write(ohci
, OHCI1394_IsoXmitIntEventClear
, 1 << index
);
1531 reg_write(ohci
, OHCI1394_IsoXmitIntMaskSet
, 1 << index
);
1532 context_run(&ctx
->context
, match
);
1534 index
= ctx
- ohci
->ir_context_list
;
1535 control
= IR_CONTEXT_DUAL_BUFFER_MODE
| IR_CONTEXT_ISOCH_HEADER
;
1536 match
= (tags
<< 28) | (sync
<< 8) | ctx
->base
.channel
;
1538 match
|= (cycle
& 0x07fff) << 12;
1539 control
|= IR_CONTEXT_CYCLE_MATCH_ENABLE
;
1542 reg_write(ohci
, OHCI1394_IsoRecvIntEventClear
, 1 << index
);
1543 reg_write(ohci
, OHCI1394_IsoRecvIntMaskSet
, 1 << index
);
1544 reg_write(ohci
, CONTEXT_MATCH(ctx
->context
.regs
), match
);
1545 context_run(&ctx
->context
, control
);
1551 static int ohci_stop_iso(struct fw_iso_context
*base
)
1553 struct fw_ohci
*ohci
= fw_ohci(base
->card
);
1554 struct iso_context
*ctx
= container_of(base
, struct iso_context
, base
);
1557 if (ctx
->base
.type
== FW_ISO_CONTEXT_TRANSMIT
) {
1558 index
= ctx
- ohci
->it_context_list
;
1559 reg_write(ohci
, OHCI1394_IsoXmitIntMaskClear
, 1 << index
);
1561 index
= ctx
- ohci
->ir_context_list
;
1562 reg_write(ohci
, OHCI1394_IsoRecvIntMaskClear
, 1 << index
);
1565 context_stop(&ctx
->context
);
static void ohci_free_iso_context(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	unsigned long flags;
	int index;

	ohci_stop_iso(base);
	context_release(&ctx->context);
	free_page((unsigned long)ctx->header);

	spin_lock_irqsave(&ohci->lock, flags);

	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
		index = ctx - ohci->it_context_list;
		ohci->it_context_mask |= 1 << index;
	} else {
		index = ctx - ohci->ir_context_list;
		ohci->ir_context_mask |= 1 << index;
	}

	spin_unlock_irqrestore(&ohci->lock, flags);
}

static int
ohci_queue_iso_transmit(struct fw_iso_context *base,
			struct fw_iso_packet *packet,
			struct fw_iso_buffer *buffer,
			unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct descriptor *d, *last, *pd;
	struct fw_iso_packet *p;
	__le32 *header;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, payload_z, irq;
	u32 payload_index, payload_end_index, next_page_index;
	int page, end_page, i, length, offset;

	/*
	 * FIXME: Cycle lost behavior should be configurable: lose
	 * packet, retransmit or terminate..
	 */

	p = packet;
	payload_index = payload;

	if (p->skip)
		z = 1;
	else
		z = 2;
	if (p->header_length > 0)
		z++;

	/* Determine the first page the payload isn't contained in. */
	end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
	if (p->payload_length > 0)
		payload_z = end_page - (payload_index >> PAGE_SHIFT);
	else
		payload_z = 0;

	z += payload_z;

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));

	d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
	if (d == NULL)
		return -ENOMEM;

	if (!p->skip) {
		d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
		d[0].req_count = cpu_to_le16(8);

		header = (__le32 *) &d[1];
		header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
					IT_HEADER_TAG(p->tag) |
					IT_HEADER_TCODE(TCODE_STREAM_DATA) |
					IT_HEADER_CHANNEL(ctx->base.channel) |
					IT_HEADER_SPEED(ctx->base.speed));
		header[1] =
			cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
							  p->payload_length));
	}

	if (p->header_length > 0) {
		d[2].req_count    = cpu_to_le16(p->header_length);
		d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
		memcpy(&d[z], p->header, p->header_length);
	}

	pd = d + z - payload_z;
	payload_end_index = payload_index + p->payload_length;
	for (i = 0; i < payload_z; i++) {
		page               = payload_index >> PAGE_SHIFT;
		offset             = payload_index & ~PAGE_MASK;
		next_page_index    = (page + 1) << PAGE_SHIFT;
		length             =
			min(next_page_index, payload_end_index) - payload_index;
		pd[i].req_count    = cpu_to_le16(length);

		page_bus = page_private(buffer->pages[page]);
		pd[i].data_address = cpu_to_le32(page_bus + offset);

		payload_index += length;
	}

	if (p->interrupt)
		irq = DESCRIPTOR_IRQ_ALWAYS;
	else
		irq = DESCRIPTOR_NO_IRQ;

	last = z == 2 ? d : d + z - 1;
	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_STATUS |
				     DESCRIPTOR_BRANCH_ALWAYS |
				     irq);

	context_append(&ctx->context, d, z, header_z);

	return 0;
}
static int
ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
				  struct fw_iso_packet *packet,
				  struct fw_iso_buffer *buffer,
				  unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct db_descriptor *db = NULL;
	struct descriptor *d;
	struct fw_iso_packet *p;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, length, rest;
	int page, offset, packet_count, header_size;

	/*
	 * FIXME: Cycle lost behavior should be configurable: lose
	 * packet, retransmit or terminate..
	 */
	if (packet->skip) {
		d = context_get_descriptors(&ctx->context, 2, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		db = (struct db_descriptor *) d;
		db->control = cpu_to_le16(DESCRIPTOR_STATUS |
					  DESCRIPTOR_BRANCH_ALWAYS |
					  DESCRIPTOR_WAIT);
		db->first_size = cpu_to_le16(ctx->base.header_size + 4);
		context_append(&ctx->context, d, 2, 0);
	}

	p = packet;
	z = 2;

	/*
	 * The OHCI controller puts the status word in the header
	 * buffer too, so we need 4 extra bytes per packet.
	 */
	packet_count = p->header_length / ctx->base.header_size;
	header_size = packet_count * (ctx->base.header_size + 4);

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
	page     = payload >> PAGE_SHIFT;
	offset   = payload & ~PAGE_MASK;
	rest     = p->payload_length;

	/* FIXME: OHCI 1.0 doesn't support dual buffer receive */
	/* FIXME: make packet-per-buffer/dual-buffer a context option */
	while (rest > 0) {
		d = context_get_descriptors(&ctx->context,
					    z + header_z, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		db = (struct db_descriptor *) d;
		db->control = cpu_to_le16(DESCRIPTOR_STATUS |
					  DESCRIPTOR_BRANCH_ALWAYS);
		db->first_size = cpu_to_le16(ctx->base.header_size + 4);
		db->first_req_count = cpu_to_le16(header_size);
		db->first_res_count = db->first_req_count;
		db->first_buffer = cpu_to_le32(d_bus + sizeof(*db));

		if (offset + rest < PAGE_SIZE)
			length = rest;
		else
			length = PAGE_SIZE - offset;

		db->second_req_count = cpu_to_le16(length);
		db->second_res_count = db->second_req_count;
		page_bus = page_private(buffer->pages[page]);
		db->second_buffer = cpu_to_le32(page_bus + offset);

		if (p->interrupt && length == rest)
			db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

		context_append(&ctx->context, d, z, header_z);
		offset = (offset + length) & ~PAGE_MASK;
		rest -= length;
		page++;
	}

	return 0;
}

static int
ohci_queue_iso(struct fw_iso_context *base,
	       struct fw_iso_packet *packet,
	       struct fw_iso_buffer *buffer,
	       unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);

	if (base->type == FW_ISO_CONTEXT_TRANSMIT)
		return ohci_queue_iso_transmit(base, packet, buffer, payload);
	else if (ctx->context.ohci->version >= OHCI_VERSION_1_1)
		return ohci_queue_iso_receive_dualbuffer(base, packet,
							 buffer, payload);
	else
		/* FIXME: Implement fallback for OHCI 1.0 controllers. */
		return -ENOSYS;
}
static const struct fw_card_driver ohci_driver = {
	.name			= ohci_driver_name,
	.enable			= ohci_enable,
	.update_phy_reg		= ohci_update_phy_reg,
	.set_config_rom		= ohci_set_config_rom,
	.send_request		= ohci_send_request,
	.send_response		= ohci_send_response,
	.cancel_packet		= ohci_cancel_packet,
	.enable_phys_dma	= ohci_enable_phys_dma,
	.get_bus_time		= ohci_get_bus_time,

	.allocate_iso_context	= ohci_allocate_iso_context,
	.free_iso_context	= ohci_free_iso_context,
	.queue_iso		= ohci_queue_iso,
	.start_iso		= ohci_start_iso,
	.stop_iso		= ohci_stop_iso,
};
1816 pci_probe(struct pci_dev
*dev
, const struct pci_device_id
*ent
)
1818 struct fw_ohci
*ohci
;
1819 u32 bus_options
, max_receive
, link_speed
;
1824 ohci
= kzalloc(sizeof(*ohci
), GFP_KERNEL
);
1826 fw_error("Could not malloc fw_ohci data.\n");
1830 fw_card_initialize(&ohci
->card
, &ohci_driver
, &dev
->dev
);
1832 err
= pci_enable_device(dev
);
1834 fw_error("Failed to enable OHCI hardware.\n");
1838 pci_set_master(dev
);
1839 pci_write_config_dword(dev
, OHCI1394_PCI_HCI_Control
, 0);
1840 pci_set_drvdata(dev
, ohci
);
1842 spin_lock_init(&ohci
->lock
);
1844 tasklet_init(&ohci
->bus_reset_tasklet
,
1845 bus_reset_tasklet
, (unsigned long)ohci
);
1847 err
= pci_request_region(dev
, 0, ohci_driver_name
);
1849 fw_error("MMIO resource unavailable\n");
1853 ohci
->registers
= pci_iomap(dev
, 0, OHCI1394_REGISTER_SIZE
);
1854 if (ohci
->registers
== NULL
) {
1855 fw_error("Failed to remap registers\n");
1860 ar_context_init(&ohci
->ar_request_ctx
, ohci
,
1861 OHCI1394_AsReqRcvContextControlSet
);
1863 ar_context_init(&ohci
->ar_response_ctx
, ohci
,
1864 OHCI1394_AsRspRcvContextControlSet
);
1866 context_init(&ohci
->at_request_ctx
, ohci
, AT_BUFFER_SIZE
,
1867 OHCI1394_AsReqTrContextControlSet
, handle_at_packet
);
1869 context_init(&ohci
->at_response_ctx
, ohci
, AT_BUFFER_SIZE
,
1870 OHCI1394_AsRspTrContextControlSet
, handle_at_packet
);
1872 reg_write(ohci
, OHCI1394_IsoRecvIntMaskSet
, ~0);
1873 ohci
->it_context_mask
= reg_read(ohci
, OHCI1394_IsoRecvIntMaskSet
);
1874 reg_write(ohci
, OHCI1394_IsoRecvIntMaskClear
, ~0);
1875 size
= sizeof(struct iso_context
) * hweight32(ohci
->it_context_mask
);
1876 ohci
->it_context_list
= kzalloc(size
, GFP_KERNEL
);
1878 reg_write(ohci
, OHCI1394_IsoXmitIntMaskSet
, ~0);
1879 ohci
->ir_context_mask
= reg_read(ohci
, OHCI1394_IsoXmitIntMaskSet
);
1880 reg_write(ohci
, OHCI1394_IsoXmitIntMaskClear
, ~0);
1881 size
= sizeof(struct iso_context
) * hweight32(ohci
->ir_context_mask
);
1882 ohci
->ir_context_list
= kzalloc(size
, GFP_KERNEL
);
1884 if (ohci
->it_context_list
== NULL
|| ohci
->ir_context_list
== NULL
) {
1885 fw_error("Out of memory for it/ir contexts.\n");
1887 goto fail_registers
;
1890 /* self-id dma buffer allocation */
1891 ohci
->self_id_cpu
= dma_alloc_coherent(ohci
->card
.device
,
1895 if (ohci
->self_id_cpu
== NULL
) {
1896 fw_error("Out of memory for self ID buffer.\n");
1898 goto fail_registers
;
1901 bus_options
= reg_read(ohci
, OHCI1394_BusOptions
);
1902 max_receive
= (bus_options
>> 12) & 0xf;
1903 link_speed
= bus_options
& 0x7;
1904 guid
= ((u64
) reg_read(ohci
, OHCI1394_GUIDHi
) << 32) |
1905 reg_read(ohci
, OHCI1394_GUIDLo
);
1907 err
= fw_card_add(&ohci
->card
, max_receive
, link_speed
, guid
);
1911 ohci
->version
= reg_read(ohci
, OHCI1394_Version
) & 0x00ff00ff;
1912 fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
1913 dev
->dev
.bus_id
, ohci
->version
>> 16, ohci
->version
& 0xff);
1914 if (ohci
->version
< OHCI_VERSION_1_1
) {
1915 fw_notify(" Isochronous I/O is not yet implemented for "
1916 "OHCI 1.0 chips.\n");
1917 fw_notify(" Cameras, audio devices etc. won't work on "
1918 "this controller with this driver version.\n");
1923 dma_free_coherent(ohci
->card
.device
, SELF_ID_BUF_SIZE
,
1924 ohci
->self_id_cpu
, ohci
->self_id_bus
);
1926 kfree(ohci
->it_context_list
);
1927 kfree(ohci
->ir_context_list
);
1928 pci_iounmap(dev
, ohci
->registers
);
1930 pci_release_region(dev
, 0);
1932 pci_disable_device(dev
);
1934 fw_card_put(&ohci
->card
);
1939 static void pci_remove(struct pci_dev
*dev
)
1941 struct fw_ohci
*ohci
;
1943 ohci
= pci_get_drvdata(dev
);
1944 reg_write(ohci
, OHCI1394_IntMaskClear
, ~0);
1946 fw_core_remove_card(&ohci
->card
);
1949 * FIXME: Fail all pending packets here, now that the upper
1950 * layers can't queue any more.
1953 software_reset(ohci
);
1954 free_irq(dev
->irq
, ohci
);
1955 dma_free_coherent(ohci
->card
.device
, SELF_ID_BUF_SIZE
,
1956 ohci
->self_id_cpu
, ohci
->self_id_bus
);
1957 kfree(ohci
->it_context_list
);
1958 kfree(ohci
->ir_context_list
);
1959 pci_iounmap(dev
, ohci
->registers
);
1960 pci_release_region(dev
, 0);
1961 pci_disable_device(dev
);
1962 fw_card_put(&ohci
->card
);
1964 fw_notify("Removed fw-ohci device.\n");
static int pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct fw_ohci *ohci = pci_get_drvdata(pdev);
	int err;

	software_reset(ohci);
	free_irq(pdev->irq, ohci);
	err = pci_save_state(pdev);
	if (err) {
		fw_error("pci_save_state failed\n");
		return err;
	}
	err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
	if (err)
		fw_error("pci_set_power_state failed with %d\n", err);

	return 0;
}

static int pci_resume(struct pci_dev *pdev)
{
	struct fw_ohci *ohci = pci_get_drvdata(pdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		fw_error("pci_enable_device failed\n");
		return err;
	}

	return ohci_enable(&ohci->card, NULL, 0);
}

static struct pci_device_id pci_table[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
	{ }
};

MODULE_DEVICE_TABLE(pci, pci_table);

static struct pci_driver fw_ohci_pci_driver = {
	.name		= ohci_driver_name,
	.id_table	= pci_table,
	.probe		= pci_probe,
	.remove		= pci_remove,
	.resume		= pci_resume,
	.suspend	= pci_suspend,
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
MODULE_LICENSE("GPL");

/* Provide a module alias so root-on-sbp2 initrds don't break. */
#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
MODULE_ALIAS("ohci1394");
#endif

static int __init fw_ohci_init(void)
{
	return pci_register_driver(&fw_ohci_pci_driver);
}

static void __exit fw_ohci_cleanup(void)
{
	pci_unregister_driver(&fw_ohci_pci_driver);
}

module_init(fw_ohci_init);
module_exit(fw_ohci_cleanup);