/*
 * Driver for OHCI 1394 controllers
 *
 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>

#include <asm/byteorder.h>
#include <asm/page.h>
#include <asm/system.h>

#ifdef CONFIG_PPC_PMAC
#include <asm/pmac_feature.h>
#endif

#include "core.h"
#include "ohci.h"
#define DESCRIPTOR_OUTPUT_MORE		0
#define DESCRIPTOR_OUTPUT_LAST		(1 << 12)
#define DESCRIPTOR_INPUT_MORE		(2 << 12)
#define DESCRIPTOR_INPUT_LAST		(3 << 12)
#define DESCRIPTOR_STATUS		(1 << 11)
#define DESCRIPTOR_KEY_IMMEDIATE	(2 << 8)
#define DESCRIPTOR_PING			(1 << 7)
#define DESCRIPTOR_YY			(1 << 6)
#define DESCRIPTOR_NO_IRQ		(0 << 4)
#define DESCRIPTOR_IRQ_ERROR		(1 << 4)
#define DESCRIPTOR_IRQ_ALWAYS		(3 << 4)
#define DESCRIPTOR_BRANCH_ALWAYS	(3 << 2)
#define DESCRIPTOR_WAIT			(3 << 0)
struct descriptor {
	__le16 req_count;
	__le16 control;
	__le32 data_address;
	__le32 branch_address;
	__le16 res_count;
	__le16 transfer_status;
} __attribute__((aligned(16)));
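/*
 * Illustrative sketch (not from the original source): the low four bits
 * of branch_address hold the Z value, i.e. how many 16-byte descriptors
 * the controller should fetch at the (16-byte aligned) target address.
 * Linking a block of z descriptors at bus address next would look like:
 *
 *	d->branch_address = cpu_to_le32((next & ~0xf) | z);
 *
 * A Z of 0 marks the end of the DMA program.
 */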
#define CONTROL_SET(regs)	(regs)
#define CONTROL_CLEAR(regs)	((regs) + 4)
#define COMMAND_PTR(regs)	((regs) + 12)
#define CONTEXT_MATCH(regs)	((regs) + 16)
struct ar_buffer {
	struct descriptor descriptor;
	struct ar_buffer *next;
	__le32 data[0];
};

struct ar_context {
	struct fw_ohci *ohci;
	struct ar_buffer *current_buffer;
	struct ar_buffer *last_buffer;
	void *pointer;
	u32 regs;
	struct tasklet_struct tasklet;
};
struct context;

typedef int (*descriptor_callback_t)(struct context *ctx,
				     struct descriptor *d,
				     struct descriptor *last);
/*
 * A buffer that contains a block of DMA-able coherent memory used for
 * storing a portion of a DMA descriptor program.
 */
struct descriptor_buffer {
	struct list_head list;
	dma_addr_t buffer_bus;
	size_t buffer_size;
	size_t used;
	struct descriptor buffer[0];
};
struct context {
	struct fw_ohci *ohci;
	u32 regs;
	int total_allocation;

	/*
	 * List of page-sized buffers for storing DMA descriptors.
	 * Head of list contains buffers in use and tail of list contains
	 * free buffers.
	 */
	struct list_head buffer_list;

	/*
	 * Pointer to a buffer inside buffer_list that contains the tail
	 * end of the current DMA program.
	 */
	struct descriptor_buffer *buffer_tail;

	/*
	 * The descriptor containing the branch address of the first
	 * descriptor that has not yet been filled by the device.
	 */
	struct descriptor *last;

	/*
	 * The last descriptor in the DMA program.  It contains the branch
	 * address that must be updated upon appending a new descriptor.
	 */
	struct descriptor *prev;

	descriptor_callback_t callback;

	struct tasklet_struct tasklet;
};
#define IT_HEADER_SY(v)          ((v) << 0)
#define IT_HEADER_TCODE(v)       ((v) << 4)
#define IT_HEADER_CHANNEL(v)     ((v) << 8)
#define IT_HEADER_TAG(v)         ((v) << 14)
#define IT_HEADER_SPEED(v)       ((v) << 16)
#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
struct iso_context {
	struct fw_iso_context base;
	struct context context;
	int excess_bytes;
	void *header;
	size_t header_length;
};

#define CONFIG_ROM_SIZE 1024
struct fw_ohci {
	struct fw_card card;

	__iomem char *registers;
	int node_id;
	int generation;
	int request_generation;	/* for timestamping incoming requests */
	unsigned quirks;
	unsigned int pri_req_max;
	u32 bus_time;
	bool is_root;
	bool csr_state_setclear_abdicate;

	/*
	 * Spinlock for accessing fw_ohci data.  Never call out of
	 * this driver with this lock held.
	 */
	spinlock_t lock;

	struct mutex phy_reg_mutex;

	struct ar_context ar_request_ctx;
	struct ar_context ar_response_ctx;
	struct context at_request_ctx;
	struct context at_response_ctx;

	u32 it_context_mask;     /* unoccupied IT contexts */
	struct iso_context *it_context_list;
	u64 ir_context_channels; /* unoccupied channels */
	u32 ir_context_mask;     /* unoccupied IR contexts */
	struct iso_context *ir_context_list;
	u64 mc_channels; /* channels in use by the multichannel IR context */

	__be32    *config_rom;
	dma_addr_t config_rom_bus;
	__be32    *next_config_rom;
	dma_addr_t next_config_rom_bus;
	__be32     next_header;

	__le32    *self_id_cpu;
	dma_addr_t self_id_bus;
	struct tasklet_struct bus_reset_tasklet;

	u32 self_id_buffer[512];
};
static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
	return container_of(card, struct fw_ohci, card);
}
#define IT_CONTEXT_CYCLE_MATCH_ENABLE	0x80000000
#define IR_CONTEXT_BUFFER_FILL		0x80000000
#define IR_CONTEXT_ISOCH_HEADER		0x40000000
#define IR_CONTEXT_CYCLE_MATCH_ENABLE	0x20000000
#define IR_CONTEXT_MULTI_CHANNEL_MODE	0x10000000
#define IR_CONTEXT_DUAL_BUFFER_MODE	0x08000000

#define CONTEXT_RUN	0x8000
#define CONTEXT_WAKE	0x1000
#define CONTEXT_DEAD	0x0800
#define CONTEXT_ACTIVE	0x0400

#define OHCI1394_MAX_AT_REQ_RETRIES	0xf
#define OHCI1394_MAX_AT_RESP_RETRIES	0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8

#define OHCI1394_REGISTER_SIZE		0x800
#define OHCI_LOOP_COUNT			500
#define OHCI1394_PCI_HCI_Control	0x40
#define SELF_ID_BUF_SIZE		0x800
#define OHCI_TCODE_PHY_PACKET		0x0e
#define OHCI_VERSION_1_1		0x010010
static char ohci_driver_name[] = KBUILD_MODNAME;
#define PCI_DEVICE_ID_AGERE_FW643	0x5901
#define PCI_DEVICE_ID_JMICRON_JMB38X_FW	0x2380
#define PCI_DEVICE_ID_TI_TSB12LV22	0x8009

#define QUIRK_CYCLE_TIMER		1
#define QUIRK_RESET_PACKET		2
#define QUIRK_BE_HEADERS		4
#define QUIRK_NO_1394A			8
#define QUIRK_NO_MSI			16
/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
	unsigned short vendor, device, revision, flags;
} ohci_quirks[] = {
	{PCI_VENDOR_ID_AL, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER},

	{PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, PCI_ANY_ID,
		QUIRK_BE_HEADERS},

	{PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER},

	{PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER},

	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A},

	{PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_RESET_PACKET},

	{PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
};
/* This overrides anything that was found in ohci_quirks[]. */
static int param_quirks;
module_param_named(quirks, param_quirks, int, 0644);
MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
	", nonatomic cycle timer = "	__stringify(QUIRK_CYCLE_TIMER)
	", reset packet generation = "	__stringify(QUIRK_RESET_PACKET)
	", AR/selfID endianness = "	__stringify(QUIRK_BE_HEADERS)
	", no 1394a enhancements = "	__stringify(QUIRK_NO_1394A)
	", disable MSI = "		__stringify(QUIRK_NO_MSI)
	")");
#define OHCI_PARAM_DEBUG_AT_AR		1
#define OHCI_PARAM_DEBUG_SELFIDS	2
#define OHCI_PARAM_DEBUG_IRQS		4
#define OHCI_PARAM_DEBUG_BUSRESETS	8 /* only effective before chip init */

#ifdef CONFIG_FIREWIRE_OHCI_DEBUG

static int param_debug;
module_param_named(debug, param_debug, int, 0644);
MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
	", AT/AR events = "	__stringify(OHCI_PARAM_DEBUG_AT_AR)
	", self-IDs = "		__stringify(OHCI_PARAM_DEBUG_SELFIDS)
	", IRQs = "		__stringify(OHCI_PARAM_DEBUG_IRQS)
	", busReset events = "	__stringify(OHCI_PARAM_DEBUG_BUSRESETS)
	", or a combination, or all = -1)");
static void log_irqs(u32 evt)
{
	if (likely(!(param_debug &
			(OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS))))
		return;

	if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) &&
	    !(evt & OHCI1394_busReset))
		return;

	fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
	    evt & OHCI1394_selfIDComplete	? " selfID"		: "",
	    evt & OHCI1394_RQPkt		? " AR_req"		: "",
	    evt & OHCI1394_RSPkt		? " AR_resp"		: "",
	    evt & OHCI1394_reqTxComplete	? " AT_req"		: "",
	    evt & OHCI1394_respTxComplete	? " AT_resp"		: "",
	    evt & OHCI1394_isochRx		? " IR"			: "",
	    evt & OHCI1394_isochTx		? " IT"			: "",
	    evt & OHCI1394_postedWriteErr	? " postedWriteErr"	: "",
	    evt & OHCI1394_cycleTooLong		? " cycleTooLong"	: "",
	    evt & OHCI1394_cycle64Seconds	? " cycle64Seconds"	: "",
	    evt & OHCI1394_cycleInconsistent	? " cycleInconsistent"	: "",
	    evt & OHCI1394_regAccessFail	? " regAccessFail"	: "",
	    evt & OHCI1394_busReset		? " busReset"		: "",
	    evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
		    OHCI1394_RSPkt | OHCI1394_reqTxComplete |
		    OHCI1394_respTxComplete | OHCI1394_isochRx |
		    OHCI1394_isochTx | OHCI1394_postedWriteErr |
		    OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
		    OHCI1394_cycleInconsistent |
		    OHCI1394_regAccessFail | OHCI1394_busReset)
						? " ?"			: "");
}
static const char *speed[] = {
	[0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta",
};
static const char *power[] = {
	[0] = "+0W",  [1] = "+15W", [2] = "+30W", [3] = "+45W",
	[4] = "-3W",  [5] = " ?W",  [6] = "-3..-6W", [7] = "-3..-10W",
};
static const char port[] = { '.', '-', 'p', 'c', };
static char _p(u32 *s, int shift)
{
	return port[*s >> shift & 3];
}
static void log_selfids(int node_id, int generation, int self_id_count, u32 *s)
{
	if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
		return;

	fw_notify("%d selfIDs, generation %d, local node ID %04x\n",
		  self_id_count, generation, node_id);

	for (; self_id_count--; ++s)
		if ((*s & 1 << 23) == 0)
			fw_notify("selfID 0: %08x, phy %d [%c%c%c] "
			    "%s gc=%d %s %s%s%s\n",
			    *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
			    speed[*s >> 14 & 3], *s >> 16 & 63,
			    power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
			    *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
		else
			fw_notify("selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
			    *s, *s >> 24 & 63,
			    _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
			    _p(s,  8), _p(s,  6), _p(s,  4), _p(s,  2));
}
static const char *evts[] = {
	[0x00] = "evt_no_status",	[0x01] = "-reserved-",
	[0x02] = "evt_long_packet",	[0x03] = "evt_missing_ack",
	[0x04] = "evt_underrun",	[0x05] = "evt_overrun",
	[0x06] = "evt_descriptor_read",	[0x07] = "evt_data_read",
	[0x08] = "evt_data_write",	[0x09] = "evt_bus_reset",
	[0x0a] = "evt_timeout",		[0x0b] = "evt_tcode_err",
	[0x0c] = "-reserved-",		[0x0d] = "-reserved-",
	[0x0e] = "evt_unknown",		[0x0f] = "evt_flushed",
	[0x10] = "-reserved-",		[0x11] = "ack_complete",
	[0x12] = "ack_pending",		[0x13] = "-reserved-",
	[0x14] = "ack_busy_X",		[0x15] = "ack_busy_A",
	[0x16] = "ack_busy_B",		[0x17] = "-reserved-",
	[0x18] = "-reserved-",		[0x19] = "-reserved-",
	[0x1a] = "-reserved-",		[0x1b] = "ack_tardy",
	[0x1c] = "-reserved-",		[0x1d] = "ack_data_error",
	[0x1e] = "ack_type_error",	[0x1f] = "-reserved-",
	[0x20] = "pending/cancelled",
};
static const char *tcodes[] = {
	[0x0] = "QW req",		[0x1] = "BW req",
	[0x2] = "W resp",		[0x3] = "-reserved-",
	[0x4] = "QR req",		[0x5] = "BR req",
	[0x6] = "QR resp",		[0x7] = "BR resp",
	[0x8] = "cycle start",		[0x9] = "Lk req",
	[0xa] = "async stream packet",	[0xb] = "Lk resp",
	[0xc] = "-reserved-",		[0xd] = "-reserved-",
	[0xe] = "link internal",	[0xf] = "-reserved-",
};
static const char *phys[] = {
	[0x0] = "phy config packet",	[0x1] = "link-on packet",
	[0x2] = "self-id packet",	[0x3] = "-reserved-",
};
static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
{
	int tcode = header[0] >> 4 & 0xf;
	char specific[12];

	if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
		return;

	if (unlikely(evt >= ARRAY_SIZE(evts)))
		evt = 0x1f;

	if (evt == OHCI1394_evt_bus_reset) {
		fw_notify("A%c evt_bus_reset, generation %d\n",
		    dir, (header[2] >> 16) & 0xff);
		return;
	}

	if (header[0] == ~header[1]) {
		fw_notify("A%c %s, %s, %08x\n",
		    dir, evts[evt], phys[header[0] >> 30 & 0x3], header[0]);
		return;
	}

	switch (tcode) {
	case 0x0: case 0x6: case 0x8:
		snprintf(specific, sizeof(specific), " = %08x",
			 be32_to_cpu((__force __be32)header[3]));
		break;
	case 0x1: case 0x5: case 0x7: case 0x9: case 0xb:
		snprintf(specific, sizeof(specific), " %x,%x",
			 header[3] >> 16, header[3] & 0xffff);
		break;
	default:
		specific[0] = '\0';
	}

	switch (tcode) {
	case 0xe: case 0xa:
		fw_notify("A%c %s, %s\n", dir, evts[evt], tcodes[tcode]);
		break;
	case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
		fw_notify("A%c spd %x tl %02x, "
		    "%04x -> %04x, %s, "
		    "%s, %04x%08x%s\n",
		    dir, speed, header[0] >> 10 & 0x3f,
		    header[1] >> 16, header[0] >> 16, evts[evt],
		    tcodes[tcode], header[1] & 0xffff, header[2], specific);
		break;
	default:
		fw_notify("A%c spd %x tl %02x, "
		    "%04x -> %04x, %s, "
		    "%s%s\n",
		    dir, speed, header[0] >> 10 & 0x3f,
		    header[1] >> 16, header[0] >> 16, evts[evt],
		    tcodes[tcode], specific);
	}
}
#else

#define param_debug 0
static inline void log_irqs(u32 evt) {}
static inline void log_selfids(int node_id, int generation, int self_id_count, u32 *s) {}
static inline void log_ar_at_event(char dir, int speed, u32 *header, int evt) {}

#endif /* CONFIG_FIREWIRE_OHCI_DEBUG */
static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
	writel(data, ohci->registers + offset);
}

static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
{
	return readl(ohci->registers + offset);
}

static inline void flush_writes(const struct fw_ohci *ohci)
{
	/* Do a dummy read to flush writes. */
	reg_read(ohci, OHCI1394_Version);
}
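/*
 * Usage note (illustrative, not part of the original source): reg_write()
 * goes through writel(), which may be posted on PCI.  When a write must
 * reach the controller before the CPU proceeds, follow it with
 * flush_writes(), as done throughout this file:
 *
 *	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
 *	flush_writes(ohci);	// the dummy readl() forces the write out
 */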
static int read_phy_reg(struct fw_ohci *ohci, int addr)
{
	u32 val;
	int i;

	reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
	for (i = 0; i < 3 + 100; i++) {
		val = reg_read(ohci, OHCI1394_PhyControl);
		if (val & OHCI1394_PhyControl_ReadDone)
			return OHCI1394_PhyControl_ReadData(val);

		/*
		 * Try a few times without waiting.  Sleeping is necessary
		 * only when the link/PHY interface is busy.
		 */
		if (i >= 3)
			msleep(1);
	}
	fw_error("failed to read phy reg\n");

	return -EBUSY;
}
static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
{
	int i;

	reg_write(ohci, OHCI1394_PhyControl,
		  OHCI1394_PhyControl_Write(addr, val));
	for (i = 0; i < 3 + 100; i++) {
		val = reg_read(ohci, OHCI1394_PhyControl);
		if (!(val & OHCI1394_PhyControl_WritePending))
			return 0;

		if (i >= 3)
			msleep(1);
	}
	fw_error("failed to write phy reg\n");

	return -EBUSY;
}
static int update_phy_reg(struct fw_ohci *ohci, int addr,
			  int clear_bits, int set_bits)
{
	int ret = read_phy_reg(ohci, addr);
	if (ret < 0)
		return ret;

	/*
	 * The interrupt status bits are cleared by writing a one bit.
	 * Avoid clearing them unless explicitly requested in set_bits.
	 */
	if (addr == 5)
		clear_bits |= PHY_INT_STATUS_BITS;

	return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits);
}
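/*
 * Illustrative use of the read-modify-write helper above, mirroring how
 * this driver later sets the link-active and contender bits in PHY
 * register 4 (see ohci_enable()):
 *
 *	err = update_phy_reg(ohci, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER);
 */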
static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
{
	int ret;

	ret = update_phy_reg(ohci, 7, PHY_PAGE_SELECT, page << 5);
	if (ret < 0)
		return ret;

	return read_phy_reg(ohci, addr);
}
static int ohci_read_phy_reg(struct fw_card *card, int addr)
{
	struct fw_ohci *ohci = fw_ohci(card);
	int ret;

	mutex_lock(&ohci->phy_reg_mutex);
	ret = read_phy_reg(ohci, addr);
	mutex_unlock(&ohci->phy_reg_mutex);

	return ret;
}
static int ohci_update_phy_reg(struct fw_card *card, int addr,
			       int clear_bits, int set_bits)
{
	struct fw_ohci *ohci = fw_ohci(card);
	int ret;

	mutex_lock(&ohci->phy_reg_mutex);
	ret = update_phy_reg(ohci, addr, clear_bits, set_bits);
	mutex_unlock(&ohci->phy_reg_mutex);

	return ret;
}
static void ar_context_link_page(struct ar_context *ctx,
				 struct ar_buffer *ab, dma_addr_t ab_bus)
{
	size_t offset;

	ab->next = NULL;
	memset(&ab->descriptor, 0, sizeof(ab->descriptor));
	ab->descriptor.control        = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
						    DESCRIPTOR_STATUS |
						    DESCRIPTOR_BRANCH_ALWAYS);
	offset = offsetof(struct ar_buffer, data);
	ab->descriptor.req_count      = cpu_to_le16(PAGE_SIZE - offset);
	ab->descriptor.data_address   = cpu_to_le32(ab_bus + offset);
	ab->descriptor.res_count      = cpu_to_le16(PAGE_SIZE - offset);
	ab->descriptor.branch_address = 0;

	wmb(); /* finish init of new descriptors before branch_address update */
	ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
	ctx->last_buffer->next = ab;
	ctx->last_buffer = ab;

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);
}
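/*
 * Sketch of the resulting AR buffer list (illustrative only): each page
 * starts with an INPUT_MORE descriptor whose branch_address points at the
 * next page with Z = 1, so the controller hops from page to page as it
 * fills them with received packets:
 *
 *	[page A: descriptor|data] --branch(Z=1)--> [page B: descriptor|data]
 */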
static int ar_context_add_page(struct ar_context *ctx)
{
	struct device *dev = ctx->ohci->card.device;
	struct ar_buffer *ab;
	dma_addr_t uninitialized_var(ab_bus);

	ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
	if (ab == NULL)
		return -ENOMEM;

	ar_context_link_page(ctx, ab, ab_bus);

	return 0;
}
static void ar_context_release(struct ar_context *ctx)
{
	struct ar_buffer *ab, *ab_next;
	size_t offset;
	dma_addr_t ab_bus;

	for (ab = ctx->current_buffer; ab; ab = ab_next) {
		ab_next = ab->next;
		offset = offsetof(struct ar_buffer, data);
		ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
		dma_free_coherent(ctx->ohci->card.device, PAGE_SIZE,
				  ab, ab_bus);
	}
}
#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
#define cond_le32_to_cpu(v) \
	(ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v))
#else
#define cond_le32_to_cpu(v) le32_to_cpu(v)
#endif
static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
	struct fw_ohci *ohci = ctx->ohci;
	struct fw_packet p;
	u32 status, length, tcode;
	int evt;

	p.header[0] = cond_le32_to_cpu(buffer[0]);
	p.header[1] = cond_le32_to_cpu(buffer[1]);
	p.header[2] = cond_le32_to_cpu(buffer[2]);

	tcode = (p.header[0] >> 4) & 0x0f;
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
		p.header[3] = (__force __u32) buffer[3];
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		p.header[3] = cond_le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		p.header[3] = cond_le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = p.header[3] >> 16;
		break;

	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case OHCI_TCODE_PHY_PACKET:
		p.header_length = 12;
		p.payload_length = 0;
		break;

	default:
		/* FIXME: Stop context, discard everything, and restart? */
		p.header_length = 0;
		p.payload_length = 0;
	}

	p.payload = (void *) buffer + p.header_length;

	/* FIXME: What to do about evt_* errors? */
	length = (p.header_length + p.payload_length + 3) / 4;
	status = cond_le32_to_cpu(buffer[length]);
	evt    = (status >> 16) & 0x1f;

	p.ack        = evt - 16;
	p.speed      = (status >> 21) & 0x7;
	p.timestamp  = status & 0xffff;
	p.generation = ohci->request_generation;

	log_ar_at_event('R', p.speed, p.header, evt);

	/*
	 * Several controllers, notably from NEC and VIA, forget to
	 * write ack_complete status at PHY packet reception.
	 */
	if (evt == OHCI1394_evt_no_status &&
	    (p.header[0] & 0xff) == (OHCI1394_phy_tcode << 4))
		p.ack = ACK_COMPLETE;

	/*
	 * The OHCI bus reset handler synthesizes a PHY packet with
	 * the new generation number when a bus reset happens (see
	 * section 8.4.2.3).  This helps us determine when a request
	 * was received and make sure we send the response in the same
	 * generation.  We only need this for requests; for responses
	 * we use the unique tlabel for finding the matching
	 * request.
	 *
	 * Alas some chips sometimes emit bus reset packets with a
	 * wrong generation.  We set the correct generation for these
	 * at a slightly incorrect time (in bus_reset_tasklet).
	 */
	if (evt == OHCI1394_evt_bus_reset) {
		if (!(ohci->quirks & QUIRK_RESET_PACKET))
			ohci->request_generation = (p.header[2] >> 16) & 0xff;
	} else if (ctx == &ohci->ar_request_ctx) {
		fw_core_handle_request(&ohci->card, &p);
	} else {
		fw_core_handle_response(&ohci->card, &p);
	}

	return buffer + length + 1;
}
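/*
 * Illustrative layout of one received packet inside the AR buffer (not
 * part of the original source).  handle_ar_packet() consumes it and
 * returns a pointer just past the trailer quadlet:
 *
 *	buffer[0..2]     header quadlets (always present)
 *	buffer[3]        fourth header quadlet, tcode dependent
 *	...              payload, if any
 *	buffer[length]   trailer: xferStatus/timeStamp quadlet
 */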
static void ar_context_tasklet(unsigned long data)
{
	struct ar_context *ctx = (struct ar_context *)data;
	struct ar_buffer *ab;
	struct descriptor *d;
	void *buffer, *end;
	__le16 res_count;

	ab = ctx->current_buffer;
	d = &ab->descriptor;

	res_count = ACCESS_ONCE(d->res_count);
	if (res_count == 0) {
		size_t size, size2, rest, pktsize, size3, offset;
		dma_addr_t start_bus;
		void *start;

		/*
		 * This descriptor is finished and we may have a
		 * packet split across this and the next buffer. We
		 * reuse the page for reassembling the split packet.
		 */

		offset = offsetof(struct ar_buffer, data);
		start = ab;
		start_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
		buffer = ab->data;

		ab = ab->next;
		d = &ab->descriptor;
		size = start + PAGE_SIZE - ctx->pointer;
		/* valid buffer data in the next page */
		rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
		/* what actually fits in this page */
		size2 = min(rest, (size_t)PAGE_SIZE - offset - size);
		memmove(buffer, ctx->pointer, size);
		memcpy(buffer + size, ab->data, size2);

		while (size > 0) {
			void *next = handle_ar_packet(ctx, buffer);
			pktsize = next - buffer;
			if (pktsize >= size) {
				/*
				 * We have handled all the data that was
				 * originally in this page, so we can now
				 * continue in the next page.
				 */
				buffer = next;
				break;
			}
			/* move the next packet to the start of the buffer */
			memmove(buffer, next, size + size2 - pktsize);
			size -= pktsize;
			/* fill up this page again */
			size3 = min(rest - size2,
				    (size_t)PAGE_SIZE - offset - size - size2);
			memcpy(buffer + size + size2,
			       (void *) ab->data + size2, size3);
			size2 += size3;
		}

		if (rest > 0) {
			/* handle the packets that are fully in the next page */
			buffer = (void *) ab->data +
					(buffer - (start + offset + size));
			end = (void *) ab->data + rest;

			while (buffer < end)
				buffer = handle_ar_packet(ctx, buffer);

			ctx->current_buffer = ab;
			ctx->pointer = end;

			ar_context_link_page(ctx, start, start_bus);
		} else {
			ctx->pointer = start + PAGE_SIZE;
		}
	} else {
		buffer = ctx->pointer;
		ctx->pointer = end =
			(void *) ab + PAGE_SIZE - le16_to_cpu(res_count);

		while (buffer < end)
			buffer = handle_ar_packet(ctx, buffer);
	}
}
*ctx
,
842 struct fw_ohci
*ohci
, u32 regs
)
848 ctx
->last_buffer
= &ab
;
849 tasklet_init(&ctx
->tasklet
, ar_context_tasklet
, (unsigned long)ctx
);
851 ar_context_add_page(ctx
);
852 ar_context_add_page(ctx
);
853 ctx
->current_buffer
= ab
.next
;
854 ctx
->pointer
= ctx
->current_buffer
->data
;
859 static void ar_context_run(struct ar_context
*ctx
)
861 struct ar_buffer
*ab
= ctx
->current_buffer
;
865 offset
= offsetof(struct ar_buffer
, data
);
866 ab_bus
= le32_to_cpu(ab
->descriptor
.data_address
) - offset
;
868 reg_write(ctx
->ohci
, COMMAND_PTR(ctx
->regs
), ab_bus
| 1);
869 reg_write(ctx
->ohci
, CONTROL_SET(ctx
->regs
), CONTEXT_RUN
);
870 flush_writes(ctx
->ohci
);
static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
{
	int b, key;

	b   = (le16_to_cpu(d->control) & DESCRIPTOR_BRANCH_ALWAYS) >> 2;
	key = (le16_to_cpu(d->control) & DESCRIPTOR_KEY_IMMEDIATE) >> 8;

	/* figure out which descriptor the branch address goes in */
	if (z == 2 && (b == 3 || key == 2))
		return d;
	else
		return d + z - 1;
}
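/*
 * Example (illustrative): an AT program built by at_context_queue_packet()
 * starts with a KEY_IMMEDIATE descriptor whose following descriptor slots
 * only carry immediate header data.  For such a two-descriptor block
 * (z == 2, key == 2) the branch address lives in the first descriptor
 * itself, not in d[z - 1] as in the general case.
 */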
static void context_tasklet(unsigned long data)
{
	struct context *ctx = (struct context *) data;
	struct descriptor *d, *last;
	u32 address;
	int z;
	struct descriptor_buffer *desc;

	desc = list_entry(ctx->buffer_list.next,
			struct descriptor_buffer, list);
	last = ctx->last;
	while (last->branch_address != 0) {
		struct descriptor_buffer *old_desc = desc;
		address = le32_to_cpu(last->branch_address);
		z = address & 0xf;
		address &= ~0xf;

		/* If the branch address points to a buffer outside of the
		 * current buffer, advance to the next buffer. */
		if (address < desc->buffer_bus ||
				address >= desc->buffer_bus + desc->used)
			desc = list_entry(desc->list.next,
					struct descriptor_buffer, list);
		d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
		last = find_branch_descriptor(d, z);

		if (!ctx->callback(ctx, d, last))
			break;

		if (old_desc != desc) {
			/* If we've advanced to the next buffer, move the
			 * previous buffer to the free list. */
			unsigned long flags;
			old_desc->used = 0;
			spin_lock_irqsave(&ctx->ohci->lock, flags);
			list_move_tail(&old_desc->list, &ctx->buffer_list);
			spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		}
		ctx->last = last;
	}
}
/*
 * Allocate a new buffer and add it to the list of free buffers for this
 * context.  Must be called with ohci->lock held.
 */
static int context_add_buffer(struct context *ctx)
{
	struct descriptor_buffer *desc;
	dma_addr_t uninitialized_var(bus_addr);
	int offset;

	/*
	 * 16MB of descriptors should be far more than enough for any DMA
	 * program.  This will catch run-away userspace or DoS attacks.
	 */
	if (ctx->total_allocation >= 16*1024*1024)
		return -ENOMEM;

	desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
			&bus_addr, GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	offset = (void *)&desc->buffer - (void *)desc;
	desc->buffer_size = PAGE_SIZE - offset;
	desc->buffer_bus = bus_addr + offset;
	desc->used = 0;

	list_add_tail(&desc->list, &ctx->buffer_list);
	ctx->total_allocation += PAGE_SIZE;

	return 0;
}
static int context_init(struct context *ctx, struct fw_ohci *ohci,
			u32 regs, descriptor_callback_t callback)
{
	ctx->ohci = ohci;
	ctx->regs = regs;
	ctx->total_allocation = 0;

	INIT_LIST_HEAD(&ctx->buffer_list);
	if (context_add_buffer(ctx) < 0)
		return -ENOMEM;

	ctx->buffer_tail = list_entry(ctx->buffer_list.next,
			struct descriptor_buffer, list);

	tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
	ctx->callback = callback;

	/*
	 * We put a dummy descriptor in the buffer that has a NULL
	 * branch address and looks like it's been sent.  That way we
	 * have a descriptor to append DMA programs to.
	 */
	memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
	ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
	ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
	ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
	ctx->last = ctx->buffer_tail->buffer;
	ctx->prev = ctx->buffer_tail->buffer;

	return 0;
}
static void context_release(struct context *ctx)
{
	struct fw_card *card = &ctx->ohci->card;
	struct descriptor_buffer *desc, *tmp;

	list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list)
		dma_free_coherent(card->device, PAGE_SIZE, desc,
			desc->buffer_bus -
			((void *)&desc->buffer - (void *)desc));
}
/* Must be called with ohci->lock held */
static struct descriptor *context_get_descriptors(struct context *ctx,
						  int z, dma_addr_t *d_bus)
{
	struct descriptor *d = NULL;
	struct descriptor_buffer *desc = ctx->buffer_tail;

	if (z * sizeof(*d) > desc->buffer_size)
		return NULL;

	if (z * sizeof(*d) > desc->buffer_size - desc->used) {
		/* No room for the descriptor in this buffer, so advance to the
		 * next one. */

		if (desc->list.next == &ctx->buffer_list) {
			/* If there is no free buffer next in the list,
			 * allocate one. */
			if (context_add_buffer(ctx) < 0)
				return NULL;
		}
		desc = list_entry(desc->list.next,
				struct descriptor_buffer, list);
		ctx->buffer_tail = desc;
	}

	d = desc->buffer + desc->used / sizeof(*d);
	memset(d, 0, z * sizeof(*d));
	*d_bus = desc->buffer_bus + desc->used;

	return d;
}
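/*
 * Typical call sequence (illustrative, assuming ohci->lock is held):
 *
 *	d = context_get_descriptors(ctx, z, &d_bus);
 *	if (d == NULL)
 *		return -ENOMEM;
 *	// ... fill in d[0..z-1] ...
 *	context_append(ctx, d, z, 0);	// links d into the running program
 */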
static void context_run(struct context *ctx, u32 extra)
{
	struct fw_ohci *ohci = ctx->ohci;

	reg_write(ohci, COMMAND_PTR(ctx->regs),
		  le32_to_cpu(ctx->last->branch_address));
	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
	flush_writes(ohci);
}
static void context_append(struct context *ctx,
			   struct descriptor *d, int z, int extra)
{
	dma_addr_t d_bus;
	struct descriptor_buffer *desc = ctx->buffer_tail;

	d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);

	desc->used += (z + extra) * sizeof(*d);

	wmb(); /* finish init of new descriptors before branch_address update */
	ctx->prev->branch_address = cpu_to_le32(d_bus | z);
	ctx->prev = find_branch_descriptor(d, z);

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);
}
static void context_stop(struct context *ctx)
{
	u32 reg;
	int i;

	reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
	flush_writes(ctx->ohci);

	for (i = 0; i < 10; i++) {
		reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
		if ((reg & CONTEXT_ACTIVE) == 0)
			return;

		mdelay(1);
	}
	fw_error("Error: DMA context still active (0x%08x)\n", reg);
}
struct driver_data {
	struct fw_packet *packet;
};
/*
 * This function appends a packet to the DMA queue for transmission.
 * Must always be called with the ohci->lock held to ensure proper
 * generation handling and locking around packet queue manipulation.
 */
static int at_context_queue_packet(struct context *ctx,
				   struct fw_packet *packet)
{
	struct fw_ohci *ohci = ctx->ohci;
	dma_addr_t d_bus, uninitialized_var(payload_bus);
	struct driver_data *driver_data;
	struct descriptor *d, *last;
	__le32 *header;
	int z, tcode;
	u32 reg;

	d = context_get_descriptors(ctx, 4, &d_bus);
	if (d == NULL) {
		packet->ack = RCODE_SEND_ERROR;
		return -1;
	}

	d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
	d[0].res_count = cpu_to_le16(packet->timestamp);

	/*
	 * The DMA format for asynchronous link packets is different
	 * from the IEEE1394 layout, so shift the fields around
	 * accordingly.  If header_length is 8, it's a PHY packet, to
	 * which we need to prepend an extra quadlet.
	 */

	header = (__le32 *) &d[1];
	switch (packet->header_length) {
	case 16:
	case 12:
		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
					(packet->speed << 16));
		header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
					(packet->header[0] & 0xffff0000));
		header[2] = cpu_to_le32(packet->header[2]);

		tcode = (packet->header[0] >> 4) & 0x0f;
		if (TCODE_IS_BLOCK_PACKET(tcode))
			header[3] = cpu_to_le32(packet->header[3]);
		else
			header[3] = (__force __le32) packet->header[3];

		d[0].req_count = cpu_to_le16(packet->header_length);
		break;

	case 8:
		header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
					(packet->speed << 16));
		header[1] = cpu_to_le32(packet->header[0]);
		header[2] = cpu_to_le32(packet->header[1]);
		d[0].req_count = cpu_to_le16(12);

		if (is_ping_packet(packet->header))
			d[0].control |= cpu_to_le16(DESCRIPTOR_PING);
		break;

	case 4:
		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
					(packet->speed << 16));
		header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
		d[0].req_count = cpu_to_le16(8);
		break;

	default:
		packet->ack = RCODE_SEND_ERROR;
		return -1;
	}

	driver_data = (struct driver_data *) &d[3];
	driver_data->packet = packet;
	packet->driver_data = driver_data;

	if (packet->payload_length > 0) {
		payload_bus =
			dma_map_single(ohci->card.device, packet->payload,
				       packet->payload_length, DMA_TO_DEVICE);
		if (dma_mapping_error(ohci->card.device, payload_bus)) {
			packet->ack = RCODE_SEND_ERROR;
			return -1;
		}
		packet->payload_bus	= payload_bus;
		packet->payload_mapped	= true;

		d[2].req_count    = cpu_to_le16(packet->payload_length);
		d[2].data_address = cpu_to_le32(payload_bus);
		last = &d[2];
		z = 3;
	} else {
		last = &d[0];
		z = 2;
	}

	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_IRQ_ALWAYS |
				     DESCRIPTOR_BRANCH_ALWAYS);

	/*
	 * If the controller and packet generations don't match, we need to
	 * bail out and try again.  If IntEvent.busReset is set, the AT context
	 * is halted, so appending to the context and trying to run it is
	 * futile.  Most controllers do the right thing and just flush the AT
	 * queue (per section 7.2.3.2 of the OHCI 1.1 specification), but
	 * some controllers (like a JMicron JMB381 PCI-e) misbehave and wind
	 * up stalling out.  So we just bail out in software and try again
	 * later, and everyone is happy.
	 * FIXME: Document how the locking works.
	 */
	if (ohci->generation != packet->generation ||
	    reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
		if (packet->payload_mapped)
			dma_unmap_single(ohci->card.device, payload_bus,
					 packet->payload_length, DMA_TO_DEVICE);
		packet->ack = RCODE_GENERATION;
		return -1;
	}

	context_append(ctx, d, z, 4 - z);

	/* If the context isn't already running, start it up. */
	reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
	if ((reg & CONTEXT_RUN) == 0)
		context_run(ctx, 0);

	return 0;
}
static int handle_at_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct driver_data *driver_data;
	struct fw_packet *packet;
	struct fw_ohci *ohci = context->ohci;
	int evt;

	if (last->transfer_status == 0)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	driver_data = (struct driver_data *) &d[3];
	packet = driver_data->packet;
	if (packet == NULL)
		/* This packet was cancelled, just continue. */
		return 1;

	if (packet->payload_mapped)
		dma_unmap_single(ohci->card.device, packet->payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);

	evt = le16_to_cpu(last->transfer_status) & 0x1f;
	packet->timestamp = le16_to_cpu(last->res_count);

	log_ar_at_event('T', packet->speed, packet->header, evt);

	switch (evt) {
	case OHCI1394_evt_timeout:
		/* Async response transmit timed out. */
		packet->ack = RCODE_CANCELLED;
		break;

	case OHCI1394_evt_flushed:
		/*
		 * A flushed packet should give the same error as when
		 * we try to use a stale generation count.
		 */
		packet->ack = RCODE_GENERATION;
		break;

	case OHCI1394_evt_missing_ack:
		/*
		 * Using a valid (current) generation count, but the
		 * node is not on the bus or not sending acks.
		 */
		packet->ack = RCODE_NO_ACK;
		break;

	case ACK_COMPLETE + 0x10:
	case ACK_PENDING + 0x10:
	case ACK_BUSY_X + 0x10:
	case ACK_BUSY_A + 0x10:
	case ACK_BUSY_B + 0x10:
	case ACK_DATA_ERROR + 0x10:
	case ACK_TYPE_ERROR + 0x10:
		packet->ack = evt - 0x10;
		break;

	default:
		packet->ack = RCODE_SEND_ERROR;
		break;
	}

	packet->callback(packet, &ohci->card, packet->ack);

	return 1;
}
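/*
 * Note (illustrative): the controller reports IEEE 1394 ack codes in
 * evt_* space as ack + 0x10 (see evts[] above, where 0x11 is
 * ack_complete), which is why the switch above subtracts 0x10 for the
 * ACK_* cases.
 */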
#define HEADER_GET_DESTINATION(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_TCODE(q)		(((q) >> 4) & 0x0f)
#define HEADER_GET_OFFSET_HIGH(q)	(((q) >> 0) & 0xffff)
#define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q)	(((q) >> 0) & 0xffff)
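/*
 * Worked example (illustrative): for a block write request with
 * header[0] == 0xffc0ab10, HEADER_GET_DESTINATION() yields 0xffc0
 * (bus 0x3ff, node 0) and HEADER_GET_TCODE() yields 0x1
 * (TCODE_WRITE_BLOCK_REQUEST).
 */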
static void handle_local_rom(struct fw_ohci *ohci,
			     struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, i;

	tcode = HEADER_GET_TCODE(packet->header[0]);
	if (TCODE_IS_BLOCK_PACKET(tcode))
		length = HEADER_GET_DATA_LENGTH(packet->header[3]);
	else
		length = 4;

	i = csr - CSR_CONFIG_ROM;
	if (i + length > CONFIG_ROM_SIZE) {
		fw_fill_response(&response, packet->header,
				 RCODE_ADDRESS_ERROR, NULL, 0);
	} else if (!TCODE_IS_READ_REQUEST(tcode)) {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
	} else {
		fw_fill_response(&response, packet->header, RCODE_COMPLETE,
				 (void *) ohci->config_rom + i, length);
	}

	fw_core_handle_response(&ohci->card, &response);
}
static void handle_local_lock(struct fw_ohci *ohci,
			      struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, ext_tcode, sel, try;
	__be32 *payload, lock_old;
	u32 lock_arg, lock_data;

	tcode = HEADER_GET_TCODE(packet->header[0]);
	length = HEADER_GET_DATA_LENGTH(packet->header[3]);
	payload = packet->payload;
	ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);

	if (tcode == TCODE_LOCK_REQUEST &&
	    ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
		lock_arg = be32_to_cpu(payload[0]);
		lock_data = be32_to_cpu(payload[1]);
	} else if (tcode == TCODE_READ_QUADLET_REQUEST) {
		lock_arg = 0;
		lock_data = 0;
	} else {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
		goto out;
	}

	sel = (csr - CSR_BUS_MANAGER_ID) / 4;
	reg_write(ohci, OHCI1394_CSRData, lock_data);
	reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
	reg_write(ohci, OHCI1394_CSRControl, sel);

	for (try = 0; try < 20; try++)
		if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) {
			lock_old = cpu_to_be32(reg_read(ohci,
							OHCI1394_CSRData));
			fw_fill_response(&response, packet->header,
					 RCODE_COMPLETE,
					 &lock_old, sizeof(lock_old));
			goto out;
		}

	fw_error("swap not done (CSR lock timeout)\n");
	fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);

 out:
	fw_core_handle_response(&ohci->card, &response);
}
static void handle_local_request(struct context *ctx, struct fw_packet *packet)
{
	u64 offset, csr;

	if (ctx == &ctx->ohci->at_request_ctx) {
		packet->ack = ACK_PENDING;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}

	offset =
		((unsigned long long)
		 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
		packet->header[2];
	csr = offset - CSR_REGISTER_BASE;

	/* Handle config rom reads. */
	if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
		handle_local_rom(ctx->ohci, packet, csr);
	else switch (csr) {
	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		handle_local_lock(ctx->ohci, packet, csr);
		break;
	default:
		if (ctx == &ctx->ohci->at_request_ctx)
			fw_core_handle_request(&ctx->ohci->card, packet);
		else
			fw_core_handle_response(&ctx->ohci->card, packet);
		break;
	}

	if (ctx == &ctx->ohci->at_response_ctx) {
		packet->ack = ACK_COMPLETE;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}
}
static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctx->ohci->lock, flags);

	if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
	    ctx->ohci->generation == packet->generation) {
		spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		handle_local_request(ctx, packet);
		return;
	}

	ret = at_context_queue_packet(ctx, packet);
	spin_unlock_irqrestore(&ctx->ohci->lock, flags);

	if (ret < 0)
		packet->callback(packet, &ctx->ohci->card, packet->ack);
}
static u32 cycle_timer_ticks(u32 cycle_timer)
{
	u32 ticks;

	ticks = cycle_timer & 0xfff;
	ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
	ticks += (3072 * 8000) * (cycle_timer >> 25);

	return ticks;
}
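/*
 * Worked example (illustrative): cycle_timer = (2 << 25) | (100 << 12) | 500
 * decodes as 2 s, 100 cycles, 500 ticks, so
 *
 *	ticks = 500 + 3072 * 100 + 3072 * 8000 * 2 = 49459700
 *
 * at 24.576 MHz (3072 ticks per 125 us cycle, 8000 cycles per second).
 */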
/*
 * Some controllers exhibit one or more of the following bugs when updating the
 * iso cycle timer register:
 *  - When the lowest six bits are wrapping around to zero, a read that happens
 *    at the same time will return garbage in the lowest ten bits.
 *  - When the cycleOffset field wraps around to zero, the cycleCount field is
 *    not incremented for about 60 ns.
 *  - Occasionally, the entire register reads zero.
 *
 * To catch these, we read the register three times and ensure that the
 * difference between each two consecutive reads is approximately the same, i.e.
 * less than twice the other.  Furthermore, any negative difference indicates an
 * error.  (A PCI read should take at least 20 ticks of the 24.576 MHz timer to
 * execute, so we have enough precision to compute the ratio of the differences.)
 */
static u32 get_cycle_time(struct fw_ohci *ohci)
{
	u32 c0, c1, c2;
	u32 t0, t1, t2;
	s32 diff01, diff12;
	int i;

	c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);

	if (ohci->quirks & QUIRK_CYCLE_TIMER) {
		i = 0;
		c1 = c2;
		c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		do {
			c0 = c1;
			c1 = c2;
			c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
			t0 = cycle_timer_ticks(c0);
			t1 = cycle_timer_ticks(c1);
			t2 = cycle_timer_ticks(c2);
			diff01 = t1 - t0;
			diff12 = t2 - t1;
		} while ((diff01 <= 0 || diff12 <= 0 ||
			  diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
			 && i++ < 20);
	}

	return c2;
}
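/*
 * Example of the consistency check above (illustrative): reads with
 * diff01 = 30 and diff12 = 2000 fail the test (2000 / 30 >= 2), as would
 * any non-positive difference, and the loop retries; diff01 = 30 and
 * diff12 = 40 pass, and c2 is accepted.
 */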
/*
 * This function has to be called at least every 64 seconds.  The bus_time
 * field stores not only the upper 25 bits of the BUS_TIME register but also
 * the most significant bit of the cycle timer in bit 6 so that we can detect
 * changes in this bit.
 */
static u32 update_bus_time(struct fw_ohci *ohci)
{
	u32 cycle_time_seconds = get_cycle_time(ohci) >> 25;

	if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40))
		ohci->bus_time += 0x40;

	return ohci->bus_time | cycle_time_seconds;
}
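/*
 * Example (illustrative): when bit 6 of the cycle timer seconds flips
 * (a 64-second wraparound), the stored bus_time advances by 0x40, so the
 * returned (bus_time | cycle_time_seconds) increases monotonically as
 * long as this function runs at least once every 64 seconds.
 */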
static void bus_reset_tasklet(unsigned long data)
{
	struct fw_ohci *ohci = (struct fw_ohci *)data;
	int self_id_count, i, j, reg;
	int generation, new_generation;
	unsigned long flags;
	void *free_rom = NULL;
	dma_addr_t free_rom_bus = 0;
	bool is_new_root;

	reg = reg_read(ohci, OHCI1394_NodeID);
	if (!(reg & OHCI1394_NodeID_idValid)) {
		fw_notify("node ID not valid, new bus reset in progress\n");
		return;
	}
	if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
		fw_notify("malconfigured bus\n");
		return;
	}
	ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
			       OHCI1394_NodeID_nodeNumber);

	is_new_root = (reg & OHCI1394_NodeID_root) != 0;
	if (!(ohci->is_root && is_new_root))
		reg_write(ohci, OHCI1394_LinkControlSet,
			  OHCI1394_LinkControl_cycleMaster);
	ohci->is_root = is_new_root;

	reg = reg_read(ohci, OHCI1394_SelfIDCount);
	if (reg & OHCI1394_SelfIDCount_selfIDError) {
		fw_notify("inconsistent self IDs\n");
		return;
	}
	/*
	 * The count in the SelfIDCount register is the number of
	 * bytes in the self ID receive buffer.  Since we also receive
	 * the inverted quadlets and a header quadlet, we shift one
	 * bit extra to get the actual number of self IDs.
	 */
	self_id_count = (reg >> 3) & 0xff;
	if (self_id_count == 0 || self_id_count > 252) {
		fw_notify("inconsistent self IDs\n");
		return;
	}
	generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
	rmb();

	for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
		if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) {
			fw_notify("inconsistent self IDs\n");
			return;
		}
		ohci->self_id_buffer[j] =
				cond_le32_to_cpu(ohci->self_id_cpu[i]);
	}
	rmb();

	/*
	 * Check the consistency of the self IDs we just read.  The
	 * problem we face is that a new bus reset can start while we
	 * read out the self IDs from the DMA buffer.  If this happens,
	 * the DMA buffer will be overwritten with new self IDs and we
	 * will read out inconsistent data.  The OHCI specification
	 * (section 11.2) recommends a technique similar to
	 * linux/seqlock.h, where we remember the generation of the
	 * self IDs in the buffer before reading them out and compare
	 * it to the current generation after reading them out.  If
	 * the two generations match we know we have a consistent set
	 * of self IDs.
	 */

	new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
	if (new_generation != generation) {
		fw_notify("recursive bus reset detected, "
			  "discarding self IDs\n");
		return;
	}

	/* FIXME: Document how the locking works. */
	spin_lock_irqsave(&ohci->lock, flags);

	ohci->generation = generation;
	context_stop(&ohci->at_request_ctx);
	context_stop(&ohci->at_response_ctx);
	reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);

	if (ohci->quirks & QUIRK_RESET_PACKET)
		ohci->request_generation = generation;

	/*
	 * This next bit is unrelated to the AT context stuff but we
	 * have to do it under the spinlock also.  If a new config rom
	 * was set up before this reset, the old one is now no longer
	 * in use and we can free it.  Update the config rom pointers
	 * to point to the current config rom and clear the
	 * next_config_rom pointer so a new update can take place.
	 */

	if (ohci->next_config_rom != NULL) {
		if (ohci->next_config_rom != ohci->config_rom) {
			free_rom      = ohci->config_rom;
			free_rom_bus  = ohci->config_rom_bus;
		}
		ohci->config_rom      = ohci->next_config_rom;
		ohci->config_rom_bus  = ohci->next_config_rom_bus;
		ohci->next_config_rom = NULL;

		/*
		 * Restore config_rom image and manually update
		 * config_rom registers.  Writing the header quadlet
		 * will indicate that the config rom is ready, so we
		 * do that last.
		 */
		reg_write(ohci, OHCI1394_BusOptions,
			  be32_to_cpu(ohci->config_rom[2]));
		ohci->config_rom[0] = ohci->next_header;
		reg_write(ohci, OHCI1394_ConfigROMhdr,
			  be32_to_cpu(ohci->next_header));
	}

#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
	reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
	reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
#endif

	spin_unlock_irqrestore(&ohci->lock, flags);

	if (free_rom)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  free_rom, free_rom_bus);

	log_selfids(ohci->node_id, generation,
		    self_id_count, ohci->self_id_buffer);

	fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
				 self_id_count, ohci->self_id_buffer,
				 ohci->csr_state_setclear_abdicate);
	ohci->csr_state_setclear_abdicate = false;
}
static irqreturn_t irq_handler(int irq, void *data)
{
	struct fw_ohci *ohci = data;
	u32 event, iso_event;
	int i;

	event = reg_read(ohci, OHCI1394_IntEventClear);

	if (!event || !~event)
		return IRQ_NONE;

	/* busReset must not be cleared yet, see OHCI 1.1 clause 7.2.3.2 */
	reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
	log_irqs(event);

	if (event & OHCI1394_selfIDComplete)
		tasklet_schedule(&ohci->bus_reset_tasklet);

	if (event & OHCI1394_RQPkt)
		tasklet_schedule(&ohci->ar_request_ctx.tasklet);

	if (event & OHCI1394_RSPkt)
		tasklet_schedule(&ohci->ar_response_ctx.tasklet);

	if (event & OHCI1394_reqTxComplete)
		tasklet_schedule(&ohci->at_request_ctx.tasklet);

	if (event & OHCI1394_respTxComplete)
		tasklet_schedule(&ohci->at_response_ctx.tasklet);

	iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);

	while (iso_event) {
		i = ffs(iso_event) - 1;
		tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
		iso_event &= ~(1 << i);
	}

	iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);

	while (iso_event) {
		i = ffs(iso_event) - 1;
		tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
		iso_event &= ~(1 << i);
	}

	if (unlikely(event & OHCI1394_regAccessFail))
		fw_error("Register access failure - "
			 "please notify linux1394-devel@lists.sf.net\n");

	if (unlikely(event & OHCI1394_postedWriteErr))
		fw_error("PCI posted write error\n");

	if (unlikely(event & OHCI1394_cycleTooLong)) {
		if (printk_ratelimit())
			fw_notify("isochronous cycle too long\n");
		reg_write(ohci, OHCI1394_LinkControlSet,
			  OHCI1394_LinkControl_cycleMaster);
	}

	if (unlikely(event & OHCI1394_cycleInconsistent)) {
		/*
		 * We need to clear this event bit in order to make
		 * cycleMatch isochronous I/O work.  In theory we should
		 * stop active cycleMatch iso contexts now and restart
		 * them at least two cycles later.  (FIXME?)
		 */
		if (printk_ratelimit())
			fw_notify("isochronous cycle inconsistent\n");
	}

	if (event & OHCI1394_cycle64Seconds) {
		spin_lock(&ohci->lock);
		update_bus_time(ohci);
		spin_unlock(&ohci->lock);
	}

	return IRQ_HANDLED;
}
static int software_reset(struct fw_ohci *ohci)
{
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if ((reg_read(ohci, OHCI1394_HCControlSet) &
		     OHCI1394_HCControl_softReset) == 0)
			return 0;
		msleep(1);
	}

	return -EBUSY;
}
static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length)
{
	size_t size = length * 4;

	memcpy(dest, src, size);
	if (size < CONFIG_ROM_SIZE)
		memset(&dest[length], 0, CONFIG_ROM_SIZE - size);
}
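/*
 * Worked example (illustrative): for a 5-quadlet ROM image, length == 5,
 * so 20 bytes are copied and the remaining CONFIG_ROM_SIZE - 20 == 1004
 * bytes of the 1024-byte buffer are zeroed.
 */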
static int configure_1394a_enhancements(struct fw_ohci *ohci)
{
	bool enable_1394a;
	int ret, clear, set, offset;

	/* Check if the driver should configure link and PHY. */
	if (!(reg_read(ohci, OHCI1394_HCControlSet) &
	      OHCI1394_HCControl_programPhyEnable))
		return 0;

	/* Paranoia: check whether the PHY supports 1394a, too. */
	enable_1394a = false;
	ret = read_phy_reg(ohci, 2);
	if (ret < 0)
		return ret;
	if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) {
		ret = read_paged_phy_reg(ohci, 1, 8);
		if (ret < 0)
			return ret;
		if (ret >= 1)
			enable_1394a = true;
	}

	if (ohci->quirks & QUIRK_NO_1394A)
		enable_1394a = false;

	/* Configure PHY and link consistently. */
	if (enable_1394a) {
		clear = 0;
		set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
	} else {
		clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
		set = 0;
	}
	ret = update_phy_reg(ohci, 5, clear, set);
	if (ret < 0)
		return ret;

	if (enable_1394a)
		offset = OHCI1394_HCControlSet;
	else
		offset = OHCI1394_HCControlClear;
	reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable);

	/* Clean up: configuration has been taken care of. */
	reg_write(ohci, OHCI1394_HCControlClear,
		  OHCI1394_HCControl_programPhyEnable);

	return 0;
}
static int ohci_enable(struct fw_card *card,
		       const __be32 *config_rom, size_t length)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct pci_dev *dev = to_pci_dev(card->device);
	u32 lps, seconds, version, irqs;
	int i, ret;

	if (software_reset(ohci)) {
		fw_error("Failed to reset ohci card.\n");
		return -EBUSY;
	}

	/*
	 * Now enable LPS, which we need in order to start accessing
	 * most of the registers.  In fact, on some cards (ALI M5251),
	 * accessing registers in the SClk domain without LPS enabled
	 * will lock up the machine.  Wait 50msec to make sure we have
	 * full link enabled.  However, with some cards (well, at least
	 * a JMicron PCIe card), we have to try again sometimes.
	 */
	reg_write(ohci, OHCI1394_HCControlSet,
		  OHCI1394_HCControl_LPS |
		  OHCI1394_HCControl_postedWriteEnable);
	flush_writes(ohci);

	for (lps = 0, i = 0; !lps && i < 3; i++) {
		msleep(50);
		lps = reg_read(ohci, OHCI1394_HCControlSet) &
		      OHCI1394_HCControl_LPS;
	}

	if (!lps) {
		fw_error("Failed to set Link Power Status\n");
		return -EIO;
	}

	reg_write(ohci, OHCI1394_HCControlClear,
		  OHCI1394_HCControl_noByteSwapData);

	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_rcvSelfID |
		  OHCI1394_LinkControl_rcvPhyPkt |
		  OHCI1394_LinkControl_cycleTimerEnable |
		  OHCI1394_LinkControl_cycleMaster);

	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) |
		  (200 << 16));

	seconds = lower_32_bits(get_seconds());
	reg_write(ohci, OHCI1394_IsochronousCycleTimer, seconds << 25);
	ohci->bus_time = seconds & ~0x3f;

	version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
	if (version >= OHCI_VERSION_1_1) {
		reg_write(ohci, OHCI1394_InitialChannelsAvailableHi,
			  0xfffffffe);
		card->broadcast_channel_auto_allocated = true;
	}

	/* Get implemented bits of the priority arbitration request counter. */
	reg_write(ohci, OHCI1394_FairnessControl, 0x3f);
	ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f;
	reg_write(ohci, OHCI1394_FairnessControl, 0);
	card->priority_budget_implemented = ohci->pri_req_max != 0;

	ar_context_run(&ohci->ar_request_ctx);
	ar_context_run(&ohci->ar_response_ctx);

	reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
	reg_write(ohci, OHCI1394_IntEventClear, ~0);
	reg_write(ohci, OHCI1394_IntMaskClear, ~0);

	ret = configure_1394a_enhancements(ohci);
	if (ret < 0)
		return ret;

	/* Activate link_on bit and contender bit in our self ID packets. */
	ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER);
	if (ret < 0)
		return ret;

	/*
	 * When the link is not yet enabled, the atomic config rom
	 * update mechanism described below in ohci_set_config_rom()
	 * is not active.  We have to update ConfigRomHeader and
	 * BusOptions manually, and the write to ConfigROMmap takes
	 * effect immediately.  We tie this to the enabling of the
	 * link, so we have a valid config rom before enabling - the
	 * OHCI requires that ConfigROMhdr and BusOptions have valid
	 * values before enabling.
	 *
	 * However, when the ConfigROMmap is written, some controllers
	 * always read back quadlets 0 and 2 from the config rom to
	 * the ConfigRomHeader and BusOptions registers on bus reset.
	 * They shouldn't do that in this initial case where the link
	 * isn't enabled.  This means we have to use the same
	 * workaround here, setting the bus header to 0 and then write
	 * the right values in the bus reset tasklet.
	 */

	if (config_rom) {
		ohci->next_config_rom =
			dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
					   &ohci->next_config_rom_bus,
					   GFP_KERNEL);
		if (ohci->next_config_rom == NULL)
			return -ENOMEM;

		copy_config_rom(ohci->next_config_rom, config_rom, length);
	} else {
		/*
		 * In the suspend case, config_rom is NULL, which
		 * means that we just reuse the old config rom.
		 */
		ohci->next_config_rom = ohci->config_rom;
		ohci->next_config_rom_bus = ohci->config_rom_bus;
	}

	ohci->next_header = ohci->next_config_rom[0];
	ohci->next_config_rom[0] = 0;
	reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
	reg_write(ohci, OHCI1394_BusOptions,
		  be32_to_cpu(ohci->next_config_rom[2]));
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);

	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	if (!(ohci->quirks & QUIRK_NO_MSI))
		pci_enable_msi(dev);
	if (request_irq(dev->irq, irq_handler,
			pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED,
			ohci_driver_name, ohci)) {
		fw_error("Failed to allocate interrupt %d.\n", dev->irq);
		pci_disable_msi(dev);
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->config_rom, ohci->config_rom_bus);
		return -EIO;
	}

	irqs =	OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
		OHCI1394_RQPkt | OHCI1394_RSPkt |
		OHCI1394_isochTx | OHCI1394_isochRx |
		OHCI1394_postedWriteErr |
		OHCI1394_selfIDComplete |
		OHCI1394_regAccessFail |
		OHCI1394_cycle64Seconds |
		OHCI1394_cycleInconsistent | OHCI1394_cycleTooLong |
		OHCI1394_masterIntEnable;
	if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
		irqs |= OHCI1394_busReset;
	reg_write(ohci, OHCI1394_IntMaskSet, irqs);

	reg_write(ohci, OHCI1394_HCControlSet,
		  OHCI1394_HCControl_linkEnable |
		  OHCI1394_HCControl_BIBimageValid);
	flush_writes(ohci);

	/* We are ready to go, reset bus to finish initialization. */
	fw_schedule_bus_reset(&ohci->card, false, true);

	return 0;
}
static int ohci_set_config_rom(struct fw_card *card,
			       const __be32 *config_rom, size_t length)
{
	struct fw_ohci *ohci;
	unsigned long flags;
	int ret = -EBUSY;
	__be32 *next_config_rom;
	dma_addr_t uninitialized_var(next_config_rom_bus);

	ohci = fw_ohci(card);

	/*
	 * When the OHCI controller is enabled, the config rom update
	 * mechanism is a bit tricky, but easy enough to use.  See
	 * section 5.5.6 in the OHCI specification.
	 *
	 * The OHCI controller caches the new config rom address in a
	 * shadow register (ConfigROMmapNext) and needs a bus reset
	 * for the changes to take place.  When the bus reset is
	 * detected, the controller loads the new values for the
	 * ConfigRomHeader and BusOptions registers from the specified
	 * config rom and loads ConfigROMmap from the ConfigROMmapNext
	 * shadow register.  All automatically and atomically.
	 *
	 * Now, there's a twist to this story.  The automatic load of
	 * ConfigRomHeader and BusOptions doesn't honor the
	 * noByteSwapData bit, so with a be32 config rom, the
	 * controller will load be32 values in to these registers
	 * during the atomic update, even on little endian
	 * architectures.  The workaround we use is to put a 0 in the
	 * header quadlet; 0 is endian agnostic and means that the
	 * config rom isn't ready yet.  In the bus reset tasklet we
	 * then set up the real values for the two registers.
	 *
	 * We use ohci->lock to avoid racing with the code that sets
	 * ohci->next_config_rom to NULL (see bus_reset_tasklet).
	 */

	next_config_rom =
		dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				   &next_config_rom_bus, GFP_KERNEL);
	if (next_config_rom == NULL)
		return -ENOMEM;

	spin_lock_irqsave(&ohci->lock, flags);

	if (ohci->next_config_rom == NULL) {
		ohci->next_config_rom = next_config_rom;
		ohci->next_config_rom_bus = next_config_rom_bus;

		copy_config_rom(ohci->next_config_rom, config_rom, length);

		ohci->next_header = config_rom[0];
		ohci->next_config_rom[0] = 0;

		reg_write(ohci, OHCI1394_ConfigROMmap,
			  ohci->next_config_rom_bus);
		ret = 0;
	}

	spin_unlock_irqrestore(&ohci->lock, flags);

	/*
	 * Now initiate a bus reset to have the changes take
	 * effect.  We clean up the old config rom memory and DMA
	 * mappings in the bus reset tasklet, since the OHCI
	 * controller could need to access it before the bus reset
	 * takes effect.
	 */
	if (ret == 0)
		fw_schedule_bus_reset(&ohci->card, true, true);
	else
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  next_config_rom, next_config_rom_bus);

	return ret;
}
static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_request_ctx, packet);
}

static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_response_ctx, packet);
}
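
/*
 * Cancelling is only attempted on the AT request context.  Disabling the
 * context's tasklet below serializes us against handle_at_packet(): if
 * the packet already completed (packet->ack != 0) we leave it alone,
 * otherwise we complete it ourselves with RCODE_CANCELLED.
 */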
static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct context *ctx = &ohci->at_request_ctx;
	struct driver_data *driver_data = packet->driver_data;
	int ret = -ENOENT;

	tasklet_disable(&ctx->tasklet);

	if (packet->ack != 0)
		goto out;

	if (packet->payload_mapped)
		dma_unmap_single(ohci->card.device, packet->payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);

	log_ar_at_event('T', packet->speed, packet->header, 0x20);
	driver_data->packet = NULL;
	packet->ack = RCODE_CANCELLED;
	packet->callback(packet, &ohci->card, packet->ack);
	ret = 0;
 out:
	tasklet_enable(&ctx->tasklet);

	return ret;
}
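
/*
 * Worked example for the filter computation below: a local-bus node ID
 * such as 0xffc5 selects phy ID 5, so bit 5 of PhyReqFilterLo is set;
 * any non-local bus ID maps to n = 63, i.e. bit 31 of PhyReqFilterHi,
 * which is why physical DMA is then open to all remote-bus nodes.
 */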
static int ohci_enable_phys_dma(struct fw_card *card,
				int node_id, int generation)
{
#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
	return 0;
#else
	struct fw_ohci *ohci = fw_ohci(card);
	unsigned long flags;
	int n, ret = 0;

	/*
	 * FIXME:  Make sure this bitmask is cleared when we clear the busReset
	 * interrupt bit.  Clear physReqResourceAllBuses on bus reset.
	 */

	spin_lock_irqsave(&ohci->lock, flags);

	if (ohci->generation != generation) {
		ret = -ESTALE;
		goto out;
	}

	/*
	 * Note, if the node ID contains a non-local bus ID, physical DMA is
	 * enabled for _all_ nodes on remote buses.
	 */

	n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
	if (n < 32)
		reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
	else
		reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));

	flush_writes(ohci);
 out:
	spin_unlock_irqrestore(&ohci->lock, flags);

	return ret;
#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
}
static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
{
	struct fw_ohci *ohci = fw_ohci(card);
	unsigned long flags;
	u32 value;

	switch (csr_offset) {
	case CSR_STATE_CLEAR:
	case CSR_STATE_SET:
		if (ohci->is_root &&
		    (reg_read(ohci, OHCI1394_LinkControlSet) &
		     OHCI1394_LinkControl_cycleMaster))
			value = CSR_STATE_BIT_CMSTR;
		else
			value = 0;
		if (ohci->csr_state_setclear_abdicate)
			value |= CSR_STATE_BIT_ABDICATE;

		return value;

	case CSR_NODE_IDS:
		return reg_read(ohci, OHCI1394_NodeID) << 16;

	case CSR_CYCLE_TIME:
		return get_cycle_time(ohci);

	case CSR_BUS_TIME:
		/*
		 * We might be called just after the cycle timer has wrapped
		 * around but just before the cycle64Seconds handler, so we
		 * better check here, too, if the bus time needs to be updated.
		 */
		spin_lock_irqsave(&ohci->lock, flags);
		value = update_bus_time(ohci);
		spin_unlock_irqrestore(&ohci->lock, flags);
		return value;

	case CSR_BUSY_TIMEOUT:
		value = reg_read(ohci, OHCI1394_ATRetries);
		return (value >> 4) & 0x0ffff00f;

	case CSR_PRIORITY_BUDGET:
		return (reg_read(ohci, OHCI1394_FairnessControl) & 0x3f) |
			(ohci->pri_req_max << 8);

	default:
		WARN_ON(1);
		return 0;
	}
}
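
/*
 * Worked example of the CSR_BUSY_TIMEOUT conversion in ohci_write_csr()
 * below: writing 0x0000100f replicates the low retry nibble into the
 * three 4-bit retry fields of ATRetries and shifts the limit field up:
 *
 *	(0xf) | (0xf << 4) | (0xf << 8) | (0x00001000 << 4) = 0x00010fff
 *
 * ohci_read_csr() applies the inverse mapping,
 * (0x00010fff >> 4) & 0x0ffff00f = 0x0000100f.
 */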
static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
{
	struct fw_ohci *ohci = fw_ohci(card);
	unsigned long flags;

	switch (csr_offset) {
	case CSR_STATE_CLEAR:
		if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
			reg_write(ohci, OHCI1394_LinkControlClear,
				  OHCI1394_LinkControl_cycleMaster);
			flush_writes(ohci);
		}
		if (value & CSR_STATE_BIT_ABDICATE)
			ohci->csr_state_setclear_abdicate = false;
		break;

	case CSR_STATE_SET:
		if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
			reg_write(ohci, OHCI1394_LinkControlSet,
				  OHCI1394_LinkControl_cycleMaster);
			flush_writes(ohci);
		}
		if (value & CSR_STATE_BIT_ABDICATE)
			ohci->csr_state_setclear_abdicate = true;
		break;

	case CSR_NODE_IDS:
		reg_write(ohci, OHCI1394_NodeID, value >> 16);
		flush_writes(ohci);
		break;

	case CSR_CYCLE_TIME:
		reg_write(ohci, OHCI1394_IsochronousCycleTimer, value);
		reg_write(ohci, OHCI1394_IntEventSet,
			  OHCI1394_cycleInconsistent);
		flush_writes(ohci);
		break;

	case CSR_BUS_TIME:
		spin_lock_irqsave(&ohci->lock, flags);
		ohci->bus_time = (ohci->bus_time & 0x7f) | (value & ~0x7f);
		spin_unlock_irqrestore(&ohci->lock, flags);
		break;

	case CSR_BUSY_TIMEOUT:
		value = (value & 0xf) | ((value & 0xf) << 4) |
			((value & 0xf) << 8) | ((value & 0x0ffff000) << 4);
		reg_write(ohci, OHCI1394_ATRetries, value);
		flush_writes(ohci);
		break;

	case CSR_PRIORITY_BUDGET:
		reg_write(ohci, OHCI1394_FairnessControl, value & 0x3f);
		flush_writes(ohci);
		break;

	default:
		WARN_ON(1);
		break;
	}
}
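
/*
 * Note on the quadlet order handled below: with header_size == 8, the
 * quadlet at p + 4 (the controller-byteswapped iso header) is presented
 * first and the quadlet at p second, so the client sees a consistently
 * big-endian header regardless of host endianness.
 */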
static void copy_iso_headers(struct iso_context *ctx, void *p)
{
	int i = ctx->header_length;

	if (i + ctx->base.header_size > PAGE_SIZE)
		return;

	/*
	 * The iso header is byteswapped to little endian by
	 * the controller, but the remaining header quadlets
	 * are big endian.  We want to present all the headers
	 * as big endian, so we have to swap the first quadlet.
	 */
	if (ctx->base.header_size > 0)
		*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
	if (ctx->base.header_size > 4)
		*(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p);
	if (ctx->base.header_size > 8)
		memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8);
	ctx->header_length += ctx->base.header_size;
}
static int handle_ir_packet_per_buffer(struct context *context,
				       struct descriptor *d,
				       struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
	struct descriptor *pd;
	__le32 *ir_header;
	void *p;

	for (pd = d; pd <= last; pd++)
		if (pd->transfer_status)
			break;
	if (pd > last)
		/* Descriptor(s) not done yet, stop iteration */
		return 0;

	p = last + 1;
	copy_iso_headers(ctx, p);

	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
		ir_header = (__le32 *) p;
		ctx->base.callback.sc(&ctx->base,
				      le32_to_cpu(ir_header[0]) & 0xffff,
				      ctx->header_length, ctx->header,
				      ctx->base.callback_data);
		ctx->header_length = 0;
	}

	return 1;
}
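
/*
 * The completed-bytes position passed to the mc callback below is
 * derived purely from the descriptor: data_address + req_count -
 * res_count is the bus address just past the last byte the controller
 * filled.  For example, with req_count = 4096 and res_count = 1024,
 * that is data_address + 3072.
 */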
/* d == last because each descriptor block is only a single descriptor. */
static int handle_ir_buffer_fill(struct context *context,
				 struct descriptor *d,
				 struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);

	if (!last->transfer_status)
		/* Descriptor(s) not done yet, stop iteration */
		return 0;

	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
		ctx->base.callback.mc(&ctx->base,
				      le32_to_cpu(last->data_address) +
				      le16_to_cpu(last->req_count) -
				      le16_to_cpu(last->res_count),
				      ctx->base.callback_data);

	return 1;
}
static int handle_it_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
	int i;
	struct descriptor *pd;

	for (pd = d; pd <= last; pd++)
		if (pd->transfer_status)
			break;
	if (pd > last)
		/* Descriptor(s) not done yet, stop iteration */
		return 0;

	i = ctx->header_length;
	if (i + 4 < PAGE_SIZE) {
		/* Present this value as big-endian to match the receive code */
		*(__be32 *)(ctx->header + i) = cpu_to_be32(
				((u32)le16_to_cpu(pd->transfer_status) << 16) |
				le16_to_cpu(pd->res_count));
		ctx->header_length += 4;
	}
	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
		ctx->base.callback.sc(&ctx->base, le16_to_cpu(last->res_count),
				      ctx->header_length, ctx->header,
				      ctx->base.callback_data);
		ctx->header_length = 0;
	}
	return 1;
}
static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels)
{
	u32 hi = channels >> 32, lo = channels;

	reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi);
	reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo);
	reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi);
	reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo);

	ohci->mc_channels = channels;
}
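
/*
 * Context allocation below uses ffs() on a free-slot bitmask: for
 * example, *mask == 0x0000000c (slots 2 and 3 free) gives
 * index = ffs(0xc) - 1 = 2, and that bit is then cleared to claim the
 * slot.  index stays -1 when no slot (or, for RECEIVE, the requested
 * channel) is available, and the function returns ERR_PTR(-EBUSY).
 */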
static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
				int type, int channel, size_t header_size)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct iso_context *uninitialized_var(ctx);
	descriptor_callback_t uninitialized_var(callback);
	u64 *uninitialized_var(channels);
	u32 *uninitialized_var(mask), uninitialized_var(regs);
	unsigned long flags;
	int index, ret = -EBUSY;

	spin_lock_irqsave(&ohci->lock, flags);

	switch (type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		mask     = &ohci->it_context_mask;
		callback = handle_it_packet;
		index    = ffs(*mask) - 1;
		if (index >= 0) {
			*mask &= ~(1 << index);
			regs = OHCI1394_IsoXmitContextBase(index);
			ctx  = &ohci->it_context_list[index];
		}
		break;

	case FW_ISO_CONTEXT_RECEIVE:
		channels = &ohci->ir_context_channels;
		mask     = &ohci->ir_context_mask;
		callback = handle_ir_packet_per_buffer;
		index    = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
		if (index >= 0) {
			*channels &= ~(1ULL << channel);
			*mask     &= ~(1 << index);
			regs = OHCI1394_IsoRcvContextBase(index);
			ctx  = &ohci->ir_context_list[index];
		}
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		mask     = &ohci->ir_context_mask;
		callback = handle_ir_buffer_fill;
		index    = !ohci->mc_allocated ? ffs(*mask) - 1 : -1;
		if (index >= 0) {
			ohci->mc_allocated = true;
			*mask &= ~(1 << index);
			regs = OHCI1394_IsoRcvContextBase(index);
			ctx  = &ohci->ir_context_list[index];
		}
		break;

	default:
		index = -1;
		ret = -ENOSYS;
	}

	spin_unlock_irqrestore(&ohci->lock, flags);

	if (index < 0)
		return ERR_PTR(ret);

	memset(ctx, 0, sizeof(*ctx));
	ctx->header_length = 0;
	ctx->header = (void *) __get_free_page(GFP_KERNEL);
	if (ctx->header == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	ret = context_init(&ctx->context, ohci, regs, callback);
	if (ret < 0)
		goto out_with_header;

	if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
		set_multichannel_mask(ohci, 0);

	return &ctx->base;

 out_with_header:
	free_page((unsigned long)ctx->header);
 out:
	spin_lock_irqsave(&ohci->lock, flags);

	switch (type) {
	case FW_ISO_CONTEXT_RECEIVE:
		*channels |= 1ULL << channel;
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		ohci->mc_allocated = false;
		break;
	}
	*mask |= 1 << index;

	spin_unlock_irqrestore(&ohci->lock, flags);

	return ERR_PTR(ret);
}
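
/*
 * IR ContextMatch layout as composed below: the tag set goes in bits
 * 31-28, sync in bits 11-8, the channel number in bits 5-0, plus an
 * optional cycle match shifted to bit 12.  For example, tags = 0x1,
 * sync = 0, channel = 5 and cycle < 0 yield match = 0x10000005.
 */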
static int ohci_start_iso(struct fw_iso_context *base,
			  s32 cycle, u32 sync, u32 tags)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct fw_ohci *ohci = ctx->context.ohci;
	u32 control = IR_CONTEXT_ISOCH_HEADER, match;
	int index;

	switch (ctx->base.type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		index = ctx - ohci->it_context_list;
		match = 0;
		if (cycle >= 0)
			match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
					(cycle & 0x7fff) << 16;

		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
		context_run(&ctx->context, match);
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		control |= IR_CONTEXT_BUFFER_FILL|IR_CONTEXT_MULTI_CHANNEL_MODE;
		/* fall through */
	case FW_ISO_CONTEXT_RECEIVE:
		index = ctx - ohci->ir_context_list;
		match = (tags << 28) | (sync << 8) | ctx->base.channel;
		if (cycle >= 0) {
			match |= (cycle & 0x07fff) << 12;
			control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
		}

		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
		reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
		context_run(&ctx->context, control);
		break;
	}

	return 0;
}
static int ohci_stop_iso(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	int index;

	switch (ctx->base.type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		index = ctx - ohci->it_context_list;
		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
		break;

	case FW_ISO_CONTEXT_RECEIVE:
	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		index = ctx - ohci->ir_context_list;
		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
		break;
	}
	flush_writes(ohci);
	context_stop(&ctx->context);

	return 0;
}
static void ohci_free_iso_context(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	unsigned long flags;
	int index;

	ohci_stop_iso(base);
	context_release(&ctx->context);
	free_page((unsigned long)ctx->header);

	spin_lock_irqsave(&ohci->lock, flags);

	switch (base->type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		index = ctx - ohci->it_context_list;
		ohci->it_context_mask |= 1 << index;
		break;

	case FW_ISO_CONTEXT_RECEIVE:
		index = ctx - ohci->ir_context_list;
		ohci->ir_context_mask |= 1 << index;
		ohci->ir_context_channels |= 1ULL << base->channel;
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		index = ctx - ohci->ir_context_list;
		ohci->ir_context_mask |= 1 << index;
		ohci->ir_context_channels |= ohci->mc_channels;
		ohci->mc_channels = 0;
		ohci->mc_allocated = false;
		break;
	}

	spin_unlock_irqrestore(&ohci->lock, flags);
}
static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	unsigned long flags;
	int ret;

	switch (base->type) {
	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:

		spin_lock_irqsave(&ohci->lock, flags);

		/* Don't allow multichannel to grab other contexts' channels. */
		if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) {
			*channels = ohci->ir_context_channels;
			ret = -EBUSY;
		} else {
			set_multichannel_mask(ohci, *channels);
			ret = 0;
		}

		spin_unlock_irqrestore(&ohci->lock, flags);

		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
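
/*
 * Descriptor count bookkeeping for the transmit path below, worked
 * example: a non-skip packet with an 8-byte user header and a 5000-byte
 * payload starting on a page boundary (4 KiB pages) needs z = 2
 * (immediate header block) + 1 (user-header descriptor) + 2 (payload
 * pages) = 5 descriptors, plus header_z = DIV_ROUND_UP(8, 16) = 1
 * descriptor's worth of space for the copied header.
 */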
static int queue_iso_transmit(struct iso_context *ctx,
			      struct fw_iso_packet *packet,
			      struct fw_iso_buffer *buffer,
			      unsigned long payload)
{
	struct descriptor *d, *last, *pd;
	struct fw_iso_packet *p;
	__le32 *header;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, payload_z, irq;
	u32 payload_index, payload_end_index, next_page_index;
	int page, end_page, i, length, offset;

	p = packet;
	payload_index = payload;

	if (p->skip)
		z = 1;
	else
		z = 2;
	if (p->header_length > 0)
		z++;

	/* Determine the first page the payload isn't contained in. */
	end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
	if (p->payload_length > 0)
		payload_z = end_page - (payload_index >> PAGE_SHIFT);
	else
		payload_z = 0;

	z += payload_z;

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));

	d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
	if (d == NULL)
		return -ENOMEM;

	if (!p->skip) {
		d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
		d[0].req_count = cpu_to_le16(8);
		/*
		 * Link the skip address to this descriptor itself.  This causes
		 * a context to skip a cycle whenever lost cycles or FIFO
		 * overruns occur, without dropping the data.  The application
		 * should then decide whether this is an error condition or not.
		 * FIXME: Make the context's cycle-lost behaviour configurable?
		 */
		d[0].branch_address = cpu_to_le32(d_bus | z);

		header = (__le32 *) &d[1];
		header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
					IT_HEADER_TAG(p->tag) |
					IT_HEADER_TCODE(TCODE_STREAM_DATA) |
					IT_HEADER_CHANNEL(ctx->base.channel) |
					IT_HEADER_SPEED(ctx->base.speed));
		header[1] =
			cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
							  p->payload_length));
	}

	if (p->header_length > 0) {
		d[2].req_count    = cpu_to_le16(p->header_length);
		d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
		memcpy(&d[z], p->header, p->header_length);
	}

	pd = d + z - payload_z;
	payload_end_index = payload_index + p->payload_length;
	for (i = 0; i < payload_z; i++) {
		page            = payload_index >> PAGE_SHIFT;
		offset          = payload_index & ~PAGE_MASK;
		next_page_index = (page + 1) << PAGE_SHIFT;
		length          =
			min(next_page_index, payload_end_index) - payload_index;
		pd[i].req_count = cpu_to_le16(length);

		page_bus = page_private(buffer->pages[page]);
		pd[i].data_address = cpu_to_le32(page_bus + offset);

		payload_index += length;
	}

	if (p->interrupt)
		irq = DESCRIPTOR_IRQ_ALWAYS;
	else
		irq = DESCRIPTOR_NO_IRQ;

	last = z == 2 ? d : d + z - 1;
	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_STATUS |
				     DESCRIPTOR_BRANCH_ALWAYS |
				     irq);

	context_append(&ctx->context, d, z, header_z);

	return 0;
}
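
/*
 * In packet-per-buffer mode, packet->header_length is expected to be a
 * multiple of the context's header_size: packet_count below is the
 * number of packets described by this queue request.  Each packet gets
 * its own descriptor block of one INPUT_MORE header descriptor (at
 * least 8 bytes, for the controller-written header and trailer)
 * followed by one descriptor per payload page, the last of which is
 * turned into the block's INPUT_LAST.
 */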
static int queue_iso_packet_per_buffer(struct iso_context *ctx,
				       struct fw_iso_packet *packet,
				       struct fw_iso_buffer *buffer,
				       unsigned long payload)
{
	struct descriptor *d, *pd;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, rest;
	int i, j, length;
	int page, offset, packet_count, header_size, payload_per_buffer;

	/*
	 * The OHCI controller puts the isochronous header and trailer in the
	 * buffer, so we need at least 8 bytes.
	 */
	packet_count = packet->header_length / ctx->base.header_size;
	header_size  = max(ctx->base.header_size, (size_t)8);

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
	page     = payload >> PAGE_SHIFT;
	offset   = payload & ~PAGE_MASK;
	payload_per_buffer = packet->payload_length / packet_count;

	for (i = 0; i < packet_count; i++) {
		/* d points to the header descriptor */
		z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
		d = context_get_descriptors(&ctx->context,
				z + header_z, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		d->control      = cpu_to_le16(DESCRIPTOR_STATUS |
					      DESCRIPTOR_INPUT_MORE);
		if (packet->skip && i == 0)
			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
		d->req_count    = cpu_to_le16(header_size);
		d->res_count    = d->req_count;
		d->transfer_status = 0;
		d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));

		rest = payload_per_buffer;
		pd = d;
		for (j = 1; j < z; j++) {
			pd++;
			pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
						  DESCRIPTOR_INPUT_MORE);

			if (offset + rest < PAGE_SIZE)
				length = rest;
			else
				length = PAGE_SIZE - offset;
			pd->req_count = cpu_to_le16(length);
			pd->res_count = pd->req_count;
			pd->transfer_status = 0;

			page_bus = page_private(buffer->pages[page]);
			pd->data_address = cpu_to_le32(page_bus + offset);

			offset = (offset + length) & ~PAGE_MASK;
			rest -= length;
			if (offset == 0)
				page++;
		}
		pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
					  DESCRIPTOR_INPUT_LAST |
					  DESCRIPTOR_BRANCH_ALWAYS);
		if (packet->interrupt && i == packet_count - 1)
			pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

		context_append(&ctx->context, d, z, header_z);
	}

	return 0;
}
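
/*
 * Buffer-fill example for the loop below, assuming 4 KiB pages: payload
 * offset 0x800 with a 0x2000-byte payload gives
 * z = DIV_ROUND_UP(0x2800, 0x1000) = 3 descriptors covering 0x800,
 * 0x1000 and 0x800 bytes respectively, each appended individually since
 * every descriptor is its own block in this mode.
 */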
static int queue_iso_buffer_fill(struct iso_context *ctx,
				 struct fw_iso_packet *packet,
				 struct fw_iso_buffer *buffer,
				 unsigned long payload)
{
	struct descriptor *d;
	dma_addr_t d_bus, page_bus;
	int page, offset, rest, z, i, length;

	page   = payload >> PAGE_SHIFT;
	offset = payload & ~PAGE_MASK;
	rest   = packet->payload_length;

	/* We need one descriptor for each page in the buffer. */
	z = DIV_ROUND_UP(offset + rest, PAGE_SIZE);

	if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count))
		return -EFAULT;

	for (i = 0; i < z; i++) {
		d = context_get_descriptors(&ctx->context, 1, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
					 DESCRIPTOR_BRANCH_ALWAYS);
		if (packet->skip && i == 0)
			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
		if (packet->interrupt && i == z - 1)
			d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

		if (offset + rest < PAGE_SIZE)
			length = rest;
		else
			length = PAGE_SIZE - offset;
		d->req_count = cpu_to_le16(length);
		d->res_count = d->req_count;
		d->transfer_status = 0;

		page_bus = page_private(buffer->pages[page]);
		d->data_address = cpu_to_le32(page_bus + offset);

		rest -= length;
		offset = 0;
		page++;

		context_append(&ctx->context, d, 1, 0);
	}

	return 0;
}
static int ohci_queue_iso(struct fw_iso_context *base,
			  struct fw_iso_packet *packet,
			  struct fw_iso_buffer *buffer,
			  unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	unsigned long flags;
	int ret = -ENOSYS;

	spin_lock_irqsave(&ctx->context.ohci->lock, flags);
	switch (base->type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		ret = queue_iso_transmit(ctx, packet, buffer, payload);
		break;
	case FW_ISO_CONTEXT_RECEIVE:
		ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
		break;
	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		ret = queue_iso_buffer_fill(ctx, packet, buffer, payload);
		break;
	}
	spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);

	return ret;
}
static const struct fw_card_driver ohci_driver = {
	.enable			= ohci_enable,
	.read_phy_reg		= ohci_read_phy_reg,
	.update_phy_reg		= ohci_update_phy_reg,
	.set_config_rom		= ohci_set_config_rom,
	.send_request		= ohci_send_request,
	.send_response		= ohci_send_response,
	.cancel_packet		= ohci_cancel_packet,
	.enable_phys_dma	= ohci_enable_phys_dma,
	.read_csr		= ohci_read_csr,
	.write_csr		= ohci_write_csr,

	.allocate_iso_context	= ohci_allocate_iso_context,
	.free_iso_context	= ohci_free_iso_context,
	.set_iso_channels	= ohci_set_iso_channels,
	.queue_iso		= ohci_queue_iso,
	.start_iso		= ohci_start_iso,
	.stop_iso		= ohci_stop_iso,
};
#ifdef CONFIG_PPC_PMAC
static void pmac_ohci_on(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
		}
	}
}

static void pmac_ohci_off(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
		}
	}
}
#else
static inline void pmac_ohci_on(struct pci_dev *dev) {}
static inline void pmac_ohci_off(struct pci_dev *dev) {}
#endif /* CONFIG_PPC_PMAC */
static int __devinit pci_probe(struct pci_dev *dev,
			       const struct pci_device_id *ent)
{
	struct fw_ohci *ohci;
	u32 bus_options, max_receive, link_speed, version;
	u64 guid;
	int i, err, n_ir, n_it;
	size_t size;

	ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
	if (ohci == NULL) {
		err = -ENOMEM;
		goto fail;
	}

	fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);

	pmac_ohci_on(dev);

	err = pci_enable_device(dev);
	if (err) {
		fw_error("Failed to enable OHCI hardware\n");
		goto fail_free;
	}

	pci_set_master(dev);
	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
	pci_set_drvdata(dev, ohci);

	spin_lock_init(&ohci->lock);
	mutex_init(&ohci->phy_reg_mutex);

	tasklet_init(&ohci->bus_reset_tasklet,
		     bus_reset_tasklet, (unsigned long)ohci);

	err = pci_request_region(dev, 0, ohci_driver_name);
	if (err) {
		fw_error("MMIO resource unavailable\n");
		goto fail_disable;
	}

	ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
	if (ohci->registers == NULL) {
		fw_error("Failed to remap registers\n");
		err = -ENXIO;
		goto fail_iomem;
	}

	for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++)
		if ((ohci_quirks[i].vendor == dev->vendor) &&
		    (ohci_quirks[i].device == (unsigned short)PCI_ANY_ID ||
		     ohci_quirks[i].device == dev->device) &&
		    (ohci_quirks[i].revision == (unsigned short)PCI_ANY_ID ||
		     ohci_quirks[i].revision >= dev->revision)) {
			ohci->quirks = ohci_quirks[i].flags;
			break;
		}
	if (param_quirks)
		ohci->quirks = param_quirks;

	ar_context_init(&ohci->ar_request_ctx, ohci,
			OHCI1394_AsReqRcvContextControlSet);

	ar_context_init(&ohci->ar_response_ctx, ohci,
			OHCI1394_AsRspRcvContextControlSet);

	context_init(&ohci->at_request_ctx, ohci,
		     OHCI1394_AsReqTrContextControlSet, handle_at_packet);

	context_init(&ohci->at_response_ctx, ohci,
		     OHCI1394_AsRspTrContextControlSet, handle_at_packet);

	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
	ohci->ir_context_channels = ~0ULL;
	ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
	n_ir = hweight32(ohci->ir_context_mask);
	size = sizeof(struct iso_context) * n_ir;
	ohci->ir_context_list = kzalloc(size, GFP_KERNEL);

	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
	ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
	n_it = hweight32(ohci->it_context_mask);
	size = sizeof(struct iso_context) * n_it;
	ohci->it_context_list = kzalloc(size, GFP_KERNEL);

	if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
		err = -ENOMEM;
		goto fail_contexts;
	}

	/* self-id dma buffer allocation */
	ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
					       SELF_ID_BUF_SIZE,
					       &ohci->self_id_bus,
					       GFP_KERNEL);
	if (ohci->self_id_cpu == NULL) {
		err = -ENOMEM;
		goto fail_contexts;
	}

	bus_options = reg_read(ohci, OHCI1394_BusOptions);
	max_receive = (bus_options >> 12) & 0xf;
	link_speed = bus_options & 0x7;
	guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
		reg_read(ohci, OHCI1394_GUIDLo);

	err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
	if (err)
		goto fail_self_id;

	version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
	fw_notify("Added fw-ohci device %s, OHCI v%x.%x, "
		  "%d IR + %d IT contexts, quirks 0x%x\n",
		  dev_name(&dev->dev), version >> 16, version & 0xff,
		  n_ir, n_it, ohci->quirks);

	return 0;

 fail_self_id:
	dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
			  ohci->self_id_cpu, ohci->self_id_bus);
 fail_contexts:
	kfree(ohci->ir_context_list);
	kfree(ohci->it_context_list);
	context_release(&ohci->at_response_ctx);
	context_release(&ohci->at_request_ctx);
	ar_context_release(&ohci->ar_response_ctx);
	ar_context_release(&ohci->ar_request_ctx);
	pci_iounmap(dev, ohci->registers);
 fail_iomem:
	pci_release_region(dev, 0);
 fail_disable:
	pci_disable_device(dev);
 fail_free:
	kfree(&ohci->card);
	pmac_ohci_off(dev);
 fail:
	if (err == -ENOMEM)
		fw_error("Out of memory\n");

	return err;
}
static void pci_remove(struct pci_dev *dev)
{
	struct fw_ohci *ohci;

	ohci = pci_get_drvdata(dev);
	reg_write(ohci, OHCI1394_IntMaskClear, ~0);
	flush_writes(ohci);
	fw_core_remove_card(&ohci->card);

	/*
	 * FIXME: Fail all pending packets here, now that the upper
	 * layers can't queue any more.
	 */

	software_reset(ohci);
	free_irq(dev->irq, ohci);

	if (ohci->next_config_rom && ohci->next_config_rom != ohci->config_rom)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->next_config_rom, ohci->next_config_rom_bus);
	if (ohci->config_rom)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->config_rom, ohci->config_rom_bus);
	dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
			  ohci->self_id_cpu, ohci->self_id_bus);
	ar_context_release(&ohci->ar_request_ctx);
	ar_context_release(&ohci->ar_response_ctx);
	context_release(&ohci->at_request_ctx);
	context_release(&ohci->at_response_ctx);
	kfree(ohci->it_context_list);
	kfree(ohci->ir_context_list);
	pci_disable_msi(dev);
	pci_iounmap(dev, ohci->registers);
	pci_release_region(dev, 0);
	pci_disable_device(dev);
	kfree(&ohci->card);
	pmac_ohci_off(dev);

	fw_notify("Removed fw-ohci device.\n");
}
#ifdef CONFIG_PM
static int pci_suspend(struct pci_dev *dev, pm_message_t state)
{
	struct fw_ohci *ohci = pci_get_drvdata(dev);
	int err;

	software_reset(ohci);
	free_irq(dev->irq, ohci);
	pci_disable_msi(dev);
	err = pci_save_state(dev);
	if (err) {
		fw_error("pci_save_state failed\n");
		return err;
	}
	err = pci_set_power_state(dev, pci_choose_state(dev, state));
	if (err)
		fw_error("pci_set_power_state failed with %d\n", err);
	pmac_ohci_off(dev);

	return 0;
}
static int pci_resume(struct pci_dev *dev)
{
	struct fw_ohci *ohci = pci_get_drvdata(dev);
	int err;

	pmac_ohci_on(dev);
	pci_set_power_state(dev, PCI_D0);
	pci_restore_state(dev);
	err = pci_enable_device(dev);
	if (err) {
		fw_error("pci_enable_device failed\n");
		return err;
	}

	return ohci_enable(&ohci->card, NULL, 0);
}
#endif /* CONFIG_PM */
static const struct pci_device_id pci_table[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
	{ }
};

MODULE_DEVICE_TABLE(pci, pci_table);

static struct pci_driver fw_ohci_pci_driver = {
	.name		= ohci_driver_name,
	.id_table	= pci_table,
	.probe		= pci_probe,
	.remove		= pci_remove,
#ifdef CONFIG_PM
	.resume		= pci_resume,
	.suspend	= pci_suspend,
#endif
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
MODULE_LICENSE("GPL");

/* Provide a module alias so root-on-sbp2 initrds don't break. */
#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
MODULE_ALIAS("ohci1394");
#endif

static int __init fw_ohci_init(void)
{
	return pci_register_driver(&fw_ohci_pci_driver);
}

static void __exit fw_ohci_cleanup(void)
{
	pci_unregister_driver(&fw_ohci_pci_driver);
}

module_init(fw_ohci_init);
module_exit(fw_ohci_cleanup);