/* include/linux/firewire.h — FireWire (IEEE 1394) driver-core interfaces. */
77c9a5da SR |
1 | #ifndef _LINUX_FIREWIRE_H |
2 | #define _LINUX_FIREWIRE_H | |
3 | ||
4 | #include <linux/completion.h> | |
f68c56b7 | 5 | #include <linux/device.h> |
c76acec6 | 6 | #include <linux/dma-mapping.h> |
77c9a5da SR |
7 | #include <linux/kernel.h> |
8 | #include <linux/kref.h> | |
9 | #include <linux/list.h> | |
10 | #include <linux/mutex.h> | |
11 | #include <linux/spinlock.h> | |
12 | #include <linux/sysfs.h> | |
13 | #include <linux/timer.h> | |
14 | #include <linux/types.h> | |
15 | #include <linux/workqueue.h> | |
16 | ||
60063497 | 17 | #include <linux/atomic.h> |
77c9a5da SR |
18 | #include <asm/byteorder.h> |
19 | ||
77c9a5da SR |
/* Base of the IEEE 1394 control and status register (CSR) space. */
#define CSR_REGISTER_BASE		0xfffff0000000ULL

/* register offsets are relative to CSR_REGISTER_BASE */
#define CSR_STATE_CLEAR			0x0
#define CSR_STATE_SET			0x4
#define CSR_NODE_IDS			0x8
#define CSR_RESET_START			0xc
#define CSR_SPLIT_TIMEOUT_HI		0x18
#define CSR_SPLIT_TIMEOUT_LO		0x1c
#define CSR_CYCLE_TIME			0x200
#define CSR_BUS_TIME			0x204
#define CSR_BUSY_TIMEOUT		0x210
#define CSR_PRIORITY_BUDGET		0x218
#define CSR_BUS_MANAGER_ID		0x21c
#define CSR_BANDWIDTH_AVAILABLE		0x220
/* CHANNELS_AVAILABLE is a 64-bit register; _HI aliases its first half. */
#define CSR_CHANNELS_AVAILABLE		0x224
#define CSR_CHANNELS_AVAILABLE_HI	0x224
#define CSR_CHANNELS_AVAILABLE_LO	0x228
#define CSR_MAINT_UTILITY		0x230
#define CSR_BROADCAST_CHANNEL		0x234
#define CSR_CONFIG_ROM			0x400
#define CSR_CONFIG_ROM_END		0x800
/* output/input master and plug control registers (i = plug number) */
#define CSR_OMPR			0x900
#define CSR_OPCR(i)			(0x904 + (i) * 4)
#define CSR_IMPR			0x980
#define CSR_IPCR(i)			(0x984 + (i) * 4)
/* FCP command/response register ranges */
#define CSR_FCP_COMMAND			0xB00
#define CSR_FCP_RESPONSE		0xD00
#define CSR_FCP_END			0xF00
#define CSR_TOPOLOGY_MAP		0x1000
#define CSR_TOPOLOGY_MAP_END		0x1400
#define CSR_SPEED_MAP			0x2000
#define CSR_SPEED_MAP_END		0x3000

/* config ROM key types (upper bits of a directory entry's key quadlet) */
#define CSR_OFFSET		0x40
#define CSR_LEAF		0x80
#define CSR_DIRECTORY		0xc0

/* config ROM key IDs */
#define CSR_DESCRIPTOR		0x01
#define CSR_VENDOR		0x03
#define CSR_HARDWARE_VERSION	0x04
#define CSR_UNIT		0x11
#define CSR_SPECIFIER_ID	0x12
#define CSR_VERSION		0x13
#define CSR_DEPENDENT_INFO	0x14
#define CSR_MODEL		0x17
#define CSR_DIRECTORY_ID	0x20
67 | ||
/*
 * Iterator over the entries of a config ROM directory; initialize with
 * fw_csr_iterator_init(), advance with fw_csr_iterator_next().
 */
struct fw_csr_iterator {
	const u32 *p;	/* next entry to visit */
	const u32 *end;	/* end of the directory */
};
72 | ||
13b302d0 | 73 | void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p); |
77c9a5da | 74 | int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value); |
13b302d0 | 75 | int fw_csr_string(const u32 *directory, int key, char *buf, size_t size); |
1f8fef7b | 76 | |
77c9a5da SR |
77 | extern struct bus_type fw_bus_type; |
78 | ||
79 | struct fw_card_driver; | |
80 | struct fw_node; | |
81 | ||
/*
 * One instance per FireWire controller (card) in the system.
 * Reference-counted via kref; use fw_card_get()/fw_card_put().
 */
struct fw_card {
	const struct fw_card_driver *driver;	/* controller-specific operations */
	struct device *device;
	struct kref kref;		/* released through fw_card_release() */
	struct completion done;

	int node_id;			/* local node's current bus address */
	int generation;			/* bus generation, changes on each bus reset */
	int current_tlabel;		/* transaction label bookkeeping; mask has one */
	u64 tlabel_mask;		/* bit per label — NOTE(review): confirm exact use */
	struct list_head transaction_list;	/* outstanding outbound transactions */
	u64 reset_jiffies;		/* timestamp related to the last bus reset */

	/* split-transaction timeout, mirrored from the CSR register halves */
	u32 split_timeout_hi;
	u32 split_timeout_lo;
	unsigned int split_timeout_cycles;
	unsigned int split_timeout_jiffies;

	unsigned long long guid;	/* 64-bit global unique ID of the local node */
	unsigned max_receive;
	int link_speed;
	int config_rom_generation;

	spinlock_t lock; /* Take this lock when handling the lists in
			  * this struct. */
	struct fw_node *local_node;
	struct fw_node *root_node;
	struct fw_node *irm_node;	/* isochronous resource manager node */
	u8 color; /* must be u8 to match the definition in struct fw_node */
	int gap_count;
	bool beta_repeaters_present;

	int index;			/* card number */
	struct list_head link;		/* entry in the core's list of cards */

	struct list_head phy_receiver_list;

	struct delayed_work br_work; /* bus reset job */
	bool br_short;			/* request a short bus reset */

	struct delayed_work bm_work; /* bus manager job */
	int bm_retries;
	int bm_generation;
	int bm_node_id;
	bool bm_abdicate;

	bool priority_budget_implemented;	/* controller feature */
	bool broadcast_channel_auto_allocated;	/* controller feature */

	bool broadcast_channel_allocated;
	u32 broadcast_channel;
	/* local copy of the bus's topology map, big-endian as on the wire */
	__be32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];

	__be32 maint_utility_register;	/* backs the CSR_MAINT_UTILITY register */
};
137 | ||
fc5f80b1 CB |
138 | static inline struct fw_card *fw_card_get(struct fw_card *card) |
139 | { | |
140 | kref_get(&card->kref); | |
141 | ||
142 | return card; | |
143 | } | |
144 | ||
145 | void fw_card_release(struct kref *kref); | |
146 | ||
147 | static inline void fw_card_put(struct fw_card *card) | |
148 | { | |
149 | kref_put(&card->kref, fw_card_release); | |
150 | } | |
151 | ||
77c9a5da SR |
/* Backing store for the sysfs attributes of fw_device/fw_unit objects. */
struct fw_attribute_group {
	struct attribute_group *groups[2];	/* presumably &group + NULL terminator */
	struct attribute_group group;
	struct attribute *attrs[13];		/* NULL-terminated attribute list */
};
157 | ||
/* Lifecycle states of a struct fw_device, kept in fw_device.state. */
enum fw_device_state {
	FW_DEVICE_INITIALIZING,	/* initial state while the device is brought up */
	FW_DEVICE_RUNNING,	/* device is usable */
	FW_DEVICE_GONE,		/* device disappeared from the bus */
	FW_DEVICE_SHUTDOWN,	/* final state; tested by fw_device_is_shutdown() */
};
164 | ||
/*
 * Note, fw_device.generation always has to be read before fw_device.node_id.
 * Use SMP memory barriers to ensure this.  Otherwise requests will be sent
 * to an outdated node_id if the generation was updated in the meantime due
 * to a bus reset.
 *
 * Likewise, fw-core will take care to update .node_id before .generation so
 * that whenever fw_device.generation is current WRT the actual bus generation,
 * fw_device.node_id is guaranteed to be current too.
 *
 * The same applies to fw_device.card->node_id vs. fw_device.generation.
 *
 * fw_device.config_rom and fw_device.config_rom_length may be accessed during
 * the lifetime of any fw_unit belonging to the fw_device, before device_del()
 * was called on the last fw_unit.  Alternatively, they may be accessed while
 * holding fw_device_rwsem.
 */
struct fw_device {
	atomic_t state;		/* enum fw_device_state */
	struct fw_node *node;
	int node_id;		/* see the ordering rules in the comment above */
	int generation;
	unsigned max_speed;
	struct fw_card *card;	/* controller through which this node is reached */
	struct device device;	/* driver-model member; see fw_device(dev) */

	struct mutex client_list_mutex;	/* protects client_list */
	struct list_head client_list;

	const u32 *config_rom;		/* access rules: see the comment above */
	size_t config_rom_length;
	int config_rom_retries;
	unsigned is_local:1;		/* this device is the local node itself */
	unsigned max_rec:4;		/* bus info block fields (IEEE 1394) — */
	unsigned cmc:1;			/* semantics per the standard; */
	unsigned irmc:1;		/* NOTE(review): confirm against fw-core */
	unsigned bc_implemented:2;	/* BROADCAST_CHANNEL support state */

	struct delayed_work work;
	struct fw_attribute_group attribute_group;	/* sysfs attribute storage */
};
206 | ||
207 | static inline struct fw_device *fw_device(struct device *dev) | |
208 | { | |
209 | return container_of(dev, struct fw_device, device); | |
210 | } | |
211 | ||
212 | static inline int fw_device_is_shutdown(struct fw_device *device) | |
213 | { | |
214 | return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN; | |
215 | } | |
216 | ||
77c9a5da SR |
217 | int fw_device_enable_phys_dma(struct fw_device *device); |
218 | ||
/*
 * fw_unit.directory must not be accessed after device_del(&fw_unit.device).
 */
struct fw_unit {
	struct device device;		/* child of the parent fw_device's device */
	const u32 *directory;		/* this unit's directory in the config ROM */
	struct fw_attribute_group attribute_group;	/* sysfs attribute storage */
};
227 | ||
228 | static inline struct fw_unit *fw_unit(struct device *dev) | |
229 | { | |
230 | return container_of(dev, struct fw_unit, device); | |
231 | } | |
232 | ||
233 | static inline struct fw_unit *fw_unit_get(struct fw_unit *unit) | |
234 | { | |
235 | get_device(&unit->device); | |
236 | ||
237 | return unit; | |
238 | } | |
239 | ||
240 | static inline void fw_unit_put(struct fw_unit *unit) | |
241 | { | |
242 | put_device(&unit->device); | |
243 | } | |
244 | ||
e5110d01 SR |
245 | static inline struct fw_device *fw_parent_device(struct fw_unit *unit) |
246 | { | |
247 | return fw_device(unit->device.parent); | |
248 | } | |
249 | ||
77c9a5da SR |
250 | struct ieee1394_device_id; |
251 | ||
/* A driver for fw_unit devices; registered on fw_bus_type. */
struct fw_driver {
	struct device_driver driver;
	/* Bind to @unit whose IDs matched @id from id_table. */
	int (*probe)(struct fw_unit *unit, const struct ieee1394_device_id *id);
	/* Called when the parent device sits through a bus reset. */
	void (*update)(struct fw_unit *unit);
	void (*remove)(struct fw_unit *unit);
	const struct ieee1394_device_id *id_table;	/* devices this driver handles */
};
260 | ||
struct fw_packet;
struct fw_request;

/* Completion callback of an outbound packet; see fw_packet.callback. */
typedef void (*fw_packet_callback_t)(struct fw_packet *packet,
				     struct fw_card *card, int status);

/*
 * Completion callback of an outbound transaction.  @data/@length carry the
 * response payload and are valid only for the duration of the call.
 */
typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
					  void *data, size_t length,
					  void *callback_data);
/*
 * This callback handles an inbound request subaction.  It is called in
 * RCU read-side context, therefore must not sleep.
 *
 * The callback should not initiate outbound request subactions directly.
 * Otherwise there is a danger of recursion of inbound and outbound
 * transactions from and to the local node.
 *
 * The callback is responsible that either fw_send_response() or kfree()
 * is called on the @request, except for FCP registers for which the core
 * takes care of that.
 */
typedef void (*fw_address_callback_t)(struct fw_card *card,
				      struct fw_request *request,
				      int tcode, int destination, int source,
				      int generation,
				      unsigned long long offset,
				      void *data, size_t length,
				      void *callback_data);
288 | ||
/* A single 1394 packet, used for both outbound and inbound traffic. */
struct fw_packet {
	int speed;
	int generation;		/* bus generation the packet belongs to */
	u32 header[4];		/* packet header quadlets */
	size_t header_length;	/* bytes of header[] actually used */
	void *payload;
	size_t payload_length;
	dma_addr_t payload_bus;	/* DMA mapping of payload, valid if payload_mapped */
	bool payload_mapped;
	u32 timestamp;

	/*
	 * This callback is called when the packet transmission has completed.
	 * For successful transmission, the status code is the ack received
	 * from the destination.  Otherwise it is one of the juju-specific
	 * rcodes:  RCODE_SEND_ERROR, _CANCELLED, _BUSY, _GENERATION, _NO_ACK.
	 * The callback can be called from tasklet context and thus
	 * must never block.
	 */
	fw_packet_callback_t callback;
	int ack;		/* ack code received for this packet */
	struct list_head link;
	void *driver_data;
};
313 | ||
/*
 * An in-flight outbound transaction, linked on fw_card.transaction_list
 * while awaiting completion.
 */
struct fw_transaction {
	int node_id; /* The generation is implied; it is always the current. */
	int tlabel;			/* transaction label of the request */
	struct list_head link;		/* entry in fw_card.transaction_list */
	struct fw_card *card;
	bool is_split_transaction;
	struct timer_list split_timeout_timer;	/* fires on split-transaction timeout */

	struct fw_packet packet;	/* the request packet itself */

	/*
	 * The data passed to the callback is valid only during the
	 * callback.
	 */
	fw_transaction_callback_t callback;
	void *callback_data;
};
331 | ||
/*
 * A handler for inbound requests to a range of 1394 address space;
 * registered with fw_core_add_address_handler().
 */
struct fw_address_handler {
	u64 offset;		/* start of the handled address range */
	u64 length;		/* length of the handled range in bytes */
	fw_address_callback_t address_callback;
	void *callback_data;	/* passed verbatim to address_callback */
	struct list_head link;	/* entry in the core's handler list */
};

/* An address range used to place an fw_address_handler. */
struct fw_address_region {
	u64 start;
	u64 end;	/* presumably exclusive end of the region — verify */
};
344 | ||
345 | extern const struct fw_address_region fw_high_memory_region; | |
346 | ||
347 | int fw_core_add_address_handler(struct fw_address_handler *handler, | |
348 | const struct fw_address_region *region); | |
349 | void fw_core_remove_address_handler(struct fw_address_handler *handler); | |
350 | void fw_send_response(struct fw_card *card, | |
351 | struct fw_request *request, int rcode); | |
253d9237 | 352 | int fw_get_request_speed(struct fw_request *request); |
77c9a5da SR |
353 | void fw_send_request(struct fw_card *card, struct fw_transaction *t, |
354 | int tcode, int destination_id, int generation, int speed, | |
355 | unsigned long long offset, void *payload, size_t length, | |
356 | fw_transaction_callback_t callback, void *callback_data); | |
357 | int fw_cancel_transaction(struct fw_card *card, | |
358 | struct fw_transaction *transaction); | |
359 | int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, | |
360 | int generation, int speed, unsigned long long offset, | |
361 | void *payload, size_t length); | |
7bdbff67 | 362 | const char *fw_rcode_string(int rcode); |
77c9a5da | 363 | |
c76acec6 JF |
/*
 * Compose the destination-ID field of an isochronous stream packet from
 * its tag (2 bits), channel (6 bits), and sy (4 bits) components.
 */
static inline int fw_stream_packet_destination_id(int tag, int channel, int sy)
{
	return (tag << 14) | (channel << 8) | sy;
}
368 | ||
/*
 * A descriptor to be added to the local node's config ROM via
 * fw_core_add_descriptor() and removed via fw_core_remove_descriptor().
 */
struct fw_descriptor {
	struct list_head link;	/* entry in the core's descriptor list */
	size_t length;		/* length of data — NOTE(review): confirm units (quadlets?) */
	u32 immediate;
	u32 key;
	const u32 *data;	/* descriptor contents */
};
376 | ||
377 | int fw_core_add_descriptor(struct fw_descriptor *desc); | |
378 | void fw_core_remove_descriptor(struct fw_descriptor *desc); | |
379 | ||
380 | /* | |
381 | * The iso packet format allows for an immediate header/payload part | |
382 | * stored in 'header' immediately after the packet info plus an | |
383 | * indirect payload part that is pointer to by the 'payload' field. | |
384 | * Applications can use one or the other or both to implement simple | |
385 | * low-bandwidth streaming (e.g. audio) or more advanced | |
386 | * scatter-gather streaming (e.g. assembling video frame automatically). | |
387 | */ | |
388 | struct fw_iso_packet { | |
872e330e SR |
389 | u16 payload_length; /* Length of indirect payload */ |
390 | u32 interrupt:1; /* Generate interrupt on this packet */ | |
391 | u32 skip:1; /* tx: Set to not send packet at all */ | |
392 | /* rx: Sync bit, wait for matching sy */ | |
393 | u32 tag:2; /* tx: Tag in packet header */ | |
394 | u32 sy:4; /* tx: Sy in packet header */ | |
395 | u32 header_length:8; /* Length of immediate header */ | |
396 | u32 header[0]; /* tx: Top of 1394 isoch. data_block */ | |
c76acec6 JF |
397 | }; |
398 | ||
872e330e SR |
/* Isochronous context types, stored in fw_iso_context.type. */
#define FW_ISO_CONTEXT_TRANSMIT			0
#define FW_ISO_CONTEXT_RECEIVE			1
#define FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL	2

/* Tag-match bits for the 'tags' argument of fw_iso_context_start(). */
#define FW_ISO_CONTEXT_MATCH_TAG0	 1
#define FW_ISO_CONTEXT_MATCH_TAG1	 2
#define FW_ISO_CONTEXT_MATCH_TAG2	 4
#define FW_ISO_CONTEXT_MATCH_TAG3	 8
#define FW_ISO_CONTEXT_MATCH_ALL_TAGS	15
408 | ||
/*
 * An iso buffer is just a set of pages mapped for DMA in the
 * specified direction.  Since the pages are to be used for DMA, they
 * are not mapped into the kernel virtual address space.  We store the
 * DMA address in the page private. The helper function
 * fw_iso_buffer_map() will map the pages into a given vma.
 */
struct fw_iso_buffer {
	enum dma_data_direction direction;
	struct page **pages;		/* page_count entries */
	int page_count;
	int page_count_mapped;		/* how many of the pages are DMA-mapped */
};
422 | ||
423 | int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, | |
424 | int page_count, enum dma_data_direction direction); | |
425 | void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card); | |
872e330e | 426 | size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed); |
c76acec6 JF |
427 | |
struct fw_iso_context;

/* Completion callback for single-channel iso contexts (fw_iso_context.callback.sc). */
typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
				  u32 cycle, size_t header_length,
				  void *header, void *data);

/* Completion callback for multichannel receive contexts (fw_iso_context.callback.mc). */
typedef void (*fw_iso_mc_callback_t)(struct fw_iso_context *context,
				     dma_addr_t completed, void *data);
c76acec6 JF |
/* An isochronous transmit or receive context, created by fw_iso_context_create(). */
struct fw_iso_context {
	struct fw_card *card;
	int type;		/* one of FW_ISO_CONTEXT_* */
	int channel;
	int speed;
	bool drop_overflow_headers;
	size_t header_size;
	union {
		fw_iso_callback_t sc;	/* used for single-channel contexts */
		fw_iso_mc_callback_t mc; /* used for multichannel receive contexts */
	} callback;
	void *callback_data;	/* passed verbatim to the callback */
};
447 | ||
448 | struct fw_iso_context *fw_iso_context_create(struct fw_card *card, | |
449 | int type, int channel, int speed, size_t header_size, | |
450 | fw_iso_callback_t callback, void *callback_data); | |
872e330e | 451 | int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels); |
c76acec6 JF |
452 | int fw_iso_context_queue(struct fw_iso_context *ctx, |
453 | struct fw_iso_packet *packet, | |
454 | struct fw_iso_buffer *buffer, | |
455 | unsigned long payload); | |
13882a82 | 456 | void fw_iso_context_queue_flush(struct fw_iso_context *ctx); |
d1bbd209 | 457 | int fw_iso_context_flush_completions(struct fw_iso_context *ctx); |
c76acec6 JF |
458 | int fw_iso_context_start(struct fw_iso_context *ctx, |
459 | int cycle, int sync, int tags); | |
460 | int fw_iso_context_stop(struct fw_iso_context *ctx); | |
461 | void fw_iso_context_destroy(struct fw_iso_context *ctx); | |
31ef9134 CL |
462 | void fw_iso_resource_manage(struct fw_card *card, int generation, |
463 | u64 channels_mask, int *channel, int *bandwidth, | |
f30e6d3e | 464 | bool allocate); |
c76acec6 | 465 | |
105e53f8 SR |
466 | extern struct workqueue_struct *fw_workqueue; |
467 | ||
77c9a5da | 468 | #endif /* _LINUX_FIREWIRE_H */ |