c76acec6 JF |
1 | /* |
2 | * IPv4 over IEEE 1394, per RFC 2734 | |
cb6bf355 | 3 | * IPv6 over IEEE 1394, per RFC 3146 |
c76acec6 JF |
4 | * |
5 | * Copyright (C) 2009 Jay Fenlason <fenlason@redhat.com> | |
6 | * | |
7 | * based on eth1394 by Ben Collins et al | |
8 | */ | |
9 | ||
f91e3bd8 | 10 | #include <linux/bug.h> |
bf337b15 | 11 | #include <linux/compiler.h> |
48553011 | 12 | #include <linux/delay.h> |
c76acec6 | 13 | #include <linux/device.h> |
c1671470 | 14 | #include <linux/ethtool.h> |
c76acec6 JF |
15 | #include <linux/firewire.h> |
16 | #include <linux/firewire-constants.h> | |
17 | #include <linux/highmem.h> | |
18 | #include <linux/in.h> | |
19 | #include <linux/ip.h> | |
f91e3bd8 | 20 | #include <linux/jiffies.h> |
c76acec6 JF |
21 | #include <linux/mod_devicetable.h> |
22 | #include <linux/module.h> | |
23 | #include <linux/moduleparam.h> | |
5a124d38 | 24 | #include <linux/mutex.h> |
c76acec6 JF |
25 | #include <linux/netdevice.h> |
26 | #include <linux/skbuff.h> | |
5a0e3ad6 | 27 | #include <linux/slab.h> |
5a124d38 | 28 | #include <linux/spinlock.h> |
c76acec6 JF |
29 | |
30 | #include <asm/unaligned.h> | |
31 | #include <net/arp.h> | |
6752c8db | 32 | #include <net/firewire.h> |
c76acec6 | 33 | |
b2268830 SR |
34 | /* rx limits */ |
35 | #define FWNET_MAX_FRAGMENTS 30 /* arbitrary, > TX queue depth */ | |
36 | #define FWNET_ISO_PAGE_COUNT (PAGE_SIZE < 16*1024 ? 4 : 2) | |
37 | ||
38 | /* tx limits */ | |
39 | #define FWNET_MAX_QUEUED_DATAGRAMS 20 /* < 64 = number of tlabels */ | |
40 | #define FWNET_MIN_QUEUED_DATAGRAMS 10 /* should keep AT DMA busy enough */ | |
41 | #define FWNET_TX_QUEUE_LEN FWNET_MAX_QUEUED_DATAGRAMS /* ? */ | |
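/*
 * Flow control, as used further down in this file: fwnet_tx() stops the
 * netif queue once dev->queued_datagrams reaches
 * FWNET_MAX_QUEUED_DATAGRAMS, and dec_queued_datagrams() wakes it again
 * when the count drops back to FWNET_MIN_QUEUED_DATAGRAMS, so at most
 * 20 datagrams (each possibly spanning several fragments) are in flight
 * per card at any time.
 */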
c76acec6 | 42 | |
f91e3bd8 SR |
43 | #define IEEE1394_BROADCAST_CHANNEL 31 |
44 | #define IEEE1394_ALL_NODES (0xffc0 | 0x003f) | |
45 | #define IEEE1394_MAX_PAYLOAD_S100 512 | |
46 | #define FWNET_NO_FIFO_ADDR (~0ULL) | |
c76acec6 | 47 | |
f91e3bd8 SR |
48 | #define IANA_SPECIFIER_ID 0x00005eU |
49 | #define RFC2734_SW_VERSION 0x000001U | |
cb6bf355 | 50 | #define RFC3146_SW_VERSION 0x000002U |
c76acec6 | 51 | |
f91e3bd8 | 52 | #define IEEE1394_GASP_HDR_SIZE 8 |
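/*
 * The 8-byte GASP header preceding each broadcast datagram consists of
 * two big-endian quadlets, built in fwnet_send_packet() and parsed in
 * fwnet_receive_broadcast() below:
 *
 *   quadlet 0:  source_node_id << 16 | IANA_SPECIFIER_ID >> 8
 *   quadlet 1:  (IANA_SPECIFIER_ID & 0xff) << 24 | sw_version
 *
 * where sw_version is RFC2734_SW_VERSION for IPv4 and RFC3146_SW_VERSION
 * for IPv6 traffic.
 */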
c76acec6 | 53 | |
f91e3bd8 SR |
54 | #define RFC2374_UNFRAG_HDR_SIZE 4 |
55 | #define RFC2374_FRAG_HDR_SIZE 8 | |
56 | #define RFC2374_FRAG_OVERHEAD 4 | |
c76acec6 | 57 | |
f91e3bd8 SR |
58 | #define RFC2374_HDR_UNFRAG 0 /* unfragmented */ |
59 | #define RFC2374_HDR_FIRSTFRAG 1 /* first fragment */ | |
60 | #define RFC2374_HDR_LASTFRAG 2 /* last fragment */ | |
61 | #define RFC2374_HDR_INTFRAG 3 /* interior fragment */ | |
c76acec6 | 62 | |
021b97e4 YH |
63 | static bool fwnet_hwaddr_is_multicast(u8 *ha) |
64 | { | |
65 | return !!(*ha & 1); | |
66 | } | |
67 | ||
f91e3bd8 SR |
68 | /* IPv4 and IPv6 encapsulation header */ |
69 | struct rfc2734_header { | |
c76acec6 JF |
70 | u32 w0; |
71 | u32 w1; | |
72 | }; | |
73 | ||
f91e3bd8 SR |
74 | #define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30) |
75 | #define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff)) | |
76 | #define fwnet_get_hdr_dg_size(h) (((h)->w0 & 0x0fff0000) >> 16) | |
77 | #define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff)) | |
78 | #define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16) | |
c76acec6 | 79 | |
f91e3bd8 SR |
80 | #define fwnet_set_hdr_lf(lf) ((lf) << 30) |
81 | #define fwnet_set_hdr_ether_type(et) (et) | |
82 | #define fwnet_set_hdr_dg_size(dgs) ((dgs) << 16) | |
83 | #define fwnet_set_hdr_fg_off(fgo) (fgo) | |
c76acec6 | 84 | |
f91e3bd8 | 85 | #define fwnet_set_hdr_dgl(dgl) ((dgl) << 16) |
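/*
 * Bit layout implied by the accessors above (bit 31 = MSB of the
 * big-endian quadlet on the wire), per RFC 2734:
 *
 *   unfragmented:    w0 = lf:2 | reserved:14 | ether_type:16
 *   first fragment:  w0 = lf:2 | rsv:2 | dg_size:12 | ether_type:16
 *   interior/last:   w0 = lf:2 | rsv:2 | dg_size:12 | rsv:4 | fg_off:12
 *   all fragments:   w1 = dgl:16 | reserved:16
 */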
c76acec6 | 86 | |
f91e3bd8 SR |
87 | static inline void fwnet_make_uf_hdr(struct rfc2734_header *hdr, |
88 | unsigned ether_type) | |
89 | { | |
90 | hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_UNFRAG) | |
91 | | fwnet_set_hdr_ether_type(ether_type); | |
92 | } | |
c76acec6 | 93 | |
f91e3bd8 SR |
94 | static inline void fwnet_make_ff_hdr(struct rfc2734_header *hdr, |
95 | unsigned ether_type, unsigned dg_size, unsigned dgl) | |
96 | { | |
97 | hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_FIRSTFRAG) | |
98 | | fwnet_set_hdr_dg_size(dg_size) | |
99 | | fwnet_set_hdr_ether_type(ether_type); | |
100 | hdr->w1 = fwnet_set_hdr_dgl(dgl); | |
101 | } | |
c76acec6 | 102 | |
f91e3bd8 SR |
103 | static inline void fwnet_make_sf_hdr(struct rfc2734_header *hdr, |
104 | unsigned lf, unsigned dg_size, unsigned fg_off, unsigned dgl) | |
105 | { | |
106 | hdr->w0 = fwnet_set_hdr_lf(lf) | |
107 | | fwnet_set_hdr_dg_size(dg_size) | |
108 | | fwnet_set_hdr_fg_off(fg_off); | |
109 | hdr->w1 = fwnet_set_hdr_dgl(dgl); | |
110 | } | |
c76acec6 JF |
111 | |
112 | /* This list keeps track of what parts of the datagram have been filled in */ | |
f91e3bd8 SR |
113 | struct fwnet_fragment_info { |
114 | struct list_head fi_link; | |
c76acec6 JF |
115 | u16 offset; |
116 | u16 len; | |
117 | }; | |
118 | ||
f91e3bd8 SR |
119 | struct fwnet_partial_datagram { |
120 | struct list_head pd_link; | |
121 | struct list_head fi_list; | |
c76acec6 JF |
122 | struct sk_buff *skb; |
123 | /* FIXME Why not use skb->data? */ | |
124 | char *pbuf; | |
125 | u16 datagram_label; | |
126 | u16 ether_type; | |
127 | u16 datagram_size; | |
128 | }; | |
129 | ||
5a124d38 SR |
130 | static DEFINE_MUTEX(fwnet_device_mutex); |
131 | static LIST_HEAD(fwnet_device_list); | |
c76acec6 | 132 | |
f91e3bd8 | 133 | struct fwnet_device { |
5a124d38 | 134 | struct list_head dev_link; |
c76acec6 | 135 | spinlock_t lock; |
f91e3bd8 SR |
136 | enum { |
137 | FWNET_BROADCAST_ERROR, | |
138 | FWNET_BROADCAST_RUNNING, | |
139 | FWNET_BROADCAST_STOPPED, | |
140 | } broadcast_state; | |
c76acec6 JF |
141 | struct fw_iso_context *broadcast_rcv_context; |
142 | struct fw_iso_buffer broadcast_rcv_buffer; | |
143 | void **broadcast_rcv_buffer_ptrs; | |
144 | unsigned broadcast_rcv_next_ptr; | |
145 | unsigned num_broadcast_rcv_ptrs; | |
146 | unsigned rcv_buffer_size; | |
147 | /* | |
148 | * This value is the maximum unfragmented datagram size that can be | |
149 | * sent by the hardware. It already has the GASP overhead and the | |
150 | * unfragmented datagram header overhead calculated into it. | |
151 | */ | |
152 | unsigned broadcast_xmt_max_payload; | |
153 | u16 broadcast_xmt_datagramlabel; | |
154 | ||
155 | /* | |
f91e3bd8 | 156 | * The CSR address that remote nodes must send datagrams to for us to |
c76acec6 JF |
157 | * receive them. |
158 | */ | |
159 | struct fw_address_handler handler; | |
160 | u64 local_fifo; | |
161 | ||
48553011 SR |
162 | /* Number of tx datagrams that have been queued but not yet acked */ |
163 | int queued_datagrams; | |
c76acec6 | 164 | |
c1671470 | 165 | int peer_count; |
5a124d38 | 166 | struct list_head peer_list; |
c76acec6 | 167 | struct fw_card *card; |
5a124d38 SR |
168 | struct net_device *netdev; |
169 | }; | |
170 | ||
171 | struct fwnet_peer { | |
172 | struct list_head peer_link; | |
173 | struct fwnet_device *dev; | |
174 | u64 guid; | |
5a124d38 SR |
175 | |
176 | /* guarded by dev->lock */ | |
177 | struct list_head pd_list; /* received partial datagrams */ | |
178 | unsigned pdg_size; /* pd_list size */ | |
179 | ||
180 | u16 datagram_label; /* outgoing datagram label */ | |
48553011 | 181 | u16 max_payload; /* includes RFC2374_FRAG_HDR_SIZE overhead */ |
5a124d38 SR |
182 | int node_id; |
183 | int generation; | |
184 | unsigned speed; | |
c76acec6 JF |
185 | }; |
186 | ||
187 | /* This is our task struct. It's used for the packet complete callback. */ | |
f91e3bd8 | 188 | struct fwnet_packet_task { |
c76acec6 | 189 | struct fw_transaction transaction; |
f91e3bd8 | 190 | struct rfc2734_header hdr; |
c76acec6 | 191 | struct sk_buff *skb; |
f91e3bd8 SR |
192 | struct fwnet_device *dev; |
193 | ||
c76acec6 | 194 | int outstanding_pkts; |
c76acec6 JF |
195 | u64 fifo_addr; |
196 | u16 dest_node; | |
48553011 | 197 | u16 max_payload; |
c76acec6 JF |
198 | u8 generation; |
199 | u8 speed; | |
48553011 | 200 | u8 enqueued; |
c76acec6 JF |
201 | }; |
202 | ||
6752c8db YH |
203 | /* |
204 | * Get fifo address embedded in hwaddr | |
205 | */ | |
206 | static __u64 fwnet_hwaddr_fifo(union fwnet_hwaddr *ha) | |
207 | { | |
208 | return (u64)get_unaligned_be16(&ha->uc.fifo_hi) << 32 | |
209 | | get_unaligned_be32(&ha->uc.fifo_lo); | |
210 | } | |
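/*
 * The 16-bit fifo_hi and 32-bit fifo_lo fields of union fwnet_hwaddr
 * (declared in <net/firewire.h>) together encode the 48-bit 1394 address
 * offset at which the peer accepts unicast write requests.  fwnet_probe()
 * below stores our own offset (dev->local_fifo, registered within
 * fw_high_memory_region by fwnet_fifo_start()) into the same fields of
 * net->dev_addr.
 */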
211 | ||
f91e3bd8 SR |
212 | /* |
213 | * saddr == NULL means use device source address. | |
214 | * daddr == NULL means leave destination address (e.g. unresolved ARP). | |
215 | */ | |
216 | static int fwnet_header_create(struct sk_buff *skb, struct net_device *net, | |
217 | unsigned short type, const void *daddr, | |
218 | const void *saddr, unsigned len) | |
219 | { | |
220 | struct fwnet_header *h; | |
c76acec6 | 221 | |
f91e3bd8 SR |
222 | h = (struct fwnet_header *)skb_push(skb, sizeof(*h)); |
223 | put_unaligned_be16(type, &h->h_proto); | |
c76acec6 | 224 | |
f91e3bd8 SR |
225 | if (net->flags & (IFF_LOOPBACK | IFF_NOARP)) { |
226 | memset(h->h_dest, 0, net->addr_len); | |
c76acec6 | 227 | |
f91e3bd8 | 228 | return net->hard_header_len; |
c76acec6 JF |
229 | } |
230 | ||
231 | if (daddr) { | |
f91e3bd8 SR |
232 | memcpy(h->h_dest, daddr, net->addr_len); |
233 | ||
234 | return net->hard_header_len; | |
c76acec6 JF |
235 | } |
236 | ||
f91e3bd8 | 237 | return -net->hard_header_len; |
c76acec6 JF |
238 | } |
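/*
 * Returning -hard_header_len follows the usual header_ops->create
 * convention (compare eth_header() in net/ethernet/eth.c): the header has
 * been pushed, but the destination address is still unresolved, so the
 * caller knows the header must be completed later.
 */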
239 | ||
f91e3bd8 | 240 | static int fwnet_header_cache(const struct neighbour *neigh, |
e69dd336 | 241 | struct hh_cache *hh, __be16 type) |
f91e3bd8 SR |
242 | { |
243 | struct net_device *net; | |
244 | struct fwnet_header *h; | |
c76acec6 | 245 | |
e69dd336 | 246 | if (type == cpu_to_be16(ETH_P_802_3)) |
c76acec6 | 247 | return -1; |
f91e3bd8 | 248 | net = neigh->dev; |
82586340 | 249 | h = (struct fwnet_header *)((u8 *)hh->hh_data + HH_DATA_OFF(sizeof(*h))); |
e69dd336 | 250 | h->h_proto = type; |
f91e3bd8 SR |
251 | memcpy(h->h_dest, neigh->ha, net->addr_len); |
252 | hh->hh_len = FWNET_HLEN; | |
c76acec6 | 253 | |
c76acec6 JF |
254 | return 0; |
255 | } | |
256 | ||
257 | /* Called by Address Resolution module to notify changes in address. */ | |
f91e3bd8 SR |
258 | static void fwnet_header_cache_update(struct hh_cache *hh, |
259 | const struct net_device *net, const unsigned char *haddr) | |
260 | { | |
82586340 | 261 | memcpy((u8 *)hh->hh_data + HH_DATA_OFF(FWNET_HLEN), haddr, net->addr_len); |
c76acec6 JF |
262 | } |
263 | ||
f91e3bd8 SR |
264 | static int fwnet_header_parse(const struct sk_buff *skb, unsigned char *haddr) |
265 | { | |
266 | memcpy(haddr, skb->dev->dev_addr, FWNET_ALEN); | |
267 | ||
268 | return FWNET_ALEN; | |
c76acec6 JF |
269 | } |
270 | ||
f91e3bd8 SR |
271 | static const struct header_ops fwnet_header_ops = { |
272 | .create = fwnet_header_create, | |
f91e3bd8 SR |
273 | .cache = fwnet_header_cache, |
274 | .cache_update = fwnet_header_cache_update, | |
275 | .parse = fwnet_header_parse, | |
c76acec6 JF |
276 | }; |
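/*
 * These header_ops mirror the Ethernet ones, except that the hardware
 * address is the 16-byte fwnet_hwaddr (FWNET_ALEN) and the device type is
 * ARPHRD_IEEE1394, both set up in fwnet_init_dev() below.
 */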
277 | ||
c76acec6 | 278 | /* FIXME: is this correct for all cases? */ |
f91e3bd8 SR |
279 | static bool fwnet_frag_overlap(struct fwnet_partial_datagram *pd, |
280 | unsigned offset, unsigned len) | |
c76acec6 | 281 | { |
f91e3bd8 | 282 | struct fwnet_fragment_info *fi; |
c76acec6 JF |
283 | unsigned end = offset + len; |
284 | ||
f91e3bd8 SR |
285 | list_for_each_entry(fi, &pd->fi_list, fi_link) |
286 | if (offset < fi->offset + fi->len && end > fi->offset) | |
c76acec6 | 287 | return true; |
f91e3bd8 | 288 | |
c76acec6 JF |
289 | return false; |
290 | } | |
291 | ||
292 | /* Assumes that new fragment does not overlap any existing fragments */ | |
f91e3bd8 SR |
293 | static struct fwnet_fragment_info *fwnet_frag_new( |
294 | struct fwnet_partial_datagram *pd, unsigned offset, unsigned len) | |
295 | { | |
296 | struct fwnet_fragment_info *fi, *fi2, *new; | |
c76acec6 JF |
297 | struct list_head *list; |
298 | ||
f91e3bd8 SR |
299 | list = &pd->fi_list; |
300 | list_for_each_entry(fi, &pd->fi_list, fi_link) { | |
c76acec6 JF |
301 | if (fi->offset + fi->len == offset) { |
302 | /* The new fragment can be tacked on to the end */ | |
303 | /* Did the new fragment plug a hole? */ | |
f91e3bd8 SR |
304 | fi2 = list_entry(fi->fi_link.next, |
305 | struct fwnet_fragment_info, fi_link); | |
c76acec6 | 306 | if (fi->offset + fi->len == fi2->offset) { |
c76acec6 JF |
307 | /* glue fragments together */ |
308 | fi->len += len + fi2->len; | |
f91e3bd8 | 309 | list_del(&fi2->fi_link); |
c76acec6 JF |
310 | kfree(fi2); |
311 | } else { | |
c76acec6 JF |
312 | fi->len += len; |
313 | } | |
f91e3bd8 | 314 | |
c76acec6 JF |
315 | return fi; |
316 | } | |
317 | if (offset + len == fi->offset) { | |
318 | /* The new fragment can be tacked on to the beginning */ | |
319 | /* Did the new fragment plug a hole? */ | |
f91e3bd8 SR |
320 | fi2 = list_entry(fi->fi_link.prev, |
321 | struct fwnet_fragment_info, fi_link); | |
c76acec6 JF |
322 | if (fi2->offset + fi2->len == fi->offset) { |
323 | /* glue fragments together */ | |
c76acec6 | 324 | fi2->len += fi->len + len; |
f91e3bd8 | 325 | list_del(&fi->fi_link); |
c76acec6 | 326 | kfree(fi); |
f91e3bd8 | 327 | |
c76acec6 JF |
328 | return fi2; |
329 | } | |
c76acec6 JF |
330 | fi->offset = offset; |
331 | fi->len += len; | |
f91e3bd8 | 332 | |
c76acec6 JF |
333 | return fi; |
334 | } | |
335 | if (offset > fi->offset + fi->len) { | |
f91e3bd8 | 336 | list = &fi->fi_link; |
c76acec6 JF |
337 | break; |
338 | } | |
339 | if (offset + len < fi->offset) { | |
f91e3bd8 | 340 | list = fi->fi_link.prev; |
c76acec6 JF |
341 | break; |
342 | } | |
343 | } | |
344 | ||
345 | new = kmalloc(sizeof(*new), GFP_ATOMIC); | |
cfb0c9d1 | 346 | if (!new) |
c76acec6 | 347 | return NULL; |
c76acec6 JF |
348 | |
349 | new->offset = offset; | |
350 | new->len = len; | |
f91e3bd8 SR |
351 | list_add(&new->fi_link, list); |
352 | ||
c76acec6 JF |
353 | return new; |
354 | } | |
355 | ||
f91e3bd8 SR |
356 | static struct fwnet_partial_datagram *fwnet_pd_new(struct net_device *net, |
357 | struct fwnet_peer *peer, u16 datagram_label, unsigned dg_size, | |
358 | void *frag_buf, unsigned frag_off, unsigned frag_len) | |
359 | { | |
360 | struct fwnet_partial_datagram *new; | |
361 | struct fwnet_fragment_info *fi; | |
c76acec6 JF |
362 | |
363 | new = kmalloc(sizeof(*new), GFP_ATOMIC); | |
364 | if (!new) | |
365 | goto fail; | |
f91e3bd8 SR |
366 | |
367 | INIT_LIST_HEAD(&new->fi_list); | |
368 | fi = fwnet_frag_new(new, frag_off, frag_len); | |
369 | if (fi == NULL) | |
c76acec6 | 370 | goto fail_w_new; |
f91e3bd8 | 371 | |
c76acec6 JF |
372 | new->datagram_label = datagram_label; |
373 | new->datagram_size = dg_size; | |
82586340 | 374 | new->skb = dev_alloc_skb(dg_size + LL_RESERVED_SPACE(net)); |
f91e3bd8 | 375 | if (new->skb == NULL) |
c76acec6 | 376 | goto fail_w_fi; |
f91e3bd8 | 377 | |
82586340 | 378 | skb_reserve(new->skb, LL_RESERVED_SPACE(net)); |
c76acec6 JF |
379 | new->pbuf = skb_put(new->skb, dg_size); |
380 | memcpy(new->pbuf + frag_off, frag_buf, frag_len); | |
f91e3bd8 SR |
381 | list_add_tail(&new->pd_link, &peer->pd_list); |
382 | ||
c76acec6 JF |
383 | return new; |
384 | ||
385 | fail_w_fi: | |
386 | kfree(fi); | |
387 | fail_w_new: | |
388 | kfree(new); | |
389 | fail: | |
c76acec6 JF |
390 | return NULL; |
391 | } | |
392 | ||
f91e3bd8 SR |
393 | static struct fwnet_partial_datagram *fwnet_pd_find(struct fwnet_peer *peer, |
394 | u16 datagram_label) | |
395 | { | |
396 | struct fwnet_partial_datagram *pd; | |
c76acec6 | 397 | |
f91e3bd8 SR |
398 | list_for_each_entry(pd, &peer->pd_list, pd_link) |
399 | if (pd->datagram_label == datagram_label) | |
c76acec6 | 400 | return pd; |
f91e3bd8 | 401 | |
c76acec6 JF |
402 | return NULL; |
403 | } | |
404 | ||
405 | ||
f91e3bd8 SR |
406 | static void fwnet_pd_delete(struct fwnet_partial_datagram *old) |
407 | { | |
408 | struct fwnet_fragment_info *fi, *n; | |
c76acec6 | 409 | |
f91e3bd8 | 410 | list_for_each_entry_safe(fi, n, &old->fi_list, fi_link) |
c76acec6 | 411 | kfree(fi); |
f91e3bd8 SR |
412 | |
413 | list_del(&old->pd_link); | |
c76acec6 JF |
414 | dev_kfree_skb_any(old->skb); |
415 | kfree(old); | |
416 | } | |
417 | ||
f91e3bd8 SR |
418 | static bool fwnet_pd_update(struct fwnet_peer *peer, |
419 | struct fwnet_partial_datagram *pd, void *frag_buf, | |
420 | unsigned frag_off, unsigned frag_len) | |
421 | { | |
422 | if (fwnet_frag_new(pd, frag_off, frag_len) == NULL) | |
c76acec6 | 423 | return false; |
f91e3bd8 | 424 | |
c76acec6 JF |
425 | memcpy(pd->pbuf + frag_off, frag_buf, frag_len); |
426 | ||
427 | /* | |
25985edc | 428 | * Move list entry to the end of the list so that the oldest partial |
c76acec6 JF |
429 | * datagrams percolate to the beginning of the list (and are evicted first)
430 | */ | |
f91e3bd8 SR |
431 | list_move_tail(&pd->pd_link, &peer->pd_list); |
432 | ||
c76acec6 JF |
433 | return true; |
434 | } | |
435 | ||
f91e3bd8 SR |
436 | static bool fwnet_pd_is_complete(struct fwnet_partial_datagram *pd) |
437 | { | |
438 | struct fwnet_fragment_info *fi; | |
c76acec6 | 439 | |
f91e3bd8 | 440 | fi = list_entry(pd->fi_list.next, struct fwnet_fragment_info, fi_link); |
c76acec6 | 441 | |
f91e3bd8 | 442 | return fi->len == pd->datagram_size; |
c76acec6 JF |
443 | } |
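/*
 * Checking only the first list entry works because fwnet_frag_new() glues
 * touching fragments together: once every piece has arrived, fi_list has
 * collapsed into a single fwnet_fragment_info covering the whole datagram.
 */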
444 | ||
5a124d38 | 445 | /* caller must hold dev->lock */ |
f91e3bd8 SR |
446 | static struct fwnet_peer *fwnet_peer_find_by_guid(struct fwnet_device *dev, |
447 | u64 guid) | |
448 | { | |
5a124d38 | 449 | struct fwnet_peer *peer; |
c76acec6 | 450 | |
5a124d38 SR |
451 | list_for_each_entry(peer, &dev->peer_list, peer_link) |
452 | if (peer->guid == guid) | |
453 | return peer; | |
c76acec6 | 454 | |
5a124d38 | 455 | return NULL; |
c76acec6 JF |
456 | } |
457 | ||
5a124d38 | 458 | /* caller must hold dev->lock */ |
f91e3bd8 | 459 | static struct fwnet_peer *fwnet_peer_find_by_node_id(struct fwnet_device *dev, |
5a124d38 | 460 | int node_id, int generation) |
f91e3bd8 | 461 | { |
5a124d38 | 462 | struct fwnet_peer *peer; |
c76acec6 | 463 | |
5a124d38 SR |
464 | list_for_each_entry(peer, &dev->peer_list, peer_link) |
465 | if (peer->node_id == node_id && | |
466 | peer->generation == generation) | |
467 | return peer; | |
f91e3bd8 | 468 | |
5a124d38 | 469 | return NULL; |
c76acec6 JF |
470 | } |
471 | ||
5a124d38 SR |
472 | /* See IEEE 1394-2008 table 6-4, table 8-8, table 16-18. */ |
473 | static unsigned fwnet_max_payload(unsigned max_rec, unsigned speed) | |
f91e3bd8 | 474 | { |
5a124d38 | 475 | max_rec = min(max_rec, speed + 8); |
4ec4a67a | 476 | max_rec = clamp(max_rec, 8U, 11U); /* 512...4096 */ |
5a124d38 SR |
477 | |
478 | return (1 << (max_rec + 1)) - RFC2374_FRAG_HDR_SIZE; | |
c76acec6 JF |
479 | } |
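/*
 * Example: a peer advertising max_rec = 8 (512-byte block writes) at S100
 * (speed code 0) yields min(8, 0 + 8) = 8, so the result is
 * (1 << 9) - RFC2374_FRAG_HDR_SIZE = 512 - 8 = 504 bytes of fragment
 * payload; the clamp keeps the result between 504 and 4088 bytes.
 */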
480 | ||
5a124d38 | 481 | |
f91e3bd8 SR |
482 | static int fwnet_finish_incoming_packet(struct net_device *net, |
483 | struct sk_buff *skb, u16 source_node_id, | |
484 | bool is_broadcast, u16 ether_type) | |
485 | { | |
486 | struct fwnet_device *dev; | |
c76acec6 | 487 | int status; |
f91e3bd8 | 488 | __be64 guid; |
c76acec6 | 489 | |
18406d7e YH |
490 | switch (ether_type) { |
491 | case ETH_P_ARP: | |
492 | case ETH_P_IP: | |
cb6bf355 YH |
493 | #if IS_ENABLED(CONFIG_IPV6) |
494 | case ETH_P_IPV6: | |
495 | #endif | |
18406d7e YH |
496 | break; |
497 | default: | |
498 | goto err; | |
499 | } | |
500 | ||
f91e3bd8 | 501 | dev = netdev_priv(net); |
c76acec6 | 502 | /* Write metadata, and then pass to the receive level */ |
f91e3bd8 | 503 | skb->dev = net; |
b577d7e2 | 504 | skb->ip_summed = CHECKSUM_NONE; |
c76acec6 JF |
505 | |
506 | /* | |
507 | * Parse the encapsulation header. This actually does the job of | |
6752c8db | 508 | * converting to an ethernet-like pseudo frame header. |
c76acec6 | 509 | */ |
f91e3bd8 SR |
510 | guid = cpu_to_be64(dev->card->guid); |
511 | if (dev_hard_header(skb, net, ether_type, | |
6752c8db | 512 | is_broadcast ? net->broadcast : net->dev_addr, |
f91e3bd8 SR |
513 | NULL, skb->len) >= 0) { |
514 | struct fwnet_header *eth; | |
c76acec6 JF |
515 | u16 *rawp; |
516 | __be16 protocol; | |
517 | ||
518 | skb_reset_mac_header(skb); | |
519 | skb_pull(skb, sizeof(*eth)); | |
f91e3bd8 | 520 | eth = (struct fwnet_header *)skb_mac_header(skb); |
021b97e4 | 521 | if (fwnet_hwaddr_is_multicast(eth->h_dest)) { |
f91e3bd8 SR |
522 | if (memcmp(eth->h_dest, net->broadcast, |
523 | net->addr_len) == 0) | |
c76acec6 | 524 | skb->pkt_type = PACKET_BROADCAST; |
c76acec6 JF |
525 | #if 0 |
526 | else | |
527 | skb->pkt_type = PACKET_MULTICAST; | |
528 | #endif | |
529 | } else { | |
156ce867 | 530 | if (memcmp(eth->h_dest, net->dev_addr, net->addr_len)) |
c76acec6 | 531 | skb->pkt_type = PACKET_OTHERHOST; |
c76acec6 | 532 | } |
e5c5d22e | 533 | if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) { |
c76acec6 JF |
534 | protocol = eth->h_proto; |
535 | } else { | |
536 | rawp = (u16 *)skb->data; | |
f91e3bd8 | 537 | if (*rawp == 0xffff) |
c76acec6 | 538 | protocol = htons(ETH_P_802_3); |
f91e3bd8 | 539 | else |
c76acec6 | 540 | protocol = htons(ETH_P_802_2); |
c76acec6 JF |
541 | } |
542 | skb->protocol = protocol; | |
543 | } | |
544 | status = netif_rx(skb); | |
f91e3bd8 SR |
545 | if (status == NET_RX_DROP) { |
546 | net->stats.rx_errors++; | |
547 | net->stats.rx_dropped++; | |
c76acec6 | 548 | } else { |
f91e3bd8 SR |
549 | net->stats.rx_packets++; |
550 | net->stats.rx_bytes += skb->len; | |
c76acec6 | 551 | } |
f91e3bd8 | 552 | |
c76acec6 JF |
553 | return 0; |
554 | ||
18406d7e | 555 | err: |
f91e3bd8 SR |
556 | net->stats.rx_errors++; |
557 | net->stats.rx_dropped++; | |
558 | ||
c76acec6 | 559 | dev_kfree_skb_any(skb); |
f91e3bd8 | 560 | |
1bf145fe | 561 | return -ENOENT; |
c76acec6 JF |
562 | } |
563 | ||
f91e3bd8 | 564 | static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, |
5a124d38 SR |
565 | int source_node_id, int generation, |
566 | bool is_broadcast) | |
f91e3bd8 | 567 | { |
c76acec6 | 568 | struct sk_buff *skb; |
5a124d38 | 569 | struct net_device *net = dev->netdev; |
f91e3bd8 | 570 | struct rfc2734_header hdr; |
c76acec6 JF |
571 | unsigned lf; |
572 | unsigned long flags; | |
f91e3bd8 SR |
573 | struct fwnet_peer *peer; |
574 | struct fwnet_partial_datagram *pd; | |
c76acec6 JF |
575 | int fg_off; |
576 | int dg_size; | |
577 | u16 datagram_label; | |
578 | int retval; | |
579 | u16 ether_type; | |
580 | ||
f91e3bd8 SR |
581 | hdr.w0 = be32_to_cpu(buf[0]); |
582 | lf = fwnet_get_hdr_lf(&hdr); | |
583 | if (lf == RFC2374_HDR_UNFRAG) { | |
c76acec6 JF |
584 | /* |
585 | * An unfragmented datagram has been received by the ieee1394 | |
586 | * bus. Build an skbuff around it so we can pass it to the | |
587 | * high level network layer. | |
588 | */ | |
f91e3bd8 | 589 | ether_type = fwnet_get_hdr_ether_type(&hdr); |
c76acec6 | 590 | buf++; |
f91e3bd8 | 591 | len -= RFC2374_UNFRAG_HDR_SIZE; |
c76acec6 | 592 | |
82586340 | 593 | skb = dev_alloc_skb(len + LL_RESERVED_SPACE(net)); |
c76acec6 | 594 | if (unlikely(!skb)) { |
f91e3bd8 SR |
595 | net->stats.rx_dropped++; |
596 | ||
1bf145fe | 597 | return -ENOMEM; |
c76acec6 | 598 | } |
82586340 | 599 | skb_reserve(skb, LL_RESERVED_SPACE(net)); |
f91e3bd8 SR |
600 | memcpy(skb_put(skb, len), buf, len); |
601 | ||
602 | return fwnet_finish_incoming_packet(net, skb, source_node_id, | |
603 | is_broadcast, ether_type); | |
c76acec6 JF |
604 | } |
605 | /* A datagram fragment has been received, now the fun begins. */ | |
606 | hdr.w1 = ntohl(buf[1]); | |
f91e3bd8 SR |
607 | buf += 2; |
608 | len -= RFC2374_FRAG_HDR_SIZE; | |
609 | if (lf == RFC2374_HDR_FIRSTFRAG) { | |
610 | ether_type = fwnet_get_hdr_ether_type(&hdr); | |
c76acec6 JF |
611 | fg_off = 0; |
612 | } else { | |
f91e3bd8 SR |
613 | ether_type = 0; |
614 | fg_off = fwnet_get_hdr_fg_off(&hdr); | |
c76acec6 | 615 | } |
f91e3bd8 SR |
616 | datagram_label = fwnet_get_hdr_dgl(&hdr); |
617 | dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? + 1 */ | |
f91e3bd8 | 618 | |
5a124d38 SR |
619 | spin_lock_irqsave(&dev->lock, flags); |
620 | ||
621 | peer = fwnet_peer_find_by_node_id(dev, source_node_id, generation); | |
1bf145fe SR |
622 | if (!peer) { |
623 | retval = -ENOENT; | |
624 | goto fail; | |
625 | } | |
f91e3bd8 SR |
626 | |
627 | pd = fwnet_pd_find(peer, datagram_label); | |
c76acec6 | 628 | if (pd == NULL) { |
f91e3bd8 | 629 | while (peer->pdg_size >= FWNET_MAX_FRAGMENTS) { |
c76acec6 | 630 | /* remove the oldest */ |
f91e3bd8 SR |
631 | fwnet_pd_delete(list_first_entry(&peer->pd_list, |
632 | struct fwnet_partial_datagram, pd_link)); | |
633 | peer->pdg_size--; | |
c76acec6 | 634 | } |
f91e3bd8 SR |
635 | pd = fwnet_pd_new(net, peer, datagram_label, |
636 | dg_size, buf, fg_off, len); | |
637 | if (pd == NULL) { | |
c76acec6 | 638 | retval = -ENOMEM; |
1bf145fe | 639 | goto fail; |
c76acec6 | 640 | } |
f91e3bd8 | 641 | peer->pdg_size++; |
c76acec6 | 642 | } else { |
f91e3bd8 SR |
643 | if (fwnet_frag_overlap(pd, fg_off, len) || |
644 | pd->datagram_size != dg_size) { | |
c76acec6 JF |
645 | /* |
646 | * Differing datagram sizes or overlapping fragments, | |
f91e3bd8 | 647 | * discard old datagram and start a new one. |
c76acec6 | 648 | */ |
f91e3bd8 SR |
649 | fwnet_pd_delete(pd); |
650 | pd = fwnet_pd_new(net, peer, datagram_label, | |
651 | dg_size, buf, fg_off, len); | |
652 | if (pd == NULL) { | |
f91e3bd8 | 653 | peer->pdg_size--; |
1bf145fe SR |
654 | retval = -ENOMEM; |
655 | goto fail; | |
c76acec6 JF |
656 | } |
657 | } else { | |
f91e3bd8 | 658 | if (!fwnet_pd_update(peer, pd, buf, fg_off, len)) { |
c76acec6 JF |
659 | /* |
660 | * Couldn't save off fragment anyway | |
661 | * so might as well obliterate the | |
662 | * datagram now. | |
663 | */ | |
f91e3bd8 SR |
664 | fwnet_pd_delete(pd); |
665 | peer->pdg_size--; | |
1bf145fe SR |
666 | retval = -ENOMEM; |
667 | goto fail; | |
c76acec6 JF |
668 | } |
669 | } | |
670 | } /* new datagram or add to existing one */ | |
671 | ||
f91e3bd8 | 672 | if (lf == RFC2374_HDR_FIRSTFRAG) |
c76acec6 | 673 | pd->ether_type = ether_type; |
f91e3bd8 SR |
674 | |
675 | if (fwnet_pd_is_complete(pd)) { | |
c76acec6 | 676 | ether_type = pd->ether_type; |
f91e3bd8 | 677 | peer->pdg_size--; |
c76acec6 | 678 | skb = skb_get(pd->skb); |
f91e3bd8 SR |
679 | fwnet_pd_delete(pd); |
680 | ||
5a124d38 | 681 | spin_unlock_irqrestore(&dev->lock, flags); |
f91e3bd8 SR |
682 | |
683 | return fwnet_finish_incoming_packet(net, skb, source_node_id, | |
684 | false, ether_type); | |
c76acec6 JF |
685 | } |
686 | /* | |
687 | * Datagram is not complete, we're done for the | |
688 | * moment. | |
689 | */ | |
b2268830 | 690 | retval = 0; |
1bf145fe | 691 | fail: |
5a124d38 | 692 | spin_unlock_irqrestore(&dev->lock, flags); |
f91e3bd8 | 693 | |
1bf145fe | 694 | return retval; |
c76acec6 JF |
695 | } |
696 | ||
f91e3bd8 SR |
697 | static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r, |
698 | int tcode, int destination, int source, int generation, | |
33e553fe SR |
699 | unsigned long long offset, void *payload, size_t length, |
700 | void *callback_data) | |
f91e3bd8 | 701 | { |
00635b8e SR |
702 | struct fwnet_device *dev = callback_data; |
703 | int rcode; | |
c76acec6 | 704 | |
00635b8e SR |
705 | if (destination == IEEE1394_ALL_NODES) { |
706 | kfree(r); | |
f91e3bd8 | 707 | |
c76acec6 JF |
708 | return; |
709 | } | |
f91e3bd8 | 710 | |
00635b8e SR |
711 | if (offset != dev->handler.offset) |
712 | rcode = RCODE_ADDRESS_ERROR; | |
713 | else if (tcode != TCODE_WRITE_BLOCK_REQUEST) | |
714 | rcode = RCODE_TYPE_ERROR; | |
715 | else if (fwnet_incoming_packet(dev, payload, length, | |
716 | source, generation, false) != 0) { | |
8408dc1c | 717 | dev_err(&dev->netdev->dev, "incoming packet failure\n"); |
00635b8e SR |
718 | rcode = RCODE_CONFLICT_ERROR; |
719 | } else | |
720 | rcode = RCODE_COMPLETE; | |
f91e3bd8 | 721 | |
00635b8e | 722 | fw_send_response(card, r, rcode); |
c76acec6 JF |
723 | } |
724 | ||
f91e3bd8 SR |
725 | static void fwnet_receive_broadcast(struct fw_iso_context *context, |
726 | u32 cycle, size_t header_length, void *header, void *data) | |
727 | { | |
728 | struct fwnet_device *dev; | |
c76acec6 | 729 | struct fw_iso_packet packet; |
f91e3bd8 SR |
730 | __be16 *hdr_ptr; |
731 | __be32 *buf_ptr; | |
c76acec6 JF |
732 | int retval; |
733 | u32 length; | |
734 | u16 source_node_id; | |
735 | u32 specifier_id; | |
736 | u32 ver; | |
737 | unsigned long offset; | |
738 | unsigned long flags; | |
739 | ||
f91e3bd8 | 740 | dev = data; |
c76acec6 | 741 | hdr_ptr = header; |
f91e3bd8 SR |
742 | length = be16_to_cpup(hdr_ptr); |
743 | ||
744 | spin_lock_irqsave(&dev->lock, flags); | |
745 | ||
746 | offset = dev->rcv_buffer_size * dev->broadcast_rcv_next_ptr; | |
747 | buf_ptr = dev->broadcast_rcv_buffer_ptrs[dev->broadcast_rcv_next_ptr++]; | |
748 | if (dev->broadcast_rcv_next_ptr == dev->num_broadcast_rcv_ptrs) | |
749 | dev->broadcast_rcv_next_ptr = 0; | |
750 | ||
751 | spin_unlock_irqrestore(&dev->lock, flags); | |
c76acec6 JF |
752 | |
753 | specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8 | |
754 | | (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24; | |
f91e3bd8 | 755 | ver = be32_to_cpu(buf_ptr[1]) & 0xffffff; |
c76acec6 | 756 | source_node_id = be32_to_cpu(buf_ptr[0]) >> 16; |
f91e3bd8 | 757 | |
cb6bf355 YH |
758 | if (specifier_id == IANA_SPECIFIER_ID && |
759 | (ver == RFC2734_SW_VERSION | |
760 | #if IS_ENABLED(CONFIG_IPV6) | |
761 | || ver == RFC3146_SW_VERSION | |
762 | #endif | |
763 | )) { | |
c76acec6 | 764 | buf_ptr += 2; |
f91e3bd8 | 765 | length -= IEEE1394_GASP_HDR_SIZE; |
9d237342 SG |
766 | fwnet_incoming_packet(dev, buf_ptr, length, source_node_id, |
767 | context->card->generation, true); | |
f91e3bd8 SR |
768 | } |
769 | ||
770 | packet.payload_length = dev->rcv_buffer_size; | |
c76acec6 JF |
771 | packet.interrupt = 1; |
772 | packet.skip = 0; | |
773 | packet.tag = 3; | |
774 | packet.sy = 0; | |
f91e3bd8 SR |
775 | packet.header_length = IEEE1394_GASP_HDR_SIZE; |
776 | ||
777 | spin_lock_irqsave(&dev->lock, flags); | |
c76acec6 | 778 | |
f91e3bd8 SR |
779 | retval = fw_iso_context_queue(dev->broadcast_rcv_context, &packet, |
780 | &dev->broadcast_rcv_buffer, offset); | |
781 | ||
782 | spin_unlock_irqrestore(&dev->lock, flags); | |
783 | ||
13882a82 CL |
784 | if (retval >= 0) |
785 | fw_iso_context_queue_flush(dev->broadcast_rcv_context); | |
786 | else | |
8408dc1c | 787 | dev_err(&dev->netdev->dev, "requeue failed\n"); |
c76acec6 JF |
788 | } |
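/*
 * Reception thus cycles through num_broadcast_rcv_ptrs slots of
 * rcv_buffer_size bytes each, mapped across the FWNET_ISO_PAGE_COUNT pages
 * set up in fwnet_broadcast_start(); every completed packet is requeued at
 * the same offset, so the ring never runs dry.
 */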
789 | ||
f91e3bd8 SR |
790 | static struct kmem_cache *fwnet_packet_task_cache; |
791 | ||
110f82d7 SR |
792 | static void fwnet_free_ptask(struct fwnet_packet_task *ptask) |
793 | { | |
794 | dev_kfree_skb_any(ptask->skb); | |
795 | kmem_cache_free(fwnet_packet_task_cache, ptask); | |
796 | } | |
797 | ||
b2268830 SR |
798 | /* Caller must hold dev->lock. */ |
799 | static void dec_queued_datagrams(struct fwnet_device *dev) | |
800 | { | |
801 | if (--dev->queued_datagrams == FWNET_MIN_QUEUED_DATAGRAMS) | |
802 | netif_wake_queue(dev->netdev); | |
803 | } | |
804 | ||
f91e3bd8 SR |
805 | static int fwnet_send_packet(struct fwnet_packet_task *ptask); |
806 | ||
807 | static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) | |
808 | { | |
110f82d7 | 809 | struct fwnet_device *dev = ptask->dev; |
902bca00 | 810 | struct sk_buff *skb = ptask->skb; |
c76acec6 | 811 | unsigned long flags; |
110f82d7 | 812 | bool free; |
f91e3bd8 SR |
813 | |
814 | spin_lock_irqsave(&dev->lock, flags); | |
f91e3bd8 | 815 | |
110f82d7 SR |
816 | ptask->outstanding_pkts--; |
817 | ||
818 | /* Check whether we or the networking TX soft-IRQ is last user. */ | |
48553011 | 819 | free = (ptask->outstanding_pkts == 0 && ptask->enqueued); |
7ee11fa8 | 820 | if (free) |
b2268830 | 821 | dec_queued_datagrams(dev); |
110f82d7 | 822 | |
902bca00 | 823 | if (ptask->outstanding_pkts == 0) { |
902bca00 SR |
824 | dev->netdev->stats.tx_packets++; |
825 | dev->netdev->stats.tx_bytes += skb->len; | |
826 | } | |
110f82d7 SR |
827 | |
828 | spin_unlock_irqrestore(&dev->lock, flags); | |
f91e3bd8 SR |
829 | |
830 | if (ptask->outstanding_pkts > 0) { | |
c76acec6 JF |
831 | u16 dg_size; |
832 | u16 fg_off; | |
833 | u16 datagram_label; | |
834 | u16 lf; | |
c76acec6 JF |
835 | |
836 | /* Update the ptask to point to the next fragment and send it */ | |
f91e3bd8 | 837 | lf = fwnet_get_hdr_lf(&ptask->hdr); |
c76acec6 | 838 | switch (lf) { |
f91e3bd8 SR |
839 | case RFC2374_HDR_LASTFRAG: |
840 | case RFC2374_HDR_UNFRAG: | |
c76acec6 | 841 | default: |
8408dc1c SR |
842 | dev_err(&dev->netdev->dev, |
843 | "outstanding packet %x lf %x, header %x,%x\n", | |
844 | ptask->outstanding_pkts, lf, ptask->hdr.w0, | |
845 | ptask->hdr.w1); | |
c76acec6 JF |
846 | BUG(); |
847 | ||
f91e3bd8 | 848 | case RFC2374_HDR_FIRSTFRAG: |
c76acec6 | 849 | /* Set frag type here for future interior fragments */ |
f91e3bd8 SR |
850 | dg_size = fwnet_get_hdr_dg_size(&ptask->hdr); |
851 | fg_off = ptask->max_payload - RFC2374_FRAG_HDR_SIZE; | |
852 | datagram_label = fwnet_get_hdr_dgl(&ptask->hdr); | |
c76acec6 JF |
853 | break; |
854 | ||
f91e3bd8 SR |
855 | case RFC2374_HDR_INTFRAG: |
856 | dg_size = fwnet_get_hdr_dg_size(&ptask->hdr); | |
857 | fg_off = fwnet_get_hdr_fg_off(&ptask->hdr) | |
858 | + ptask->max_payload - RFC2374_FRAG_HDR_SIZE; | |
859 | datagram_label = fwnet_get_hdr_dgl(&ptask->hdr); | |
c76acec6 JF |
860 | break; |
861 | } | |
902bca00 | 862 | |
9d237342 SG |
863 | if (ptask->dest_node == IEEE1394_ALL_NODES) { |
864 | skb_pull(skb, | |
865 | ptask->max_payload + IEEE1394_GASP_HDR_SIZE); | |
866 | } else { | |
867 | skb_pull(skb, ptask->max_payload); | |
868 | } | |
f91e3bd8 SR |
869 | if (ptask->outstanding_pkts > 1) { |
870 | fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG, | |
871 | dg_size, fg_off, datagram_label); | |
c76acec6 | 872 | } else { |
f91e3bd8 SR |
873 | fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_LASTFRAG, |
874 | dg_size, fg_off, datagram_label); | |
875 | ptask->max_payload = skb->len + RFC2374_FRAG_HDR_SIZE; | |
c76acec6 | 876 | } |
f91e3bd8 | 877 | fwnet_send_packet(ptask); |
c76acec6 | 878 | } |
110f82d7 SR |
879 | |
880 | if (free) | |
881 | fwnet_free_ptask(ptask); | |
c76acec6 JF |
882 | } |
883 | ||
7ee11fa8 SR |
884 | static void fwnet_transmit_packet_failed(struct fwnet_packet_task *ptask) |
885 | { | |
886 | struct fwnet_device *dev = ptask->dev; | |
887 | unsigned long flags; | |
888 | bool free; | |
889 | ||
890 | spin_lock_irqsave(&dev->lock, flags); | |
891 | ||
892 | /* One fragment failed; don't try to send remaining fragments. */ | |
893 | ptask->outstanding_pkts = 0; | |
894 | ||
895 | /* Check whether we or the networking TX soft-IRQ is last user. */ | |
48553011 | 896 | free = ptask->enqueued; |
7ee11fa8 | 897 | if (free) |
b2268830 | 898 | dec_queued_datagrams(dev); |
7ee11fa8 SR |
899 | |
900 | dev->netdev->stats.tx_dropped++; | |
901 | dev->netdev->stats.tx_errors++; | |
902 | ||
903 | spin_unlock_irqrestore(&dev->lock, flags); | |
904 | ||
905 | if (free) | |
906 | fwnet_free_ptask(ptask); | |
907 | } | |
908 | ||
f91e3bd8 SR |
909 | static void fwnet_write_complete(struct fw_card *card, int rcode, |
910 | void *payload, size_t length, void *data) | |
911 | { | |
c4d6fd40 ML |
912 | struct fwnet_packet_task *ptask = data; |
913 | static unsigned long j; | |
914 | static int last_rcode, errors_skipped; | |
c76acec6 | 915 | |
7ee11fa8 | 916 | if (rcode == RCODE_COMPLETE) { |
f91e3bd8 | 917 | fwnet_transmit_packet_done(ptask); |
7ee11fa8 | 918 | } else { |
c4d6fd40 | 919 | if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) { |
8408dc1c SR |
920 | dev_err(&ptask->dev->netdev->dev, |
921 | "fwnet_write_complete failed: %x (skipped %d)\n", | |
922 | rcode, errors_skipped); | |
c4d6fd40 ML |
923 | |
924 | errors_skipped = 0; | |
925 | last_rcode = rcode; | |
89875833 | 926 | } else { |
c4d6fd40 | 927 | errors_skipped++; |
89875833 SR |
928 | } |
929 | fwnet_transmit_packet_failed(ptask); | |
7ee11fa8 | 930 | } |
c76acec6 JF |
931 | } |
932 | ||
f91e3bd8 SR |
933 | static int fwnet_send_packet(struct fwnet_packet_task *ptask) |
934 | { | |
935 | struct fwnet_device *dev; | |
c76acec6 | 936 | unsigned tx_len; |
f91e3bd8 | 937 | struct rfc2734_header *bufhdr; |
c76acec6 | 938 | unsigned long flags; |
110f82d7 | 939 | bool free; |
c76acec6 | 940 | |
f91e3bd8 | 941 | dev = ptask->dev; |
c76acec6 | 942 | tx_len = ptask->max_payload; |
f91e3bd8 SR |
943 | switch (fwnet_get_hdr_lf(&ptask->hdr)) { |
944 | case RFC2374_HDR_UNFRAG: | |
945 | bufhdr = (struct rfc2734_header *) | |
946 | skb_push(ptask->skb, RFC2374_UNFRAG_HDR_SIZE); | |
947 | put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0); | |
c76acec6 JF |
948 | break; |
949 | ||
f91e3bd8 SR |
950 | case RFC2374_HDR_FIRSTFRAG: |
951 | case RFC2374_HDR_INTFRAG: | |
952 | case RFC2374_HDR_LASTFRAG: | |
953 | bufhdr = (struct rfc2734_header *) | |
954 | skb_push(ptask->skb, RFC2374_FRAG_HDR_SIZE); | |
955 | put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0); | |
956 | put_unaligned_be32(ptask->hdr.w1, &bufhdr->w1); | |
c76acec6 JF |
957 | break; |
958 | ||
959 | default: | |
960 | BUG(); | |
961 | } | |
f91e3bd8 SR |
962 | if (ptask->dest_node == IEEE1394_ALL_NODES) { |
963 | u8 *p; | |
c76acec6 | 964 | int generation; |
f91e3bd8 | 965 | int node_id; |
cb6bf355 | 966 | unsigned int sw_version; |
c76acec6 JF |
967 | |
968 | /* ptask->generation may not have been set yet */ | |
f91e3bd8 | 969 | generation = dev->card->generation; |
c76acec6 | 970 | smp_rmb(); |
f91e3bd8 SR |
971 | node_id = dev->card->node_id; |
972 | ||
cb6bf355 YH |
973 | switch (ptask->skb->protocol) { |
974 | default: | |
975 | sw_version = RFC2734_SW_VERSION; | |
976 | break; | |
977 | #if IS_ENABLED(CONFIG_IPV6) | |
978 | case htons(ETH_P_IPV6): | |
979 | sw_version = RFC3146_SW_VERSION; | |
980 | #endif | |
981 | } | |
982 | ||
9d237342 | 983 | p = skb_push(ptask->skb, IEEE1394_GASP_HDR_SIZE); |
f91e3bd8 SR |
984 | put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p); |
985 | put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24 | |
cb6bf355 | 986 | | sw_version, &p[4]); |
f91e3bd8 SR |
987 | |
988 | /* We should not transmit if broadcast_channel.valid == 0. */ | |
989 | fw_send_request(dev->card, &ptask->transaction, | |
990 | TCODE_STREAM_DATA, | |
991 | fw_stream_packet_destination_id(3, | |
992 | IEEE1394_BROADCAST_CHANNEL, 0), | |
993 | generation, SCODE_100, 0ULL, ptask->skb->data, | |
994 | tx_len + 8, fwnet_write_complete, ptask); | |
995 | ||
f91e3bd8 | 996 | spin_lock_irqsave(&dev->lock, flags); |
110f82d7 SR |
997 | |
998 | /* If the AT tasklet already ran, we may be last user. */ | |
48553011 | 999 | free = (ptask->outstanding_pkts == 0 && !ptask->enqueued); |
110f82d7 | 1000 | if (!free) |
48553011 SR |
1001 | ptask->enqueued = true; |
1002 | else | |
b2268830 | 1003 | dec_queued_datagrams(dev); |
110f82d7 | 1004 | |
f91e3bd8 SR |
1005 | spin_unlock_irqrestore(&dev->lock, flags); |
1006 | ||
110f82d7 | 1007 | goto out; |
c76acec6 | 1008 | } |
f91e3bd8 SR |
1009 | |
1010 | fw_send_request(dev->card, &ptask->transaction, | |
1011 | TCODE_WRITE_BLOCK_REQUEST, ptask->dest_node, | |
1012 | ptask->generation, ptask->speed, ptask->fifo_addr, | |
1013 | ptask->skb->data, tx_len, fwnet_write_complete, ptask); | |
1014 | ||
f91e3bd8 | 1015 | spin_lock_irqsave(&dev->lock, flags); |
110f82d7 SR |
1016 | |
1017 | /* If the AT tasklet already ran, we may be last user. */ | |
48553011 | 1018 | free = (ptask->outstanding_pkts == 0 && !ptask->enqueued); |
110f82d7 | 1019 | if (!free) |
48553011 SR |
1020 | ptask->enqueued = true; |
1021 | else | |
b2268830 | 1022 | dec_queued_datagrams(dev); |
110f82d7 | 1023 | |
f91e3bd8 SR |
1024 | spin_unlock_irqrestore(&dev->lock, flags); |
1025 | ||
860e9538 | 1026 | netif_trans_update(dev->netdev); |
110f82d7 SR |
1027 | out: |
1028 | if (free) | |
1029 | fwnet_free_ptask(ptask); | |
f91e3bd8 | 1030 | |
c76acec6 JF |
1031 | return 0; |
1032 | } | |
1033 | ||
9d39c90a YH |
1034 | static void fwnet_fifo_stop(struct fwnet_device *dev) |
1035 | { | |
1036 | if (dev->local_fifo == FWNET_NO_FIFO_ADDR) | |
1037 | return; | |
1038 | ||
1039 | fw_core_remove_address_handler(&dev->handler); | |
1040 | dev->local_fifo = FWNET_NO_FIFO_ADDR; | |
1041 | } | |
1042 | ||
1043 | static int fwnet_fifo_start(struct fwnet_device *dev) | |
1044 | { | |
1045 | int retval; | |
1046 | ||
1047 | if (dev->local_fifo != FWNET_NO_FIFO_ADDR) | |
1048 | return 0; | |
1049 | ||
1050 | dev->handler.length = 4096; | |
1051 | dev->handler.address_callback = fwnet_receive_packet; | |
1052 | dev->handler.callback_data = dev; | |
1053 | ||
1054 | retval = fw_core_add_address_handler(&dev->handler, | |
1055 | &fw_high_memory_region); | |
1056 | if (retval < 0) | |
1057 | return retval; | |
1058 | ||
1059 | dev->local_fifo = dev->handler.offset; | |
1060 | ||
1061 | return 0; | |
1062 | } | |
1063 | ||
111534cd YH |
1064 | static void __fwnet_broadcast_stop(struct fwnet_device *dev) |
1065 | { | |
1066 | unsigned u; | |
1067 | ||
1068 | if (dev->broadcast_state != FWNET_BROADCAST_ERROR) { | |
1069 | for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++) | |
1070 | kunmap(dev->broadcast_rcv_buffer.pages[u]); | |
1071 | fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer, dev->card); | |
1072 | } | |
1073 | if (dev->broadcast_rcv_context) { | |
1074 | fw_iso_context_destroy(dev->broadcast_rcv_context); | |
1075 | dev->broadcast_rcv_context = NULL; | |
1076 | } | |
1077 | kfree(dev->broadcast_rcv_buffer_ptrs); | |
1078 | dev->broadcast_rcv_buffer_ptrs = NULL; | |
1079 | dev->broadcast_state = FWNET_BROADCAST_ERROR; | |
1080 | } | |
1081 | ||
1082 | static void fwnet_broadcast_stop(struct fwnet_device *dev) | |
1083 | { | |
1084 | if (dev->broadcast_state == FWNET_BROADCAST_ERROR) | |
1085 | return; | |
1086 | fw_iso_context_stop(dev->broadcast_rcv_context); | |
1087 | __fwnet_broadcast_stop(dev); | |
1088 | } | |
1089 | ||
f91e3bd8 SR |
1090 | static int fwnet_broadcast_start(struct fwnet_device *dev) |
1091 | { | |
c76acec6 JF |
1092 | struct fw_iso_context *context; |
1093 | int retval; | |
1094 | unsigned num_packets; | |
1095 | unsigned max_receive; | |
1096 | struct fw_iso_packet packet; | |
1097 | unsigned long offset; | |
f2090594 | 1098 | void **ptrptr; |
c76acec6 | 1099 | unsigned u; |
c76acec6 | 1100 | |
2fbd8dfe YH |
1101 | if (dev->broadcast_state != FWNET_BROADCAST_ERROR) |
1102 | return 0; | |
1103 | ||
f91e3bd8 SR |
1104 | max_receive = 1U << (dev->card->max_receive + 1); |
1105 | num_packets = (FWNET_ISO_PAGE_COUNT * PAGE_SIZE) / max_receive; | |
1106 | ||
eac31d58 YH |
1107 | ptrptr = kmalloc(sizeof(void *) * num_packets, GFP_KERNEL); |
1108 | if (!ptrptr) { | |
1109 | retval = -ENOMEM; | |
111534cd | 1110 | goto failed; |
eac31d58 YH |
1111 | } |
1112 | dev->broadcast_rcv_buffer_ptrs = ptrptr; | |
1113 | ||
f2090594 YH |
1114 | context = fw_iso_context_create(dev->card, FW_ISO_CONTEXT_RECEIVE, |
1115 | IEEE1394_BROADCAST_CHANNEL, | |
1116 | dev->card->link_speed, 8, | |
1117 | fwnet_receive_broadcast, dev); | |
1118 | if (IS_ERR(context)) { | |
1119 | retval = PTR_ERR(context); | |
111534cd | 1120 | goto failed; |
f2090594 | 1121 | } |
f91e3bd8 | 1122 | |
f2090594 YH |
1123 | retval = fw_iso_buffer_init(&dev->broadcast_rcv_buffer, dev->card, |
1124 | FWNET_ISO_PAGE_COUNT, DMA_FROM_DEVICE); | |
1125 | if (retval < 0) | |
111534cd YH |
1126 | goto failed; |
1127 | ||
1128 | dev->broadcast_state = FWNET_BROADCAST_STOPPED; | |
f91e3bd8 | 1129 | |
f2090594 YH |
1130 | for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++) { |
1131 | void *ptr; | |
1132 | unsigned v; | |
c76acec6 | 1133 | |
f2090594 YH |
1134 | ptr = kmap(dev->broadcast_rcv_buffer.pages[u]); |
1135 | for (v = 0; v < num_packets / FWNET_ISO_PAGE_COUNT; v++) | |
1136 | *ptrptr++ = (void *) ((char *)ptr + v * max_receive); | |
f91e3bd8 | 1137 | } |
f2090594 | 1138 | dev->broadcast_rcv_context = context; |
c76acec6 JF |
1139 | |
1140 | packet.payload_length = max_receive; | |
1141 | packet.interrupt = 1; | |
1142 | packet.skip = 0; | |
1143 | packet.tag = 3; | |
1144 | packet.sy = 0; | |
f91e3bd8 | 1145 | packet.header_length = IEEE1394_GASP_HDR_SIZE; |
c76acec6 | 1146 | offset = 0; |
f91e3bd8 SR |
1147 | |
1148 | for (u = 0; u < num_packets; u++) { | |
1149 | retval = fw_iso_context_queue(context, &packet, | |
1150 | &dev->broadcast_rcv_buffer, offset); | |
1151 | if (retval < 0) | |
111534cd | 1152 | goto failed; |
f91e3bd8 | 1153 | |
c76acec6 JF |
1154 | offset += max_receive; |
1155 | } | |
f91e3bd8 SR |
1156 | dev->num_broadcast_rcv_ptrs = num_packets; |
1157 | dev->rcv_buffer_size = max_receive; | |
1158 | dev->broadcast_rcv_next_ptr = 0U; | |
1159 | retval = fw_iso_context_start(context, -1, 0, | |
1160 | FW_ISO_CONTEXT_MATCH_ALL_TAGS); /* ??? sync */ | |
1161 | if (retval < 0) | |
111534cd | 1162 | goto failed; |
f91e3bd8 SR |
1163 | |
1164 | /* FIXME: adjust it according to the min. speed of all known peers? */ | |
1165 | dev->broadcast_xmt_max_payload = IEEE1394_MAX_PAYLOAD_S100 | |
1166 | - IEEE1394_GASP_HDR_SIZE - RFC2374_UNFRAG_HDR_SIZE; | |
1167 | dev->broadcast_state = FWNET_BROADCAST_RUNNING; | |
1168 | ||
c76acec6 JF |
1169 | return 0; |
1170 | ||
111534cd YH |
1171 | failed: |
1172 | __fwnet_broadcast_stop(dev); | |
c76acec6 JF |
1173 | return retval; |
1174 | } | |
1175 | ||
c1671470 SR |
1176 | static void set_carrier_state(struct fwnet_device *dev) |
1177 | { | |
1178 | if (dev->peer_count > 1) | |
1179 | netif_carrier_on(dev->netdev); | |
1180 | else | |
1181 | netif_carrier_off(dev->netdev); | |
1182 | } | |
1183 | ||
f91e3bd8 SR |
1184 | /* ifup */ |
1185 | static int fwnet_open(struct net_device *net) | |
1186 | { | |
1187 | struct fwnet_device *dev = netdev_priv(net); | |
c76acec6 JF |
1188 | int ret; |
1189 | ||
2fbd8dfe YH |
1190 | ret = fwnet_broadcast_start(dev); |
1191 | if (ret) | |
382c4b40 | 1192 | return ret; |
2fbd8dfe | 1193 | |
f91e3bd8 SR |
1194 | netif_start_queue(net); |
1195 | ||
c1671470 SR |
1196 | spin_lock_irq(&dev->lock); |
1197 | set_carrier_state(dev); | |
1198 | spin_unlock_irq(&dev->lock); | |
1199 | ||
c76acec6 JF |
1200 | return 0; |
1201 | } | |
1202 | ||
f91e3bd8 SR |
1203 | /* ifdown */ |
1204 | static int fwnet_stop(struct net_device *net) | |
c76acec6 | 1205 | { |
8559e7f0 YH |
1206 | struct fwnet_device *dev = netdev_priv(net); |
1207 | ||
f91e3bd8 | 1208 | netif_stop_queue(net); |
8559e7f0 | 1209 | fwnet_broadcast_stop(dev); |
c76acec6 | 1210 | |
c76acec6 JF |
1211 | return 0; |
1212 | } | |
1213 | ||
424efe9c | 1214 | static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net) |
c76acec6 | 1215 | { |
f91e3bd8 SR |
1216 | struct fwnet_header hdr_buf; |
1217 | struct fwnet_device *dev = netdev_priv(net); | |
c76acec6 JF |
1218 | __be16 proto; |
1219 | u16 dest_node; | |
c76acec6 JF |
1220 | unsigned max_payload; |
1221 | u16 dg_size; | |
1222 | u16 *datagram_label_ptr; | |
f91e3bd8 | 1223 | struct fwnet_packet_task *ptask; |
5a124d38 SR |
1224 | struct fwnet_peer *peer; |
1225 | unsigned long flags; | |
c76acec6 | 1226 | |
b2268830 SR |
1227 | spin_lock_irqsave(&dev->lock, flags); |
1228 | ||
1229 | /* Can this happen? */ | |
1230 | if (netif_queue_stopped(dev->netdev)) { | |
1231 | spin_unlock_irqrestore(&dev->lock, flags); | |
1232 | ||
1233 | return NETDEV_TX_BUSY; | |
1234 | } | |
1235 | ||
f91e3bd8 | 1236 | ptask = kmem_cache_alloc(fwnet_packet_task_cache, GFP_ATOMIC); |
c76acec6 JF |
1237 | if (ptask == NULL) |
1238 | goto fail; | |
1239 | ||
1240 | skb = skb_share_check(skb, GFP_ATOMIC); | |
1241 | if (!skb) | |
1242 | goto fail; | |
1243 | ||
1244 | /* | |
f91e3bd8 | 1245 | * Make a copy of the driver-specific header. |
c76acec6 JF |
1246 | * We might need to rebuild the header on tx failure. |
1247 | */ | |
1248 | memcpy(&hdr_buf, skb->data, sizeof(hdr_buf)); | |
c76acec6 | 1249 | proto = hdr_buf.h_proto; |
18406d7e YH |
1250 | |
1251 | switch (proto) { | |
1252 | case htons(ETH_P_ARP): | |
1253 | case htons(ETH_P_IP): | |
cb6bf355 YH |
1254 | #if IS_ENABLED(CONFIG_IPV6) |
1255 | case htons(ETH_P_IPV6): | |
1256 | #endif | |
18406d7e YH |
1257 | break; |
1258 | default: | |
1259 | goto fail; | |
1260 | } | |
1261 | ||
1262 | skb_pull(skb, sizeof(hdr_buf)); | |
c76acec6 JF |
1263 | dg_size = skb->len; |
1264 | ||
1265 | /* | |
1266 | * Set the transmission type for the packet. ARP packets and IP | |
1267 | * broadcast packets are sent via GASP. | |
1268 | */ | |
021b97e4 | 1269 | if (fwnet_hwaddr_is_multicast(hdr_buf.h_dest)) { |
5a124d38 | 1270 | max_payload = dev->broadcast_xmt_max_payload; |
f91e3bd8 SR |
1271 | datagram_label_ptr = &dev->broadcast_xmt_datagramlabel; |
1272 | ||
5a124d38 SR |
1273 | ptask->fifo_addr = FWNET_NO_FIFO_ADDR; |
1274 | ptask->generation = 0; | |
1275 | ptask->dest_node = IEEE1394_ALL_NODES; | |
1276 | ptask->speed = SCODE_100; | |
c76acec6 | 1277 | } else { |
6752c8db YH |
1278 | union fwnet_hwaddr *ha = (union fwnet_hwaddr *)hdr_buf.h_dest; |
1279 | __be64 guid = get_unaligned(&ha->uc.uniq_id); | |
c76acec6 JF |
1280 | u8 generation; |
1281 | ||
f91e3bd8 | 1282 | peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid)); |
6752c8db | 1283 | if (!peer) |
b2268830 | 1284 | goto fail; |
c76acec6 | 1285 | |
5a124d38 SR |
1286 | generation = peer->generation; |
1287 | dest_node = peer->node_id; | |
1288 | max_payload = peer->max_payload; | |
f91e3bd8 | 1289 | datagram_label_ptr = &peer->datagram_label; |
c76acec6 | 1290 | |
6752c8db | 1291 | ptask->fifo_addr = fwnet_hwaddr_fifo(ha); |
5a124d38 SR |
1292 | ptask->generation = generation; |
1293 | ptask->dest_node = dest_node; | |
1294 | ptask->speed = peer->speed; | |
c76acec6 JF |
1295 | } |
1296 | ||
c76acec6 JF |
1297 | ptask->hdr.w0 = 0; |
1298 | ptask->hdr.w1 = 0; | |
1299 | ptask->skb = skb; | |
f91e3bd8 SR |
1300 | ptask->dev = dev; |
1301 | ||
c76acec6 | 1302 | /* Does it all fit in one packet? */ |
f91e3bd8 SR |
1303 | if (dg_size <= max_payload) { |
1304 | fwnet_make_uf_hdr(&ptask->hdr, ntohs(proto)); | |
c76acec6 | 1305 | ptask->outstanding_pkts = 1; |
f91e3bd8 | 1306 | max_payload = dg_size + RFC2374_UNFRAG_HDR_SIZE; |
c76acec6 JF |
1307 | } else { |
1308 | u16 datagram_label; | |
1309 | ||
f91e3bd8 | 1310 | max_payload -= RFC2374_FRAG_OVERHEAD; |
c76acec6 | 1311 | datagram_label = (*datagram_label_ptr)++; |
f91e3bd8 SR |
1312 | fwnet_make_ff_hdr(&ptask->hdr, ntohs(proto), dg_size, |
1313 | datagram_label); | |
c76acec6 | 1314 | ptask->outstanding_pkts = DIV_ROUND_UP(dg_size, max_payload); |
f91e3bd8 | 1315 | max_payload += RFC2374_FRAG_HDR_SIZE; |
c76acec6 | 1316 | } |
5a124d38 | 1317 | |
b2268830 SR |
1318 | if (++dev->queued_datagrams == FWNET_MAX_QUEUED_DATAGRAMS) |
1319 | netif_stop_queue(dev->netdev); | |
48553011 | 1320 | |
5a124d38 SR |
1321 | spin_unlock_irqrestore(&dev->lock, flags); |
1322 | ||
c76acec6 | 1323 | ptask->max_payload = max_payload; |
48553011 | 1324 | ptask->enqueued = 0; |
110f82d7 | 1325 | |
f91e3bd8 SR |
1326 | fwnet_send_packet(ptask); |
1327 | ||
c76acec6 JF |
1328 | return NETDEV_TX_OK; |
1329 | ||
1330 | fail: | |
b2268830 SR |
1331 | spin_unlock_irqrestore(&dev->lock, flags); |
1332 | ||
c76acec6 | 1333 | if (ptask) |
f91e3bd8 | 1334 | kmem_cache_free(fwnet_packet_task_cache, ptask); |
c76acec6 JF |
1335 | |
1336 | if (skb != NULL) | |
1337 | dev_kfree_skb(skb); | |
1338 | ||
f91e3bd8 SR |
1339 | net->stats.tx_dropped++; |
1340 | net->stats.tx_errors++; | |
c76acec6 JF |
1341 | |
1342 | /* | |
1343 | * FIXME: According to a patch from 2003-02-26, "returning non-zero | |
1344 | * causes serious problems" here, allegedly. Before that patch, | |
1345 | * -ERRNO was returned which is not appropriate under Linux 2.6. | |
1346 | * Perhaps more needs to be done? Stop the queue in serious | |
1347 | * conditions and restart it elsewhere? | |
1348 | */ | |
1349 | return NETDEV_TX_OK; | |
1350 | } | |
1351 | ||
f91e3bd8 SR |
1352 | static int fwnet_change_mtu(struct net_device *net, int new_mtu) |
1353 | { | |
c76acec6 JF |
1354 | if (new_mtu < 68) |
1355 | return -EINVAL; | |
1356 | ||
f91e3bd8 | 1357 | net->mtu = new_mtu; |
c76acec6 JF |
1358 | return 0; |
1359 | } | |
1360 | ||
18bb36f9 ML |
1361 | static const struct ethtool_ops fwnet_ethtool_ops = { |
1362 | .get_link = ethtool_op_get_link, | |
1363 | }; | |
1364 | ||
f91e3bd8 SR |
1365 | static const struct net_device_ops fwnet_netdev_ops = { |
1366 | .ndo_open = fwnet_open, | |
1367 | .ndo_stop = fwnet_stop, | |
1368 | .ndo_start_xmit = fwnet_tx, | |
f91e3bd8 | 1369 | .ndo_change_mtu = fwnet_change_mtu, |
c76acec6 JF |
1370 | }; |
1371 | ||
f91e3bd8 SR |
1372 | static void fwnet_init_dev(struct net_device *net) |
1373 | { | |
1374 | net->header_ops = &fwnet_header_ops; | |
1375 | net->netdev_ops = &fwnet_netdev_ops; | |
1337f853 | 1376 | net->watchdog_timeo = 2 * HZ; |
f91e3bd8 SR |
1377 | net->flags = IFF_BROADCAST | IFF_MULTICAST; |
1378 | net->features = NETIF_F_HIGHDMA; | |
1379 | net->addr_len = FWNET_ALEN; | |
1380 | net->hard_header_len = FWNET_HLEN; | |
1381 | net->type = ARPHRD_IEEE1394; | |
b2268830 | 1382 | net->tx_queue_len = FWNET_TX_QUEUE_LEN; |
18bb36f9 | 1383 | net->ethtool_ops = &fwnet_ethtool_ops; |
c76acec6 JF |
1384 | } |
1385 | ||
5a124d38 SR |
1386 | /* caller must hold fwnet_device_mutex */ |
1387 | static struct fwnet_device *fwnet_dev_find(struct fw_card *card) | |
1388 | { | |
1389 | struct fwnet_device *dev; | |
1390 | ||
1391 | list_for_each_entry(dev, &fwnet_device_list, dev_link) | |
1392 | if (dev->card == card) | |
1393 | return dev; | |
1394 | ||
1395 | return NULL; | |
1396 | } | |
1397 | ||
1398 | static int fwnet_add_peer(struct fwnet_device *dev, | |
1399 | struct fw_unit *unit, struct fw_device *device) | |
1400 | { | |
1401 | struct fwnet_peer *peer; | |
1402 | ||
1403 | peer = kmalloc(sizeof(*peer), GFP_KERNEL); | |
1404 | if (!peer) | |
1405 | return -ENOMEM; | |
1406 | ||
b01b4bab SR |
1407 | dev_set_drvdata(&unit->device, peer); |
1408 | ||
5a124d38 SR |
1409 | peer->dev = dev; |
1410 | peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4]; | |
5a124d38 SR |
1411 | INIT_LIST_HEAD(&peer->pd_list); |
1412 | peer->pdg_size = 0; | |
1413 | peer->datagram_label = 0; | |
1414 | peer->speed = device->max_speed; | |
1415 | peer->max_payload = fwnet_max_payload(device->max_rec, peer->speed); | |
1416 | ||
1417 | peer->generation = device->generation; | |
1418 | smp_rmb(); | |
1419 | peer->node_id = device->node_id; | |
1420 | ||
1421 | spin_lock_irq(&dev->lock); | |
1422 | list_add_tail(&peer->peer_link, &dev->peer_list); | |
18bb36f9 | 1423 | dev->peer_count++; |
c1671470 | 1424 | set_carrier_state(dev); |
5a124d38 SR |
1425 | spin_unlock_irq(&dev->lock); |
1426 | ||
1427 | return 0; | |
1428 | } | |
1429 | ||
94a87157 SR |
1430 | static int fwnet_probe(struct fw_unit *unit, |
1431 | const struct ieee1394_device_id *id) | |
f91e3bd8 | 1432 | { |
f91e3bd8 SR |
1433 | struct fw_device *device = fw_parent_device(unit); |
1434 | struct fw_card *card = device->card; | |
1435 | struct net_device *net; | |
b01b4bab | 1436 | bool allocated_netdev = false; |
f91e3bd8 | 1437 | struct fwnet_device *dev; |
c76acec6 | 1438 | unsigned max_mtu; |
5a124d38 | 1439 | int ret; |
6752c8db | 1440 | union fwnet_hwaddr *ha; |
c76acec6 | 1441 | |
5a124d38 | 1442 | mutex_lock(&fwnet_device_mutex); |
c76acec6 | 1443 | |
5a124d38 SR |
1444 | dev = fwnet_dev_find(card); |
1445 | if (dev) { | |
5a124d38 SR |
1446 | net = dev->netdev; |
1447 | goto have_dev; | |
c76acec6 | 1448 | } |
5a124d38 | 1449 | |
c835a677 TG |
1450 | net = alloc_netdev(sizeof(*dev), "firewire%d", NET_NAME_UNKNOWN, |
1451 | fwnet_init_dev); | |
f91e3bd8 | 1452 | if (net == NULL) { |
1118f8d0 DY |
1453 | mutex_unlock(&fwnet_device_mutex); |
1454 | return -ENOMEM; | |
c76acec6 JF |
1455 | } |
1456 | ||
b01b4bab | 1457 | allocated_netdev = true; |
f91e3bd8 SR |
1458 | SET_NETDEV_DEV(net, card->device); |
1459 | dev = netdev_priv(net); | |
c76acec6 | 1460 | |
f91e3bd8 SR |
1461 | spin_lock_init(&dev->lock); |
1462 | dev->broadcast_state = FWNET_BROADCAST_ERROR; | |
1463 | dev->broadcast_rcv_context = NULL; | |
1464 | dev->broadcast_xmt_max_payload = 0; | |
1465 | dev->broadcast_xmt_datagramlabel = 0; | |
f91e3bd8 | 1466 | dev->local_fifo = FWNET_NO_FIFO_ADDR; |
48553011 | 1467 | dev->queued_datagrams = 0; |
5a124d38 | 1468 | INIT_LIST_HEAD(&dev->peer_list); |
f91e3bd8 | 1469 | dev->card = card; |
5a124d38 | 1470 | dev->netdev = net; |
c76acec6 | 1471 | |
382c4b40 YH |
1472 | ret = fwnet_fifo_start(dev); |
1473 | if (ret < 0) | |
1474 | goto out; | |
1475 | dev->local_fifo = dev->handler.offset; | |
1476 | ||
c76acec6 JF |
1477 | /* |
1478 | * Use the RFC 2734 default 1500 octets or the maximum payload | |
1479 | * as initial MTU | |
1480 | */ | |
1481 | max_mtu = (1 << (card->max_receive + 1)) | |
f91e3bd8 SR |
1482 | - sizeof(struct rfc2734_header) - IEEE1394_GASP_HDR_SIZE; |
1483 | net->mtu = min(1500U, max_mtu); | |
c76acec6 JF |
1484 | |
1485 | /* Set our hardware address while we're at it */ | |
6752c8db YH |
1486 | ha = (union fwnet_hwaddr *)net->dev_addr; |
1487 | put_unaligned_be64(card->guid, &ha->uc.uniq_id); | |
1488 | ha->uc.max_rec = dev->card->max_receive; | |
1489 | ha->uc.sspd = dev->card->link_speed; | |
1490 | put_unaligned_be16(dev->local_fifo >> 32, &ha->uc.fifo_hi); | |
1491 | put_unaligned_be32(dev->local_fifo & 0xffffffff, &ha->uc.fifo_lo); | |
1492 | ||
1493 | memset(net->broadcast, -1, net->addr_len); | |
1494 | ||
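The hardware address filled in above is the RFC 2734/3146 unicast format: the node's EUI-64, its max_rec and speed codes, and the 48-bit response-FIFO offset split into a 16-bit high part and a 32-bit low part (fifo_hi/fifo_lo above). A standalone sketch of that split, using an illustrative offset value only:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* hypothetical FIFO offset within the node's 48-bit address space */
        uint64_t fifo = 0x0000fffe12345678ULL;

        uint16_t fifo_hi = fifo >> 32;            /* upper 16 bits of the offset */
        uint32_t fifo_lo = fifo & 0xffffffff;     /* lower 32 bits */

        printf("fifo_hi=%04x fifo_lo=%08x\n", fifo_hi, fifo_lo);
        return 0;
    }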
5a124d38 | 1495 | ret = register_netdev(net); |
8408dc1c | 1496 | if (ret) |
c76acec6 | 1497 | goto out; |
c76acec6 | 1498 | |
5a124d38 | 1499 | list_add_tail(&dev->dev_link, &fwnet_device_list); |
cb6bf355 | 1500 | dev_notice(&net->dev, "IP over IEEE 1394 on card %s\n", |
8408dc1c | 1501 | dev_name(card->device)); |
5a124d38 SR |
1502 | have_dev: |
1503 | ret = fwnet_add_peer(dev, unit, device); | |
b01b4bab | 1504 | if (ret && allocated_netdev) { |
5a124d38 SR |
1505 | unregister_netdev(net); |
1506 | list_del(&dev->dev_link); | |
c76acec6 | 1507 | out: /* reached via goto only before the netdev has been registered */
382c4b40 | 1508 | fwnet_fifo_stop(dev); |
f91e3bd8 | 1509 | free_netdev(net); |
382c4b40 | 1510 | } |
f91e3bd8 | 1511 | |
5a124d38 SR |
1512 | mutex_unlock(&fwnet_device_mutex); |
1513 | ||
1514 | return ret; | |
1515 | } | |
1516 | ||
94a87157 SR |
1517 | /* |
1518 | * FIXME abort partially sent fragmented datagrams, | |
1519 | * discard partially received fragmented datagrams | |
1520 | */ | |
1521 | static void fwnet_update(struct fw_unit *unit) | |
1522 | { | |
1523 | struct fw_device *device = fw_parent_device(unit); | |
1524 | struct fwnet_peer *peer = dev_get_drvdata(&unit->device); | |
1525 | int generation; | |
1526 | ||
1527 | generation = device->generation; | |
1528 | ||
1529 | spin_lock_irq(&peer->dev->lock); | |
1530 | peer->node_id = device->node_id; | |
1531 | peer->generation = generation; | |
1532 | spin_unlock_irq(&peer->dev->lock); | |
1533 | } | |
1534 | ||
c1671470 | 1535 | static void fwnet_remove_peer(struct fwnet_peer *peer, struct fwnet_device *dev) |
5a124d38 SR |
1536 | { |
1537 | struct fwnet_partial_datagram *pd, *pd_next; | |
1538 | ||
c1671470 | 1539 | spin_lock_irq(&dev->lock); |
5a124d38 | 1540 | list_del(&peer->peer_link); |
c1671470 SR |
1541 | dev->peer_count--; |
1542 | set_carrier_state(dev); | |
1543 | spin_unlock_irq(&dev->lock); | |
5a124d38 SR |
1544 | |
1545 | list_for_each_entry_safe(pd, pd_next, &peer->pd_list, pd_link) | |
1546 | fwnet_pd_delete(pd); | |
1547 | ||
1548 | kfree(peer); | |
c76acec6 JF |
1549 | } |
1550 | ||
94a87157 | 1551 | static void fwnet_remove(struct fw_unit *unit) |
f91e3bd8 | 1552 | { |
94a87157 | 1553 | struct fwnet_peer *peer = dev_get_drvdata(&unit->device); |
5a124d38 | 1554 | struct fwnet_device *dev = peer->dev; |
f91e3bd8 | 1555 | struct net_device *net; |
48553011 | 1556 | int i; |
f91e3bd8 | 1557 | |
5a124d38 | 1558 | mutex_lock(&fwnet_device_mutex); |
c76acec6 | 1559 | |
74a14504 | 1560 | net = dev->netdev; |
74a14504 | 1561 | |
c1671470 | 1562 | fwnet_remove_peer(peer, dev); |
18bb36f9 | 1563 | |
5a124d38 | 1564 | if (list_empty(&dev->peer_list)) { |
f91e3bd8 SR |
1565 | unregister_netdev(net); |
1566 | ||
382c4b40 YH |
1567 | fwnet_fifo_stop(dev); |
1568 | ||
48553011 SR |
1569 | for (i = 0; dev->queued_datagrams && i < 5; i++) /* give queued datagrams up to 5 s to drain */
1570 | ssleep(1); | |
1571 | WARN_ON(dev->queued_datagrams); | |
b01b4bab SR |
1572 | list_del(&dev->dev_link); |
1573 | ||
f91e3bd8 | 1574 | free_netdev(net); |
c76acec6 | 1575 | } |
f91e3bd8 | 1576 | |
5a124d38 | 1577 | mutex_unlock(&fwnet_device_mutex); |
c76acec6 JF |
1578 | } |
1579 | ||
f91e3bd8 SR |
1580 | static const struct ieee1394_device_id fwnet_id_table[] = { |
1581 | { | |
1582 | .match_flags = IEEE1394_MATCH_SPECIFIER_ID | | |
1583 | IEEE1394_MATCH_VERSION, | |
1584 | .specifier_id = IANA_SPECIFIER_ID, | |
1585 | .version = RFC2734_SW_VERSION, | |
1586 | }, | |
cb6bf355 YH |
1587 | #if IS_ENABLED(CONFIG_IPV6) |
1588 | { | |
1589 | .match_flags = IEEE1394_MATCH_SPECIFIER_ID | | |
1590 | IEEE1394_MATCH_VERSION, | |
1591 | .specifier_id = IANA_SPECIFIER_ID, | |
1592 | .version = RFC3146_SW_VERSION, | |
1593 | }, | |
1594 | #endif | |
f91e3bd8 SR |
1595 | { } |
1596 | }; | |
1597 | ||
1598 | static struct fw_driver fwnet_driver = { | |
c76acec6 | 1599 | .driver = { |
f91e3bd8 | 1600 | .owner = THIS_MODULE, |
59759ff6 | 1601 | .name = KBUILD_MODNAME, |
f91e3bd8 | 1602 | .bus = &fw_bus_type, |
c76acec6 | 1603 | }, |
94a87157 | 1604 | .probe = fwnet_probe, |
f91e3bd8 | 1605 | .update = fwnet_update, |
94a87157 | 1606 | .remove = fwnet_remove, |
f91e3bd8 SR |
1607 | .id_table = fwnet_id_table, |
1608 | }; | |
1609 | ||
1610 | static const u32 rfc2374_unit_directory_data[] = { | |
1611 | 0x00040000, /* directory_length */ | |
1612 | 0x1200005e, /* unit_specifier_id: IANA */ | |
1613 | 0x81000003, /* textual descriptor offset */ | |
1614 | 0x13000001, /* unit_sw_version: RFC 2734 */ | |
1615 | 0x81000005, /* textual descriptor offset */ | |
1616 | 0x00030000, /* descriptor_length */ | |
1617 | 0x00000000, /* text */ | |
1618 | 0x00000000, /* minimal ASCII, en */ | |
1619 | 0x49414e41, /* I A N A */ | |
1620 | 0x00030000, /* descriptor_length */ | |
1621 | 0x00000000, /* text */ | |
1622 | 0x00000000, /* minimal ASCII, en */ | |
1623 | 0x49507634, /* I P v 4 */ | |
1624 | }; | |
1625 | ||
1626 | static struct fw_descriptor rfc2374_unit_directory = { | |
1627 | .length = ARRAY_SIZE(rfc2374_unit_directory_data), | |
1628 | .key = (CSR_DIRECTORY | CSR_UNIT) << 24, | |
1629 | .data = rfc2374_unit_directory_data | |
c76acec6 JF |
1630 | }; |
1631 | ||
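Each quadlet in the unit directory above is a standard CSR directory entry: the top byte is the key and the lower 24 bits are the immediate value or offset. So 0x1200005e advertises specifier_id 0x00005e (IANA) and 0x13000001 advertises version 1 (RFC 2734); remote units exposing the same pair are what fwnet_id_table matches. A small decoder sketch of those two entries:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* the two immediate entries that correspond to fwnet_id_table */
        uint32_t entries[] = { 0x1200005e, 0x13000001 };

        for (unsigned int i = 0; i < 2; i++) {
            uint8_t  key   = entries[i] >> 24;        /* 0x12 = specifier ID, 0x13 = version */
            uint32_t value = entries[i] & 0xffffff;   /* 24-bit immediate value */
            printf("key 0x%02x value 0x%06x\n", key, value);
        }
        return 0;
    }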
cb6bf355 YH |
1632 | #if IS_ENABLED(CONFIG_IPV6) |
1633 | static const u32 rfc3146_unit_directory_data[] = { | |
1634 | 0x00040000, /* directory_length */ | |
1635 | 0x1200005e, /* unit_specifier_id: IANA */ | |
1636 | 0x81000003, /* textual descriptor offset */ | |
1637 | 0x13000002, /* unit_sw_version: RFC 3146 */ | |
1638 | 0x81000005, /* textual descriptor offset */ | |
1639 | 0x00030000, /* descriptor_length */ | |
1640 | 0x00000000, /* text */ | |
1641 | 0x00000000, /* minimal ASCII, en */ | |
1642 | 0x49414e41, /* I A N A */ | |
1643 | 0x00030000, /* descriptor_length */ | |
1644 | 0x00000000, /* text */ | |
1645 | 0x00000000, /* minimal ASCII, en */ | |
1646 | 0x49507636, /* I P v 6 */ | |
1647 | }; | |
1648 | ||
1649 | static struct fw_descriptor rfc3146_unit_directory = { | |
1650 | .length = ARRAY_SIZE(rfc3146_unit_directory_data), | |
1651 | .key = (CSR_DIRECTORY | CSR_UNIT) << 24, | |
1652 | .data = rfc3146_unit_directory_data | |
1653 | }; | |
1654 | #endif | |
1655 | ||
f91e3bd8 SR |
1656 | static int __init fwnet_init(void) |
1657 | { | |
1658 | int err; | |
1659 | ||
1660 | err = fw_core_add_descriptor(&rfc2374_unit_directory); | |
1661 | if (err) | |
1662 | return err; | |
c76acec6 | 1663 | |
cb6bf355 YH |
1664 | #if IS_ENABLED(CONFIG_IPV6) |
1665 | err = fw_core_add_descriptor(&rfc3146_unit_directory); | |
1666 | if (err) | |
1667 | goto out; | |
1668 | #endif | |
1669 | ||
f91e3bd8 SR |
1670 | fwnet_packet_task_cache = kmem_cache_create("packet_task", |
1671 | sizeof(struct fwnet_packet_task), 0, 0, NULL); | |
1672 | if (!fwnet_packet_task_cache) { | |
1673 | err = -ENOMEM; | |
cb6bf355 | 1674 | goto out2; |
f91e3bd8 SR |
1675 | } |
1676 | ||
1677 | err = driver_register(&fwnet_driver.driver); | |
1678 | if (!err) | |
1679 | return 0; | |
1680 | ||
1681 | kmem_cache_destroy(fwnet_packet_task_cache); | |
cb6bf355 YH |
1682 | out2: |
1683 | #if IS_ENABLED(CONFIG_IPV6) | |
1684 | fw_core_remove_descriptor(&rfc3146_unit_directory); | |
f91e3bd8 | 1685 | out: |
cb6bf355 | 1686 | #endif |
f91e3bd8 SR |
1687 | fw_core_remove_descriptor(&rfc2374_unit_directory); |
1688 | ||
1689 | return err; | |
c76acec6 | 1690 | } |
f91e3bd8 | 1691 | module_init(fwnet_init); |
c76acec6 | 1692 | |
f91e3bd8 SR |
1693 | static void __exit fwnet_cleanup(void) |
1694 | { | |
1695 | driver_unregister(&fwnet_driver.driver); | |
1696 | kmem_cache_destroy(fwnet_packet_task_cache); | |
cb6bf355 YH |
1697 | #if IS_ENABLED(CONFIG_IPV6) |
1698 | fw_core_remove_descriptor(&rfc3146_unit_directory); | |
1699 | #endif | |
f91e3bd8 | 1700 | fw_core_remove_descriptor(&rfc2374_unit_directory); |
c76acec6 | 1701 | } |
f91e3bd8 | 1702 | module_exit(fwnet_cleanup); |
c76acec6 | 1703 | |
f91e3bd8 | 1704 | MODULE_AUTHOR("Jay Fenlason <fenlason@redhat.com>"); |
cb6bf355 | 1705 | MODULE_DESCRIPTION("IP over IEEE1394 as per RFC 2734/3146"); |
f91e3bd8 SR |
1706 | MODULE_LICENSE("GPL"); |
1707 | MODULE_DEVICE_TABLE(ieee1394, fwnet_id_table); |