/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <net/neighbour.h>
#include "common.h"
#include "t3cdev.h"
#include "cxgb3_defs.h"
#include "l2t.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

#define VLAN_NONE 0xfff

/*
 * Module locking notes:  There is a RW lock protecting the L2 table as a
 * whole plus a spinlock per L2T entry.  Entry lookups and allocations happen
 * under the protection of the table lock, individual entry changes happen
 * while holding that entry's spinlock.  The table lock nests outside the
 * entry locks.  Allocations of new entries take the table lock as writers so
 * no other lookups can happen while allocating new entries.  Entry updates
 * take the table lock as readers so multiple entries can be updated in
 * parallel.  An L2T entry can be dropped by decrementing its reference count
 * and therefore can happen in parallel with entry allocation but no entry
 * can change state or increment its ref count during allocation as both of
 * these perform lookups.
 */

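/* Extract the 802.1p priority (the top three bits) from an entry's VLAN tag. */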
static inline unsigned int vlan_prio(const struct l2t_entry *e)
{
	return e->vlan >> 13;
}

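/* Hash a next-hop IP address and interface index into an L2 table bucket. */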
static inline unsigned int arp_hash(u32 key, int ifindex,
				    const struct l2t_data *d)
{
	return jhash_2words(key, ifindex, 0) & (d->nentries - 1);
}

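/*
 * Install a new neighbour on an L2T entry, taking a reference on the new
 * neighbour before releasing the old one so the swap is safe even when both
 * pointers refer to the same neighbour.
 */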
static inline void neigh_replace(struct l2t_entry *e, struct neighbour *n)
{
	neigh_hold(n);
	if (e->neigh)
		neigh_release(e->neigh);
	e->neigh = n;
}

/*
 * Set up an L2T entry and send any packets waiting in the arp queue.  The
 * supplied skb is used for the CPL_L2T_WRITE_REQ.  Must be called with the
 * entry locked.
 */
static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
				  struct l2t_entry *e)
{
	struct cpl_l2t_write_req *req;
	struct sk_buff *tmp;

	if (!skb) {
		skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;
	}

	req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
	req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
			    V_L2T_W_VLAN(e->vlan & VLAN_VID_MASK) |
			    V_L2T_W_PRIO(vlan_prio(e)));
	memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
	skb->priority = CPL_PRIORITY_CONTROL;
	cxgb3_ofld_send(dev, skb);

	skb_queue_walk_safe(&e->arpq, skb, tmp) {
		__skb_unlink(skb, &e->arpq);
		cxgb3_ofld_send(dev, skb);
	}
	e->state = L2T_STATE_VALID;

	return 0;
}

/*
 * Add a packet to an L2T entry's queue of packets awaiting resolution.
 * Must be called with the entry's lock held.
 */
static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
{
	__skb_queue_tail(&e->arpq, skb);
}

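/*
 * Slow-path transmit through an L2T entry.  A valid entry sends the packet
 * straight to the offload device, a stale entry is revalidated first, and a
 * resolving entry queues the packet on the arpq until ARP completes.
 */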
int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
		     struct l2t_entry *e)
{
again:
	switch (e->state) {
	case L2T_STATE_STALE:	/* entry is stale, kick off revalidation */
		neigh_event_send(e->neigh, NULL);
		spin_lock_bh(&e->lock);
		if (e->state == L2T_STATE_STALE)
			e->state = L2T_STATE_VALID;
		spin_unlock_bh(&e->lock);
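		/* fall through to the VALID case and send the packet */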
	case L2T_STATE_VALID:	/* fast-path, send the packet on */
		return cxgb3_ofld_send(dev, skb);
	case L2T_STATE_RESOLVING:
		spin_lock_bh(&e->lock);
		if (e->state != L2T_STATE_RESOLVING) {
			/* ARP already completed */
			spin_unlock_bh(&e->lock);
			goto again;
		}
		arpq_enqueue(e, skb);
		spin_unlock_bh(&e->lock);

		/*
		 * Only the first packet added to the arpq should kick off
		 * resolution.  However, because the alloc_skb below can fail,
		 * we allow each packet added to the arpq to retry resolution
		 * as a way of recovering from transient memory exhaustion.
		 * A better way would be to use a work request to retry L2T
		 * entries when there's no memory.
		 */
		if (!neigh_event_send(e->neigh, NULL)) {
			skb = alloc_skb(sizeof(struct cpl_l2t_write_req),
					GFP_ATOMIC);
			if (!skb)
				break;

			spin_lock_bh(&e->lock);
			if (!skb_queue_empty(&e->arpq))
				setup_l2e_send_pending(dev, skb, e);
			else	/* we lost the race */
				__kfree_skb(skb);
			spin_unlock_bh(&e->lock);
		}
	}
	return 0;
}

EXPORT_SYMBOL(t3_l2t_send_slow);

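/*
 * Like t3_l2t_send_slow() but without a packet to send: revalidate a stale
 * entry or re-kick address resolution so packets already queued on the entry
 * can eventually go out.
 */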
void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e)
{
again:
	switch (e->state) {
	case L2T_STATE_STALE:	/* entry is stale, kick off revalidation */
		neigh_event_send(e->neigh, NULL);
		spin_lock_bh(&e->lock);
		if (e->state == L2T_STATE_STALE) {
			e->state = L2T_STATE_VALID;
		}
		spin_unlock_bh(&e->lock);
		return;
	case L2T_STATE_VALID:	/* entry is already usable, nothing to do */
		return;
	case L2T_STATE_RESOLVING:
		spin_lock_bh(&e->lock);
		if (e->state != L2T_STATE_RESOLVING) {
			/* ARP already completed */
			spin_unlock_bh(&e->lock);
			goto again;
		}
		spin_unlock_bh(&e->lock);

		/*
		 * There is no packet to queue here, so simply kick the
		 * neighbour again; packets already sitting on the arpq are
		 * sent by setup_l2e_send_pending() once resolution completes.
		 */
		neigh_event_send(e->neigh, NULL);
	}
	return;
}

EXPORT_SYMBOL(t3_l2t_send_event);

/*
 * Allocate a free L2T entry.  Must be called with l2t_data.lock held.
 */
static struct l2t_entry *alloc_l2e(struct l2t_data *d)
{
	struct l2t_entry *end, *e, **p;

	if (!atomic_read(&d->nfree))
		return NULL;

	/* there's definitely a free entry */
	for (e = d->rover, end = &d->l2tab[d->nentries]; e != end; ++e)
		if (atomic_read(&e->refcnt) == 0)
			goto found;

	for (e = &d->l2tab[1]; atomic_read(&e->refcnt); ++e)
		;
found:
	d->rover = e + 1;
	atomic_dec(&d->nfree);

	/*
	 * The entry we found may be an inactive entry that is
	 * presently in the hash table.  We need to remove it.
	 */
	if (e->state != L2T_STATE_UNUSED) {
		int hash = arp_hash(e->addr, e->ifindex, d);

		for (p = &d->l2tab[hash].first; *p; p = &(*p)->next)
			if (*p == e) {
				*p = e->next;
				break;
			}
		e->state = L2T_STATE_UNUSED;
	}
	return e;
}

/*
 * Called when an L2T entry has no more users.  The entry is left in the hash
 * table since it is likely to be reused but we also bump nfree to indicate
 * that the entry can be reallocated for a different neighbor.  We also drop
 * the existing neighbor reference in case the neighbor is going away and is
 * waiting on our reference.
 *
 * Because entries can be reallocated to other neighbors once their ref count
 * drops to 0 we need to take the entry's lock to avoid races with a new
 * incarnation.
 */
void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e)
{
	spin_lock_bh(&e->lock);
	if (atomic_read(&e->refcnt) == 0) {	/* hasn't been recycled */
		if (e->neigh) {
			neigh_release(e->neigh);
			e->neigh = NULL;
		}
	}
	spin_unlock_bh(&e->lock);
	atomic_inc(&d->nfree);
}

EXPORT_SYMBOL(t3_l2e_free);

/*
 * Update an L2T entry that was previously used for the same next hop as neigh.
 * Must be called with softirqs disabled.
 */
static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
{
	unsigned int nud_state;

	spin_lock(&e->lock);	/* avoid race with t3_l2e_free */

	if (neigh != e->neigh)
		neigh_replace(e, neigh);
	nud_state = neigh->nud_state;
	if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
	    !(nud_state & NUD_VALID))
		e->state = L2T_STATE_RESOLVING;
	else if (nud_state & NUD_CONNECTED)
		e->state = L2T_STATE_VALID;
	else
		e->state = L2T_STATE_STALE;
	spin_unlock(&e->lock);
}

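/*
 * Find or allocate an L2T entry for the given neighbour and egress port.
 * The returned entry holds a reference for the caller; NULL is returned if
 * no free entry is available.
 */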
struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
			     struct net_device *dev)
{
	struct l2t_entry *e;
	struct l2t_data *d = L2DATA(cdev);
	u32 addr = *(u32 *) neigh->primary_key;
	int ifidx = neigh->dev->ifindex;
	int hash = arp_hash(addr, ifidx, d);
	struct port_info *p = netdev_priv(dev);
	int smt_idx = p->port_id;

	write_lock_bh(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (e->addr == addr && e->ifindex == ifidx &&
		    e->smt_idx == smt_idx) {
			l2t_hold(d, e);
			if (atomic_read(&e->refcnt) == 1)
				reuse_entry(e, neigh);
			goto done;
		}

	/* Need to allocate a new entry */
	e = alloc_l2e(d);
	if (e) {
		spin_lock(&e->lock);	/* avoid race with t3_l2e_free */
		e->next = d->l2tab[hash].first;
		d->l2tab[hash].first = e;
		e->state = L2T_STATE_RESOLVING;
		e->addr = addr;
		e->ifindex = ifidx;
		e->smt_idx = smt_idx;
		atomic_set(&e->refcnt, 1);
		neigh_replace(e, neigh);
		if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
			e->vlan = vlan_dev_vlan_id(neigh->dev);
		else
			e->vlan = VLAN_NONE;
		spin_unlock(&e->lock);
	}
done:
	write_unlock_bh(&d->lock);
	return e;
}

EXPORT_SYMBOL(t3_l2t_get);

/*
 * Called when address resolution fails for an L2T entry to handle packets
 * on the arpq head.  If a packet specifies a failure handler it is invoked,
 * otherwise the packet is sent to the offload device.
 *
 * XXX: maybe we should abandon the latter behavior and just require a failure
 * handler.
 */
static void handle_failed_resolution(struct t3cdev *dev,
				     struct sk_buff_head *arpq)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(arpq, skb, tmp) {
		struct l2t_skb_cb *cb = L2T_SKB_CB(skb);

		__skb_unlink(skb, arpq);
		if (cb->arp_failure_handler)
			cb->arp_failure_handler(dev, skb);
		else
			cxgb3_ofld_send(dev, skb);
	}
}

/*
 * Called when the host's ARP layer makes a change to some entry that is
 * loaded into the HW L2 table.
 */
void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
{
	struct sk_buff_head arpq;
	struct l2t_entry *e;
	struct l2t_data *d = L2DATA(dev);
	u32 addr = *(u32 *) neigh->primary_key;
	int ifidx = neigh->dev->ifindex;
	int hash = arp_hash(addr, ifidx, d);

	read_lock_bh(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (e->addr == addr && e->ifindex == ifidx) {
			spin_lock(&e->lock);
			goto found;
		}
	read_unlock_bh(&d->lock);
	return;

found:
	__skb_queue_head_init(&arpq);

	read_unlock(&d->lock);
	if (atomic_read(&e->refcnt)) {
		if (neigh != e->neigh)
			neigh_replace(e, neigh);

		if (e->state == L2T_STATE_RESOLVING) {
			if (neigh->nud_state & NUD_FAILED) {
				skb_queue_splice_init(&e->arpq, &arpq);
			} else if (neigh->nud_state & (NUD_CONNECTED|NUD_STALE))
				setup_l2e_send_pending(dev, NULL, e);
		} else {
			e->state = neigh->nud_state & NUD_CONNECTED ?
			    L2T_STATE_VALID : L2T_STATE_STALE;
			if (memcmp(e->dmac, neigh->ha, 6))
				setup_l2e_send_pending(dev, NULL, e);
		}
	}
	spin_unlock_bh(&e->lock);

	if (!skb_queue_empty(&arpq))
		handle_failed_resolution(dev, &arpq);
}

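/*
 * Allocate and initialize an L2 table with l2t_capacity entries.  Entry 0 is
 * reserved, so only l2t_capacity - 1 entries are handed out.  Returns NULL
 * if the table cannot be allocated.
 */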
struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
{
	struct l2t_data *d;
	int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry);

	d = cxgb_alloc_mem(size);
	if (!d)
		return NULL;

	d->nentries = l2t_capacity;
	d->rover = &d->l2tab[1];	/* entry 0 is not used */
	atomic_set(&d->nfree, l2t_capacity - 1);
	rwlock_init(&d->lock);

	for (i = 0; i < l2t_capacity; ++i) {
		d->l2tab[i].idx = i;
		d->l2tab[i].state = L2T_STATE_UNUSED;
		__skb_queue_head_init(&d->l2tab[i].arpq);
		spin_lock_init(&d->l2tab[i].lock);
		atomic_set(&d->l2tab[i].refcnt, 0);
	}
	return d;
}

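/* Free an L2 table previously allocated by t3_init_l2t(). */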
void t3_free_l2t(struct l2t_data *d)
{
	cxgb_free_mem(d);
}