/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <net/neighbour.h>
#include "cxgb4.h"
#include "l2t.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4_regs.h"
#include "t4_values.h"

#define VLAN_NONE 0xfff

/* identifies sync vs async L2T_WRITE_REQs */
#define F_SYNC_WR    (1 << 12)

enum {
	L2T_STATE_VALID,	/* entry is up to date */
	L2T_STATE_STALE,	/* entry may be used but needs revalidation */
	L2T_STATE_RESOLVING,	/* entry needs address resolution */
	L2T_STATE_SYNC_WRITE,	/* synchronous write of entry underway */

	/* when state is one of the below the entry is not hashed */
	L2T_STATE_SWITCHING,	/* entry is being used by a switching filter */
	L2T_STATE_UNUSED	/* entry not in use */
};

struct l2t_data {
	rwlock_t lock;
	atomic_t nfree;			/* number of free entries */
	struct l2t_entry *rover;	/* starting point for next allocation */
	struct l2t_entry l2tab[L2T_SIZE];
};

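/* The VLAN priority (PCP) is the top 3 bits of the VLAN TCI stored in e->vlan. */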
static inline unsigned int vlan_prio(const struct l2t_entry *e)
{
	return e->vlan >> 13;
}

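/*
 * Take a reference on an L2T entry.  A 0 -> 1 transition removes the entry
 * from the pool of free entries.
 */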
static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
{
	if (atomic_add_return(1, &e->refcnt) == 1)  /* 0 -> 1 transition */
		atomic_dec(&d->nfree);
}

/*
 * To avoid having to check address families we do not allow v4 and v6
 * neighbors to be on the same hash chain.  We keep v4 entries in the first
 * half of available hash buckets and v6 in the second.
 */
enum {
	L2T_SZ_HALF = L2T_SIZE / 2,
	L2T_HASH_MASK = L2T_SZ_HALF - 1
};

static inline unsigned int arp_hash(const u32 *key, int ifindex)
{
	return jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK;
}

static inline unsigned int ipv6_hash(const u32 *key, int ifindex)
{
	u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];

	return L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK);
}

static unsigned int addr_hash(const u32 *addr, int addr_len, int ifindex)
{
	return addr_len == 4 ? arp_hash(addr, ifindex) :
			       ipv6_hash(addr, ifindex);
}

/*
 * Checks if an L2T entry is for the given IP/IPv6 address.  It does not check
 * whether the L2T entry and the address are of the same address family.
 * Callers ensure an address is only checked against L2T entries of the same
 * family, something made trivial by the separation of IP and IPv6 hash chains
 * mentioned above.  Returns 0 if there's a match.
 */
static int addreq(const struct l2t_entry *e, const u32 *addr)
{
	if (e->v6)
		return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) |
		       (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]);
	return e->addr[0] ^ addr[0];
}

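/*
 * Replace an entry's neighbour, taking a reference on the new neighbour
 * before releasing any old one.
 */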
static void neigh_replace(struct l2t_entry *e, struct neighbour *n)
{
	neigh_hold(n);
	if (e->neigh)
		neigh_release(e->neigh);
	e->neigh = n;
}

/*
 * Write an L2T entry.  Must be called with the entry locked.
 * The write may be synchronous or asynchronous.
 */
static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
{
	struct sk_buff *skb;
	struct cpl_l2t_write_req *req;

	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);

	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
					e->idx | (sync ? F_SYNC_WR : 0) |
					TID_QID_V(adap->sge.fw_evtq.abs_id)));
	req->params = htons(L2T_W_PORT_V(e->lport) | L2T_W_NOREPLY_V(!sync));
	req->l2t_idx = htons(e->idx);
	req->vlan = htons(e->vlan);
	if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
		memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));

	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
	t4_ofld_send(adap, skb);

	if (sync && e->state != L2T_STATE_SWITCHING)
		e->state = L2T_STATE_SYNC_WRITE;
	return 0;
}

/*
 * Send packets waiting in an L2T entry's ARP queue.  Must be called with the
 * entry locked.
 */
static void send_pending(struct adapter *adap, struct l2t_entry *e)
{
	while (e->arpq_head) {
		struct sk_buff *skb = e->arpq_head;

		e->arpq_head = skb->next;
		skb->next = NULL;
		t4_ofld_send(adap, skb);
	}
	e->arpq_tail = NULL;
}

/*
 * Process a CPL_L2T_WRITE_RPL.  Wake up the ARP queue if it completes a
 * synchronous L2T_WRITE.  Note that the TID in the reply is really the L2T
 * index it refers to.
 */
void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl)
{
	unsigned int tid = GET_TID(rpl);
	unsigned int idx = tid & (L2T_SIZE - 1);

	if (unlikely(rpl->status != CPL_ERR_NONE)) {
		dev_err(adap->pdev_dev,
			"Unexpected L2T_WRITE_RPL status %u for entry %u\n",
			rpl->status, idx);
		return;
	}

	if (tid & F_SYNC_WR) {
		struct l2t_entry *e = &adap->l2t->l2tab[idx];

		spin_lock(&e->lock);
		if (e->state != L2T_STATE_SWITCHING) {
			send_pending(adap, e);
			e->state = (e->neigh->nud_state & NUD_STALE) ?
					L2T_STATE_STALE : L2T_STATE_VALID;
		}
		spin_unlock(&e->lock);
	}
}

/*
 * Add a packet to an L2T entry's queue of packets awaiting resolution.
 * Must be called with the entry's lock held.
 */
static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
{
	skb->next = NULL;
	if (e->arpq_head)
		e->arpq_tail->next = skb;
	else
		e->arpq_head = skb;
	e->arpq_tail = skb;
}

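/*
 * Transmit a packet through an L2T entry.  Valid entries are sent on
 * immediately, stale entries are revalidated first, and packets for entries
 * still resolving (or mid synchronous write) are queued on the ARP queue.
 */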
int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
		   struct l2t_entry *e)
{
	struct adapter *adap = netdev2adap(dev);

again:
	switch (e->state) {
	case L2T_STATE_STALE:	/* entry is stale, kick off revalidation */
		neigh_event_send(e->neigh, NULL);
		spin_lock_bh(&e->lock);
		if (e->state == L2T_STATE_STALE)
			e->state = L2T_STATE_VALID;
		spin_unlock_bh(&e->lock);
		/* Fall through */
	case L2T_STATE_VALID:	/* fast-path, send the packet on */
		return t4_ofld_send(adap, skb);
	case L2T_STATE_RESOLVING:
	case L2T_STATE_SYNC_WRITE:
		spin_lock_bh(&e->lock);
		if (e->state != L2T_STATE_SYNC_WRITE &&
		    e->state != L2T_STATE_RESOLVING) {
			spin_unlock_bh(&e->lock);
			goto again;
		}
		arpq_enqueue(e, skb);
		spin_unlock_bh(&e->lock);

		if (e->state == L2T_STATE_RESOLVING &&
		    !neigh_event_send(e->neigh, NULL)) {
			spin_lock_bh(&e->lock);
			if (e->state == L2T_STATE_RESOLVING && e->arpq_head)
				write_l2e(adap, e, 1);
			spin_unlock_bh(&e->lock);
		}
	}
	return 0;
}
EXPORT_SYMBOL(cxgb4_l2t_send);

/*
 * Allocate a free L2T entry.  Must be called with l2t_data.lock held.
 */
static struct l2t_entry *alloc_l2e(struct l2t_data *d)
{
	struct l2t_entry *end, *e, **p;

	if (!atomic_read(&d->nfree))
		return NULL;

	/* there's definitely a free entry */
	for (e = d->rover, end = &d->l2tab[L2T_SIZE]; e != end; ++e)
		if (atomic_read(&e->refcnt) == 0)
			goto found;

	for (e = d->l2tab; atomic_read(&e->refcnt); ++e)
		;
found:
	d->rover = e + 1;
	atomic_dec(&d->nfree);

	/*
	 * The entry we found may be an inactive entry that is
	 * presently in the hash table.  We need to remove it.
	 */
	if (e->state < L2T_STATE_SWITCHING)
		for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)
			if (*p == e) {
				*p = e->next;
				e->next = NULL;
				break;
			}

	e->state = L2T_STATE_UNUSED;
	return e;
}

/*
 * Called when an L2T entry has no more users.
 */
static void t4_l2e_free(struct l2t_entry *e)
{
	struct l2t_data *d;

	spin_lock_bh(&e->lock);
	if (atomic_read(&e->refcnt) == 0) {  /* hasn't been recycled */
		if (e->neigh) {
			neigh_release(e->neigh);
			e->neigh = NULL;
		}
		while (e->arpq_head) {
			struct sk_buff *skb = e->arpq_head;

			e->arpq_head = skb->next;
			kfree_skb(skb);
		}
		e->arpq_tail = NULL;
	}
	spin_unlock_bh(&e->lock);

	d = container_of(e, struct l2t_data, l2tab[e->idx]);
	atomic_inc(&d->nfree);
}

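/*
 * Release a reference to an L2T entry, freeing the entry when the last
 * user is done with it.
 */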
void cxgb4_l2t_release(struct l2t_entry *e)
{
	if (atomic_dec_and_test(&e->refcnt))
		t4_l2e_free(e);
}
EXPORT_SYMBOL(cxgb4_l2t_release);

/*
 * Update an L2T entry that was previously used for the same next hop as neigh.
 * Must be called with softirqs disabled.
 */
static void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
{
	unsigned int nud_state;

	spin_lock(&e->lock);	/* avoid race with t4_l2t_free */
	if (neigh != e->neigh)
		neigh_replace(e, neigh);
	nud_state = neigh->nud_state;
	if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
	    !(nud_state & NUD_VALID))
		e->state = L2T_STATE_RESOLVING;
	else if (nud_state & NUD_CONNECTED)
		e->state = L2T_STATE_VALID;
	else
		e->state = L2T_STATE_STALE;
	spin_unlock(&e->lock);
}

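/*
 * Find or allocate an L2T entry for the given neighbour, VLAN, and port,
 * and take a reference on it.  Returns NULL if the table is full.
 */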
struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
				const struct net_device *physdev,
				unsigned int priority)
{
	u8 lport;
	u16 vlan;
	struct l2t_entry *e;
	int addr_len = neigh->tbl->key_len;
	u32 *addr = (u32 *)neigh->primary_key;
	int ifidx = neigh->dev->ifindex;
	int hash = addr_hash(addr, addr_len, ifidx);

	if (neigh->dev->flags & IFF_LOOPBACK)
		lport = netdev2pinfo(physdev)->tx_chan + 4;
	else
		lport = netdev2pinfo(physdev)->lport;

	if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
		vlan = vlan_dev_vlan_id(neigh->dev);
	else
		vlan = VLAN_NONE;

	write_lock_bh(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (!addreq(e, addr) && e->ifindex == ifidx &&
		    e->vlan == vlan && e->lport == lport) {
			l2t_hold(d, e);
			if (atomic_read(&e->refcnt) == 1)
				reuse_entry(e, neigh);
			goto done;
		}

	/* Need to allocate a new entry */
	e = alloc_l2e(d);
	if (e) {
		spin_lock(&e->lock);	/* avoid race with t4_l2t_free */
		e->state = L2T_STATE_RESOLVING;
		if (neigh->dev->flags & IFF_LOOPBACK)
			memcpy(e->dmac, physdev->dev_addr, sizeof(e->dmac));
		memcpy(e->addr, addr, addr_len);
		e->ifindex = ifidx;
		e->hash = hash;
		e->lport = lport;
		e->v6 = addr_len == 16;
		atomic_set(&e->refcnt, 1);
		neigh_replace(e, neigh);
		e->vlan = vlan;
		e->next = d->l2tab[hash].first;
		d->l2tab[hash].first = e;
		spin_unlock(&e->lock);
	}
done:
	write_unlock_bh(&d->lock);
	return e;
}
EXPORT_SYMBOL(cxgb4_l2t_get);

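/*
 * Construct the Compressed Filter Tuple for a connection that will use this
 * L2T entry, placing each field at the offset given by the adapter's TP
 * parameters.
 */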
u64 cxgb4_select_ntuple(struct net_device *dev,
			const struct l2t_entry *l2t)
{
	struct adapter *adap = netdev2adap(dev);
	struct tp_params *tp = &adap->params.tp;
	u64 ntuple = 0;

	/* Initialize each of the fields which we care about which are present
	 * in the Compressed Filter Tuple.
	 */
	if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
		ntuple |= (u64)(FT_VLAN_VLD_F | l2t->vlan) << tp->vlan_shift;

	if (tp->port_shift >= 0)
		ntuple |= (u64)l2t->lport << tp->port_shift;

	if (tp->protocol_shift >= 0)
		ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;

	if (tp->vnic_shift >= 0) {
		u32 viid = cxgb4_port_viid(dev);
		u32 vf = FW_VIID_VIN_G(viid);
		u32 pf = FW_VIID_PFN_G(viid);
		u32 vld = FW_VIID_VIVLD_G(viid);

		ntuple |= (u64)(FT_VNID_ID_VF_V(vf) |
				FT_VNID_ID_PF_V(pf) |
				FT_VNID_ID_VLD_V(vld)) << tp->vnic_shift;
	}

	return ntuple;
}
EXPORT_SYMBOL(cxgb4_select_ntuple);

/*
 * Called when address resolution fails for an L2T entry to handle packets
 * on the arpq head.  If a packet specifies a failure handler it is invoked,
 * otherwise the packet is sent to the device.
 */
static void handle_failed_resolution(struct adapter *adap, struct sk_buff *arpq)
{
	while (arpq) {
		struct sk_buff *skb = arpq;
		const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);

		arpq = skb->next;
		skb->next = NULL;
		if (cb->arp_err_handler)
			cb->arp_err_handler(cb->handle, skb);
		else
			t4_ofld_send(adap, skb);
	}
}

/*
 * Called when the host's neighbor layer makes a change to some entry that is
 * loaded into the HW L2 table.
 */
void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
{
	struct l2t_entry *e;
	struct sk_buff *arpq = NULL;
	struct l2t_data *d = adap->l2t;
	int addr_len = neigh->tbl->key_len;
	u32 *addr = (u32 *) neigh->primary_key;
	int ifidx = neigh->dev->ifindex;
	int hash = addr_hash(addr, addr_len, ifidx);

	read_lock_bh(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (!addreq(e, addr) && e->ifindex == ifidx) {
			spin_lock(&e->lock);
			if (atomic_read(&e->refcnt))
				goto found;
			spin_unlock(&e->lock);
			break;
		}
	read_unlock_bh(&d->lock);
	return;

found:
	read_unlock(&d->lock);

	if (neigh != e->neigh)
		neigh_replace(e, neigh);

	if (e->state == L2T_STATE_RESOLVING) {
		if (neigh->nud_state & NUD_FAILED) {
			arpq = e->arpq_head;
			e->arpq_head = e->arpq_tail = NULL;
		} else if ((neigh->nud_state & (NUD_CONNECTED | NUD_STALE)) &&
			   e->arpq_head) {
			write_l2e(adap, e, 1);
		}
	} else {
		e->state = neigh->nud_state & NUD_CONNECTED ?
			L2T_STATE_VALID : L2T_STATE_STALE;
		if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)))
			write_l2e(adap, e, 0);
	}

	spin_unlock_bh(&e->lock);

	if (arpq)
		handle_failed_resolution(adap, arpq);
}

/* Allocate an L2T entry for use by a switching rule.  Such entries need to be
 * explicitly freed and while busy they are not on any hash chain, so normal
 * address resolution updates do not see them.
 */
struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d)
{
	struct l2t_entry *e;

	write_lock_bh(&d->lock);
	e = alloc_l2e(d);
	if (e) {
		spin_lock(&e->lock);	/* avoid race with t4_l2t_free */
		e->state = L2T_STATE_SWITCHING;
		atomic_set(&e->refcnt, 1);
		spin_unlock(&e->lock);
	}
	write_unlock_bh(&d->lock);
	return e;
}

/* Sets/updates the contents of a switching L2T entry that has been allocated
 * with an earlier call to @t4_l2t_alloc_switching.
 */
int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
			 u8 port, u8 *eth_addr)
{
	e->vlan = vlan;
	e->lport = port;
	memcpy(e->dmac, eth_addr, ETH_ALEN);
	return write_l2e(adap, e, 0);
}

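/* Allocate and initialize the L2 table; every entry starts out free and unused. */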
struct l2t_data *t4_init_l2t(void)
{
	int i;
	struct l2t_data *d;

	d = t4_alloc_mem(sizeof(*d));
	if (!d)
		return NULL;

	d->rover = d->l2tab;
	atomic_set(&d->nfree, L2T_SIZE);
	rwlock_init(&d->lock);

	for (i = 0; i < L2T_SIZE; ++i) {
		d->l2tab[i].idx = i;
		d->l2tab[i].state = L2T_STATE_UNUSED;
		spin_lock_init(&d->l2tab[i].lock);
		atomic_set(&d->l2tab[i].refcnt, 0);
	}
	return d;
}

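/* seq_file support for dumping the L2 table through debugfs */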
static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos)
{
	struct l2t_entry *l2tab = seq->private;

	return pos >= L2T_SIZE ? NULL : &l2tab[pos];
}

static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
{
	return *pos ? l2t_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	v = l2t_get_idx(seq, *pos);
	if (v)
		++*pos;
	return v;
}

static void l2t_seq_stop(struct seq_file *seq, void *v)
{
}

static char l2e_state(const struct l2t_entry *e)
{
	switch (e->state) {
	case L2T_STATE_VALID: return 'V';
	case L2T_STATE_STALE: return 'S';
	case L2T_STATE_SYNC_WRITE: return 'W';
	case L2T_STATE_RESOLVING: return e->arpq_head ? 'A' : 'R';
	case L2T_STATE_SWITCHING: return 'X';
	default:
		return 'U';
	}
}

static int l2t_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, " Idx IP address                "
			 "Ethernet address  VLAN/P LP State Users Port\n");
	else {
		char ip[60];
		struct l2t_entry *e = v;

		spin_lock_bh(&e->lock);
		if (e->state == L2T_STATE_SWITCHING)
			ip[0] = '\0';
		else
			sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr);
		seq_printf(seq, "%4u %-25s %17pM %4d %u %2u   %c   %5u %s\n",
			   e->idx, ip, e->dmac,
			   e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport,
			   l2e_state(e), atomic_read(&e->refcnt),
			   e->neigh ? e->neigh->dev->name : "");
		spin_unlock_bh(&e->lock);
	}
	return 0;
}

static const struct seq_operations l2t_seq_ops = {
	.start = l2t_seq_start,
	.next = l2t_seq_next,
	.stop = l2t_seq_stop,
	.show = l2t_seq_show
};

static int l2t_seq_open(struct inode *inode, struct file *file)
{
	int rc = seq_open(file, &l2t_seq_ops);

	if (!rc) {
		struct adapter *adap = inode->i_private;
		struct seq_file *seq = file->private_data;

		seq->private = adap->l2t->l2tab;
	}
	return rc;
}

const struct file_operations t4_l2t_fops = {
	.owner = THIS_MODULE,
	.open = l2t_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};