/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"

struct ib_pkey_cache {
        int             table_len;
        u16             table[0];
};

struct ib_update_work {
        struct work_struct work;
        struct ib_device  *device;
        u8                 port_num;
};

static union ib_gid zgid;

static const struct ib_gid_attr zattr;

enum gid_attr_find_mask {
        GID_ATTR_FIND_MASK_GID          = 1UL << 0,
        GID_ATTR_FIND_MASK_NETDEV       = 1UL << 1,
        GID_ATTR_FIND_MASK_DEFAULT      = 1UL << 2,
};

enum gid_table_entry_props {
        GID_TABLE_ENTRY_INVALID         = 1UL << 0,
        GID_TABLE_ENTRY_DEFAULT         = 1UL << 1,
};

enum gid_table_write_action {
        GID_TABLE_WRITE_ACTION_ADD,
        GID_TABLE_WRITE_ACTION_DEL,
        /* MODIFY only updates the GID table. Currently only used by
         * ib_cache_update.
         */
        GID_TABLE_WRITE_ACTION_MODIFY
};

struct ib_gid_table_entry {
        /* This lock protects an entry from being
         * read and written simultaneously.
         */
        rwlock_t            lock;
        unsigned long       props;
        union ib_gid        gid;
        struct ib_gid_attr  attr;
        void               *context;
};

struct ib_gid_table {
        int                  sz;
        /* In RoCE, adding a GID to the table requires:
         * (a) checking whether this GID already exists,
         * (b) finding a free slot, and
         * (c) writing the new GID.
         *
         * Deletion requires a different set of operations:
         * (a) find the GID, then
         * (b) delete it.
         *
         * Add and delete must each be carried out atomically, so all
         * writers serialize on this mutex. We don't need it for IB, as
         * the MAD layer replaces all entries at once. All data_vec
         * entries are protected by this lock.
         */
        struct mutex         lock;
        struct ib_gid_table_entry *data_vec;
};

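/* write_gid() is the single writer for one GID table entry. For RoCE
 * providers it marks the entry GID_TABLE_ENTRY_INVALID and drops the
 * per-entry rwlock around the (possibly sleeping) add_gid/del_gid
 * driver callbacks, so concurrent readers see the slot as invalid
 * rather than half-written. The netdev reference is moved from the
 * old attribute to the new one before the entry is marked valid again.
 */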
static int write_gid(struct ib_device *ib_dev, u8 port,
                     struct ib_gid_table *table, int ix,
                     const union ib_gid *gid,
                     const struct ib_gid_attr *attr,
                     enum gid_table_write_action action,
                     bool default_gid)
{
        int ret = 0;
        struct net_device *old_net_dev;
        unsigned long flags;

        /* When rdma_cap_roce_gid_table() holds, this function must be
         * protected by a sleepable lock, since the provider callbacks
         * below may sleep.
         */
        write_lock_irqsave(&table->data_vec[ix].lock, flags);

        if (rdma_cap_roce_gid_table(ib_dev, port)) {
                table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
                write_unlock_irqrestore(&table->data_vec[ix].lock, flags);
                /* GID_TABLE_WRITE_ACTION_MODIFY currently isn't supported by
                 * RoCE providers and thus only updates the cache.
                 */
                if (action == GID_TABLE_WRITE_ACTION_ADD)
                        ret = ib_dev->add_gid(ib_dev, port, ix, gid, attr,
                                              &table->data_vec[ix].context);
                else if (action == GID_TABLE_WRITE_ACTION_DEL)
                        ret = ib_dev->del_gid(ib_dev, port, ix,
                                              &table->data_vec[ix].context);
                write_lock_irqsave(&table->data_vec[ix].lock, flags);
        }

        old_net_dev = table->data_vec[ix].attr.ndev;
        if (old_net_dev && old_net_dev != attr->ndev)
                dev_put(old_net_dev);
        /* if modify_gid failed, just delete the old gid */
        if (ret || action == GID_TABLE_WRITE_ACTION_DEL) {
                gid = &zgid;
                attr = &zattr;
                table->data_vec[ix].context = NULL;
        }
        if (default_gid)
                table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
        memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
        memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
        if (table->data_vec[ix].attr.ndev &&
            table->data_vec[ix].attr.ndev != old_net_dev)
                dev_hold(table->data_vec[ix].attr.ndev);

        table->data_vec[ix].props &= ~GID_TABLE_ENTRY_INVALID;

        write_unlock_irqrestore(&table->data_vec[ix].lock, flags);

        if (!ret && rdma_cap_roce_gid_table(ib_dev, port)) {
                struct ib_event event;

                event.device            = ib_dev;
                event.element.port_num  = port;
                event.event             = IB_EVENT_GID_CHANGE;

                ib_dispatch_event(&event);
        }
        return ret;
}

static int add_gid(struct ib_device *ib_dev, u8 port,
                   struct ib_gid_table *table, int ix,
                   const union ib_gid *gid,
                   const struct ib_gid_attr *attr,
                   bool default_gid) {
        return write_gid(ib_dev, port, table, ix, gid, attr,
                         GID_TABLE_WRITE_ACTION_ADD, default_gid);
}

static int modify_gid(struct ib_device *ib_dev, u8 port,
                      struct ib_gid_table *table, int ix,
                      const union ib_gid *gid,
                      const struct ib_gid_attr *attr,
                      bool default_gid) {
        return write_gid(ib_dev, port, table, ix, gid, attr,
                         GID_TABLE_WRITE_ACTION_MODIFY, default_gid);
}

static int del_gid(struct ib_device *ib_dev, u8 port,
                   struct ib_gid_table *table, int ix,
                   bool default_gid) {
        return write_gid(ib_dev, port, table, ix, &zgid, &zattr,
                         GID_TABLE_WRITE_ACTION_DEL, default_gid);
}

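/* find_gid() linearly scans the table and returns the first valid
 * entry matching every field selected by @mask (GID value, associated
 * netdev, default-GID property), or -1 if none matches. Searching for
 * zgid with GID_ATTR_FIND_MASK_GID | GID_ATTR_FIND_MASK_DEFAULT and
 * default_gid == false is how callers locate a free slot.
 */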
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
                    const struct ib_gid_attr *val, bool default_gid,
                    unsigned long mask)
{
        int i;

        for (i = 0; i < table->sz; i++) {
                unsigned long flags;
                struct ib_gid_attr *attr = &table->data_vec[i].attr;

                read_lock_irqsave(&table->data_vec[i].lock, flags);

                if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
                        goto next;

                if (mask & GID_ATTR_FIND_MASK_GID &&
                    memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
                        goto next;

                if (mask & GID_ATTR_FIND_MASK_NETDEV &&
                    attr->ndev != val->ndev)
                        goto next;

                if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
                    !!(table->data_vec[i].props & GID_TABLE_ENTRY_DEFAULT) !=
                    default_gid)
                        goto next;

                read_unlock_irqrestore(&table->data_vec[i].lock, flags);
                return i;
next:
                read_unlock_irqrestore(&table->data_vec[i].lock, flags);
        }

        return -1;
}

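/* The default GID is the port's IPv6 link-local address: the fe80::/64
 * prefix plus an EUI-64 interface identifier derived from the netdev's
 * MAC address by addrconf_ifid_eui48().
 */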
static void make_default_gid(struct net_device *dev, union ib_gid *gid)
{
        gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
        addrconf_ifid_eui48(&gid->raw[8], dev);
}

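/* ib_cache_gid_add() - add a GID to the port's GID table and cache.
 *
 * A (gid, ndev) pair that is already present succeeds without adding a
 * second entry; adding the default GID of the port's own netdev on
 * behalf of a different netdev fails with -EPERM.
 *
 * Illustrative (hypothetical) caller, e.g. an inet address notifier
 * mirroring an IPv4 address into a RoCE GID:
 *
 *      union ib_gid gid;
 *      struct ib_gid_attr attr = { .ndev = ndev };
 *
 *      ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid);
 *      ib_cache_gid_add(ib_dev, port, &gid, &attr);
 */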
int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
                     union ib_gid *gid, struct ib_gid_attr *attr)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        int ix;
        int ret = 0;
        struct net_device *idev;

        table = ports_table[port - rdma_start_port(ib_dev)];

        if (!memcmp(gid, &zgid, sizeof(*gid)))
                return -EINVAL;

        if (ib_dev->get_netdev) {
                idev = ib_dev->get_netdev(ib_dev, port);
                if (idev && attr->ndev != idev) {
                        union ib_gid default_gid;

                        /* Adding default GIDs is not permitted */
                        make_default_gid(idev, &default_gid);
                        if (!memcmp(gid, &default_gid, sizeof(*gid))) {
                                dev_put(idev);
                                return -EPERM;
                        }
                }
                if (idev)
                        dev_put(idev);
        }

        mutex_lock(&table->lock);

        ix = find_gid(table, gid, attr, false, GID_ATTR_FIND_MASK_GID |
                      GID_ATTR_FIND_MASK_NETDEV);
        if (ix >= 0)
                goto out_unlock;

        ix = find_gid(table, &zgid, NULL, false, GID_ATTR_FIND_MASK_GID |
                      GID_ATTR_FIND_MASK_DEFAULT);
        if (ix < 0) {
                ret = -ENOSPC;
                goto out_unlock;
        }

        add_gid(ib_dev, port, table, ix, gid, attr, false);

out_unlock:
        mutex_unlock(&table->lock);
        return ret;
}

int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
                     union ib_gid *gid, struct ib_gid_attr *attr)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        int ix;

        table = ports_table[port - rdma_start_port(ib_dev)];

        mutex_lock(&table->lock);

        ix = find_gid(table, gid, attr, false,
                      GID_ATTR_FIND_MASK_GID |
                      GID_ATTR_FIND_MASK_NETDEV |
                      GID_ATTR_FIND_MASK_DEFAULT);
        if (ix < 0)
                goto out_unlock;

        del_gid(ib_dev, port, table, ix, false);

out_unlock:
        mutex_unlock(&table->lock);
        return 0;
}

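/* Deletes every GID associated with @ndev on @port, regardless of GID
 * value; intended for netdev teardown paths where the GID values
 * themselves are no longer known.
 */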
int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
                                     struct net_device *ndev)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        int ix;

        table = ports_table[port - rdma_start_port(ib_dev)];

        mutex_lock(&table->lock);

        for (ix = 0; ix < table->sz; ix++)
                if (table->data_vec[ix].attr.ndev == ndev)
                        del_gid(ib_dev, port, table, ix, false);

        mutex_unlock(&table->lock);
        return 0;
}

static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
                              union ib_gid *gid, struct ib_gid_attr *attr)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        unsigned long flags;

        table = ports_table[port - rdma_start_port(ib_dev)];

        if (index < 0 || index >= table->sz)
                return -EINVAL;

        read_lock_irqsave(&table->data_vec[index].lock, flags);
        if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID) {
                read_unlock_irqrestore(&table->data_vec[index].lock, flags);
                return -EAGAIN;
        }

        memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
        if (attr) {
                memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
                if (attr->ndev)
                        dev_hold(attr->ndev);
        }

        read_unlock_irqrestore(&table->data_vec[index].lock, flags);
        return 0;
}

static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
                                    const union ib_gid *gid,
                                    const struct ib_gid_attr *val,
                                    unsigned long mask,
                                    u8 *port, u16 *index)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        u8 p;
        int local_index;

        for (p = 0; p < ib_dev->phys_port_cnt; p++) {
                table = ports_table[p];
                local_index = find_gid(table, gid, val, false, mask);
                if (local_index >= 0) {
                        if (index)
                                *index = local_index;
                        if (port)
                                *port = p + rdma_start_port(ib_dev);
                        return 0;
                }
        }

        return -ENOENT;
}

static int ib_cache_gid_find(struct ib_device *ib_dev,
                             const union ib_gid *gid,
                             struct net_device *ndev, u8 *port,
                             u16 *index)
{
        unsigned long mask = GID_ATTR_FIND_MASK_GID;
        struct ib_gid_attr gid_attr_val = {.ndev = ndev};

        if (ndev)
                mask |= GID_ATTR_FIND_MASK_NETDEV;

        return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
                                        mask, port, index);
}

int ib_cache_gid_find_by_port(struct ib_device *ib_dev,
                              const union ib_gid *gid,
                              u8 port, struct net_device *ndev,
                              u16 *index)
{
        int local_index;
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        unsigned long mask = GID_ATTR_FIND_MASK_GID;
        struct ib_gid_attr val = {.ndev = ndev};

        if (port < rdma_start_port(ib_dev) ||
            port > rdma_end_port(ib_dev))
                return -ENOENT;

        table = ports_table[port - rdma_start_port(ib_dev)];

        if (ndev)
                mask |= GID_ATTR_FIND_MASK_NETDEV;

        local_index = find_gid(table, gid, &val, false, mask);
        if (local_index >= 0) {
                if (index)
                        *index = local_index;
                return 0;
        }

        return -ENOENT;
}

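/* Allocates a zero-initialized table of @sz entries; a zeroed entry
 * compares equal to zgid/zattr, which is exactly what find_gid()
 * treats as a free slot.
 */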
static struct ib_gid_table *alloc_gid_table(int sz)
{
        unsigned int i;
        struct ib_gid_table *table =
                kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);

        if (!table)
                return NULL;

        table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
        if (!table->data_vec)
                goto err_free_table;

        mutex_init(&table->lock);

        table->sz = sz;

        for (i = 0; i < sz; i++)
                rwlock_init(&table->data_vec[i].lock);

        return table;

err_free_table:
        kfree(table);
        return NULL;
}

static void release_gid_table(struct ib_gid_table *table)
{
        if (table) {
                kfree(table->data_vec);
                kfree(table);
        }
}

static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
                                   struct ib_gid_table *table)
{
        int i;

        if (!table)
                return;

        for (i = 0; i < table->sz; ++i) {
                if (memcmp(&table->data_vec[i].gid, &zgid,
                           sizeof(table->data_vec[i].gid)))
                        /* props holds gid_table_entry_props bits, so test
                         * GID_TABLE_ENTRY_DEFAULT here (the original tested
                         * the unrelated GID_ATTR_FIND_MASK_DEFAULT bit).
                         */
                        del_gid(ib_dev, port, table, i,
                                table->data_vec[i].props &
                                GID_TABLE_ENTRY_DEFAULT);
        }
}

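/* ib_cache_gid_set_default_gid() - set or clear the reserved default
 * GID of (@ib_dev, @port). The slot is located by its
 * GID_TABLE_ENTRY_DEFAULT property (index 0, reserved by
 * gid_table_reserve_default() on RoCE ports). In
 * IB_CACHE_GID_DEFAULT_MODE_SET mode the old default is replaced only
 * if it differs from the one computed from @ndev.
 */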
void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
                                  struct net_device *ndev,
                                  enum ib_cache_gid_default_mode mode)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        union ib_gid gid;
        struct ib_gid_attr gid_attr;
        struct ib_gid_table *table;
        int ix;
        union ib_gid current_gid;
        struct ib_gid_attr current_gid_attr = {};

        table = ports_table[port - rdma_start_port(ib_dev)];

        make_default_gid(ndev, &gid);
        memset(&gid_attr, 0, sizeof(gid_attr));
        gid_attr.ndev = ndev;

        ix = find_gid(table, NULL, NULL, true, GID_ATTR_FIND_MASK_DEFAULT);

        /* Couldn't find the default GID location */
        WARN_ON(ix < 0);

        mutex_lock(&table->lock);
        if (!__ib_cache_gid_get(ib_dev, port, ix,
                                &current_gid, &current_gid_attr) &&
            mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
            !memcmp(&gid, &current_gid, sizeof(gid)) &&
            !memcmp(&gid_attr, &current_gid_attr, sizeof(gid_attr)))
                goto unlock;

        if ((memcmp(&current_gid, &zgid, sizeof(current_gid)) ||
             memcmp(&current_gid_attr, &zattr,
                    sizeof(current_gid_attr))) &&
            del_gid(ib_dev, port, table, ix, true)) {
                pr_warn("ib_cache_gid: can't delete index %d for default gid %pI6\n",
                        ix, gid.raw);
                goto unlock;
        }

        if (mode == IB_CACHE_GID_DEFAULT_MODE_SET)
                if (add_gid(ib_dev, port, table, ix, &gid, &gid_attr, true))
                        pr_warn("ib_cache_gid: unable to add default gid %pI6\n",
                                gid.raw);

unlock:
        if (current_gid_attr.ndev)
                dev_put(current_gid_attr.ndev);
        mutex_unlock(&table->lock);
}

static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
                                     struct ib_gid_table *table)
{
        if (rdma_protocol_roce(ib_dev, port)) {
                struct ib_gid_table_entry *entry = &table->data_vec[0];

                entry->props |= GID_TABLE_ENTRY_DEFAULT;
        }

        return 0;
}

static int _gid_table_setup_one(struct ib_device *ib_dev)
{
        u8 port;
        struct ib_gid_table **table;
        int err = 0;

        table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);
        if (!table) {
                pr_warn("failed to allocate ib gid cache for %s\n",
                        ib_dev->name);
                return -ENOMEM;
        }

        for (port = 0; port < ib_dev->phys_port_cnt; port++) {
                u8 rdma_port = port + rdma_start_port(ib_dev);

                table[port] =
                        alloc_gid_table(
                                ib_dev->port_immutable[rdma_port].gid_tbl_len);
                if (!table[port]) {
                        err = -ENOMEM;
                        goto rollback_table_setup;
                }

                err = gid_table_reserve_default(ib_dev,
                                                port + rdma_start_port(ib_dev),
                                                table[port]);
                if (err)
                        goto rollback_table_setup;
        }

        ib_dev->cache.gid_cache = table;
        return 0;

rollback_table_setup:
        for (port = 0; port < ib_dev->phys_port_cnt; port++) {
                cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
                                       table[port]);
                release_gid_table(table[port]);
        }

        kfree(table);
        return err;
}

static void gid_table_release_one(struct ib_device *ib_dev)
{
        struct ib_gid_table **table = ib_dev->cache.gid_cache;
        u8 port;

        if (!table)
                return;

        for (port = 0; port < ib_dev->phys_port_cnt; port++)
                release_gid_table(table[port]);

        kfree(table);
        ib_dev->cache.gid_cache = NULL;
}

static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
        struct ib_gid_table **table = ib_dev->cache.gid_cache;
        u8 port;

        if (!table)
                return;

        for (port = 0; port < ib_dev->phys_port_cnt; port++)
                cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
                                       table[port]);
}

static int gid_table_setup_one(struct ib_device *ib_dev)
{
        int err;

        err = _gid_table_setup_one(ib_dev);
        if (err)
                return err;

        err = roce_rescan_device(ib_dev);
        if (err) {
                gid_table_cleanup_one(ib_dev);
                gid_table_release_one(ib_dev);
        }

        return err;
}

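/* ib_get_cached_gid() - read a GID from the cache rather than querying
 * the hardware. Returns -EINVAL for a bad port or index, and -EAGAIN
 * when the entry is transiently invalid because a writer is mid-update.
 *
 * Minimal (hypothetical) caller sketch:
 *
 *      union ib_gid sgid;
 *      int ret = ib_get_cached_gid(device, port_num, sgid_index, &sgid);
 *
 *      if (ret == -EAGAIN)     // entry being rewritten; simply retry
 *              ret = ib_get_cached_gid(device, port_num, sgid_index, &sgid);
 */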
int ib_get_cached_gid(struct ib_device *device,
                      u8 port_num,
                      int index,
                      union ib_gid *gid)
{
        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        return __ib_cache_gid_get(device, port_num, index, gid, NULL);
}
EXPORT_SYMBOL(ib_get_cached_gid);

int ib_find_cached_gid(struct ib_device *device,
                       const union ib_gid *gid,
                       u8 *port_num,
                       u16 *index)
{
        return ib_cache_gid_find(device, gid, NULL, port_num, index);
}
EXPORT_SYMBOL(ib_find_cached_gid);

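/* P_Key cache reads take the device-wide cache.lock rwlock rather than
 * a per-entry lock: ib_cache_update() swaps in a whole new per-port
 * table under the write lock, so a reader always sees either the old
 * table or the new one, never a mix of the two.
 */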
int ib_get_cached_pkey(struct ib_device *device,
                       u8 port_num,
                       int index,
                       u16 *pkey)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int ret = 0;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

        if (index < 0 || index >= cache->table_len)
                ret = -EINVAL;
        else
                *pkey = cache->table[index];

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);

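/* ib_find_cached_pkey() - look up a P_Key comparing only the low 15
 * bits. Bit 15 is the membership bit (1 = full member, 0 = partial
 * member); a full-member entry is preferred, and a partial-member
 * index is returned only when no full-member entry matches. Use
 * ib_find_exact_cached_pkey() below for a full 16-bit comparison.
 */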
int ib_find_cached_pkey(struct ib_device *device,
                        u8 port_num,
                        u16 pkey,
                        u16 *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;
        int partial_ix = -1;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

        *index = -1;

        for (i = 0; i < cache->table_len; ++i)
                if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
                        if (cache->table[i] & 0x8000) {
                                *index = i;
                                ret = 0;
                                break;
                        } else {
                                partial_ix = i;
                        }
                }

        if (ret && partial_ix >= 0) {
                *index = partial_ix;
                ret = 0;
        }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);

int ib_find_exact_cached_pkey(struct ib_device *device,
                              u8 port_num,
                              u16 pkey,
                              u16 *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

        *index = -1;

        for (i = 0; i < cache->table_len; ++i)
                if (cache->table[i] == pkey) {
                        *index = i;
                        ret = 0;
                        break;
                }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);

int ib_get_cached_lmc(struct ib_device *device,
                      u8 port_num,
                      u8 *lmc)
{
        unsigned long flags;
        int ret = 0;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);
        *lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

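/* ib_cache_update() - requery port attributes and rebuild the caches
 * for one port. The P_Key table is built in fresh memory and swapped
 * in under cache.lock. Devices that manage their own RoCE GID table
 * (rdma_cap_roce_gid_table) skip the GID requery; for other ports each
 * GID is re-read with ib_query_gid() and written back via modify_gid(),
 * which only touches the cache.
 */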
static void ib_cache_update(struct ib_device *device,
                            u8 port)
{
        struct ib_port_attr       *tprops = NULL;
        struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
        struct ib_gid_cache {
                int             table_len;
                union ib_gid    table[0];
        } *gid_cache = NULL;
        int                        i;
        int                        ret;
        struct ib_gid_table       *table;
        struct ib_gid_table      **ports_table = device->cache.gid_cache;
        bool                       use_roce_gid_table =
                                        rdma_cap_roce_gid_table(device, port);

        if (port < rdma_start_port(device) || port > rdma_end_port(device))
                return;

        table = ports_table[port - rdma_start_port(device)];

        tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
        if (!tprops)
                return;

        ret = ib_query_port(device, port, tprops);
        if (ret) {
                printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
                       ret, device->name);
                goto err;
        }

        pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
                             sizeof *pkey_cache->table, GFP_KERNEL);
        if (!pkey_cache)
                goto err;

        pkey_cache->table_len = tprops->pkey_tbl_len;

        if (!use_roce_gid_table) {
                gid_cache = kmalloc(sizeof(*gid_cache) + tprops->gid_tbl_len *
                                    sizeof(*gid_cache->table), GFP_KERNEL);
                if (!gid_cache)
                        goto err;

                gid_cache->table_len = tprops->gid_tbl_len;
        }

        for (i = 0; i < pkey_cache->table_len; ++i) {
                ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
                if (ret) {
                        printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
                               ret, device->name, i);
                        goto err;
                }
        }

        if (!use_roce_gid_table) {
                for (i = 0; i < gid_cache->table_len; ++i) {
                        ret = ib_query_gid(device, port, i,
                                           gid_cache->table + i);
                        if (ret) {
                                printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
                                       ret, device->name, i);
                                goto err;
                        }
                }
        }

        write_lock_irq(&device->cache.lock);

        old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];

        device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
        if (!use_roce_gid_table) {
                for (i = 0; i < gid_cache->table_len; i++) {
                        modify_gid(device, port, table, i, gid_cache->table + i,
                                   &zattr, false);
                }
        }

        device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;

        write_unlock_irq(&device->cache.lock);

        kfree(gid_cache);
        kfree(old_pkey_cache);
        kfree(tprops);
        return;

err:
        kfree(pkey_cache);
        kfree(gid_cache);
        kfree(tprops);
}

static void ib_cache_task(struct work_struct *_work)
{
        struct ib_update_work *work =
                container_of(_work, struct ib_update_work, work);

        ib_cache_update(work->device, work->port_num);
        kfree(work);
}

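/* Runs on the IB event path, which may not be allowed to sleep: the
 * work item is allocated with GFP_ATOMIC and the actual update (which
 * sleeps in ib_query_port() and friends) is deferred to ib_wq.
 */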
static void ib_cache_event(struct ib_event_handler *handler,
                           struct ib_event *event)
{
        struct ib_update_work *work;

        if (event->event == IB_EVENT_PORT_ERR    ||
            event->event == IB_EVENT_PORT_ACTIVE ||
            event->event == IB_EVENT_LID_CHANGE  ||
            event->event == IB_EVENT_PKEY_CHANGE ||
            event->event == IB_EVENT_SM_CHANGE   ||
            event->event == IB_EVENT_CLIENT_REREGISTER ||
            event->event == IB_EVENT_GID_CHANGE) {
                work = kmalloc(sizeof *work, GFP_ATOMIC);
                if (work) {
                        INIT_WORK(&work->work, ib_cache_task);
                        work->device   = event->device;
                        work->port_num = event->element.port_num;
                        queue_work(ib_wq, &work->work);
                }
        }
}

int ib_cache_setup_one(struct ib_device *device)
{
        int p;
        int err;

        rwlock_init(&device->cache.lock);

        device->cache.pkey_cache =
                kzalloc(sizeof *device->cache.pkey_cache *
                        (rdma_end_port(device) - rdma_start_port(device) + 1),
                        GFP_KERNEL);
        device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
                                          (rdma_end_port(device) -
                                           rdma_start_port(device) + 1),
                                          GFP_KERNEL);
        if (!device->cache.pkey_cache ||
            !device->cache.lmc_cache) {
                printk(KERN_WARNING "Couldn't allocate cache for %s\n",
                       device->name);
                return -ENOMEM;
        }

        err = gid_table_setup_one(device);
        if (err)
                /* Allocated memory will be cleaned in the release function */
                return err;

        for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
                ib_cache_update(device, p + rdma_start_port(device));

        INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
                              device, ib_cache_event);
        err = ib_register_event_handler(&device->cache.event_handler);
        if (err)
                goto err;

        return 0;

err:
        gid_table_cleanup_one(device);
        return err;
}

void ib_cache_release_one(struct ib_device *device)
{
        int p;

        /*
         * The release function frees all the cache elements.
         * This function should be called as part of freeing
         * all the device's resources when the cache could no
         * longer be accessed.
         */
        if (device->cache.pkey_cache)
                for (p = 0;
                     p <= rdma_end_port(device) - rdma_start_port(device); ++p)
                        kfree(device->cache.pkey_cache[p]);

        gid_table_release_one(device);
        kfree(device->cache.pkey_cache);
        kfree(device->cache.lmc_cache);
}

void ib_cache_cleanup_one(struct ib_device *device)
{
        /* The cleanup function unregisters the event handler,
         * waits for all in-progress workqueue elements and cleans
         * up the GID cache. This function should be called after
         * the device was removed from the devices list and all
         * clients were removed, so the cache exists but is
         * non-functional and shouldn't be updated anymore.
         */
        ib_unregister_event_handler(&device->cache.event_handler);
        flush_workqueue(ib_wq);
        gid_table_cleanup_one(device);
}

void __init ib_cache_setup(void)
{
        roce_gid_mgmt_init();
}

void __exit ib_cache_cleanup(void)
{
        roce_gid_mgmt_cleanup();
}