drivers/infiniband/core/cache.c
/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"

struct ib_pkey_cache {
	int		table_len;
	u16		table[0];
};

struct ib_update_work {
	struct work_struct work;
	struct ib_device  *device;
	u8		   port_num;
};

union ib_gid zgid;
EXPORT_SYMBOL(zgid);

static const struct ib_gid_attr zattr;

enum gid_attr_find_mask {
	GID_ATTR_FIND_MASK_GID		= 1UL << 0,
	GID_ATTR_FIND_MASK_NETDEV	= 1UL << 1,
	GID_ATTR_FIND_MASK_DEFAULT	= 1UL << 2,
	GID_ATTR_FIND_MASK_GID_TYPE	= 1UL << 3,
};

enum gid_table_entry_props {
	GID_TABLE_ENTRY_INVALID		= 1UL << 0,
	GID_TABLE_ENTRY_DEFAULT		= 1UL << 1,
};

enum gid_table_write_action {
	GID_TABLE_WRITE_ACTION_ADD,
	GID_TABLE_WRITE_ACTION_DEL,
	/* MODIFY only updates the GID table. Currently only used by
	 * ib_cache_update.
	 */
	GID_TABLE_WRITE_ACTION_MODIFY
};

struct ib_gid_table_entry {
	unsigned long	    props;
	union ib_gid	    gid;
	struct ib_gid_attr  attr;
	void		   *context;
};

struct ib_gid_table {
	int		     sz;
	/* In RoCE, adding a GID to the table requires:
	 * (a) Find whether this GID already exists.
	 * (b) Find a free space.
	 * (c) Write the new GID.
	 *
	 * Delete requires a different set of operations:
	 * (a) Find the GID.
	 * (b) Delete it.
	 *
	 * Add/delete should be carried out atomically.
	 * This is done by serializing writers on this mutex.
	 * We don't need this lock for IB, as the MAD layer
	 * replaces all entries. All data_vec entries are
	 * protected by this lock.
	 */
	struct mutex	     lock;
	/* This lock protects the table entries from being
	 * read and written simultaneously.
	 */
	rwlock_t	     rwlock;
	struct ib_gid_table_entry *data_vec;
};
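
/*
 * Illustrative writer-side locking (a sketch of the pattern used by the
 * RoCE write paths below, e.g. ib_cache_gid_add()):
 *
 *	mutex_lock(&table->lock);		// serialize writers; sleepable
 *	write_lock_irq(&table->rwlock);		// exclude concurrent readers
 *	...find_gid()/write_gid()...
 *	write_unlock_irq(&table->rwlock);
 *	mutex_unlock(&table->lock);
 */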

static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
{
	if (rdma_cap_roce_gid_table(ib_dev, port)) {
		struct ib_event event;

		event.device		= ib_dev;
		event.element.port_num	= port;
		event.event		= IB_EVENT_GID_CHANGE;

		ib_dispatch_event(&event);
	}
}

static const char * const gid_type_str[] = {
	[IB_GID_TYPE_IB]		= "IB/RoCE v1",
	[IB_GID_TYPE_ROCE_UDP_ENCAP]	= "RoCE v2",
};

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
{
	if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
		return gid_type_str[gid_type];

	return "Invalid GID type";
}
EXPORT_SYMBOL(ib_cache_gid_type_str);

int ib_cache_gid_parse_type_str(const char *buf)
{
	unsigned int i;
	size_t len;
	int err = -EINVAL;

	len = strlen(buf);
	if (len == 0)
		return -EINVAL;

	if (buf[len - 1] == '\n')
		len--;

	for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
		if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
		    len == strlen(gid_type_str[i])) {
			err = i;
			break;
		}

	return err;
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
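
/*
 * Example (illustrative): input is matched against gid_type_str[], with
 * or without a trailing newline, so sysfs-style writes work directly:
 *
 *	ib_cache_gid_parse_type_str("IB/RoCE v1") -> IB_GID_TYPE_IB
 *	ib_cache_gid_parse_type_str("RoCE v2\n")  -> IB_GID_TYPE_ROCE_UDP_ENCAP
 *	ib_cache_gid_parse_type_str("bogus")      -> -EINVAL
 */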

/* This function expects that rwlock will be write locked in all
 * scenarios and that the table mutex will also be held in sleepable
 * (RoCE) scenarios.
 */
static int write_gid(struct ib_device *ib_dev, u8 port,
		     struct ib_gid_table *table, int ix,
		     const union ib_gid *gid,
		     const struct ib_gid_attr *attr,
		     enum gid_table_write_action action,
		     bool default_gid)
	__releases(&table->rwlock) __acquires(&table->rwlock)
{
	int ret = 0;
	struct net_device *old_net_dev;

	/* In RoCE (rdma_cap_roce_gid_table), this function must be
	 * protected by a sleepable lock.
	 */

	if (rdma_cap_roce_gid_table(ib_dev, port)) {
		table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
		write_unlock_irq(&table->rwlock);
		/* GID_TABLE_WRITE_ACTION_MODIFY currently isn't supported by
		 * RoCE providers and thus only updates the cache.
		 */
		if (action == GID_TABLE_WRITE_ACTION_ADD)
			ret = ib_dev->add_gid(ib_dev, port, ix, gid, attr,
					      &table->data_vec[ix].context);
		else if (action == GID_TABLE_WRITE_ACTION_DEL)
			ret = ib_dev->del_gid(ib_dev, port, ix,
					      &table->data_vec[ix].context);
		write_lock_irq(&table->rwlock);
	}

	old_net_dev = table->data_vec[ix].attr.ndev;
	if (old_net_dev && old_net_dev != attr->ndev)
		dev_put(old_net_dev);
	/* if modify_gid failed, just delete the old gid */
	if (ret || action == GID_TABLE_WRITE_ACTION_DEL) {
		gid = &zgid;
		attr = &zattr;
		table->data_vec[ix].context = NULL;
	}
	if (default_gid)
		table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
	memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
	memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
	if (table->data_vec[ix].attr.ndev &&
	    table->data_vec[ix].attr.ndev != old_net_dev)
		dev_hold(table->data_vec[ix].attr.ndev);

	table->data_vec[ix].props &= ~GID_TABLE_ENTRY_INVALID;

	return ret;
}

static int add_gid(struct ib_device *ib_dev, u8 port,
		   struct ib_gid_table *table, int ix,
		   const union ib_gid *gid,
		   const struct ib_gid_attr *attr,
		   bool default_gid)
{
	return write_gid(ib_dev, port, table, ix, gid, attr,
			 GID_TABLE_WRITE_ACTION_ADD, default_gid);
}

static int modify_gid(struct ib_device *ib_dev, u8 port,
		      struct ib_gid_table *table, int ix,
		      const union ib_gid *gid,
		      const struct ib_gid_attr *attr,
		      bool default_gid)
{
	return write_gid(ib_dev, port, table, ix, gid, attr,
			 GID_TABLE_WRITE_ACTION_MODIFY, default_gid);
}

static int del_gid(struct ib_device *ib_dev, u8 port,
		   struct ib_gid_table *table, int ix,
		   bool default_gid)
{
	return write_gid(ib_dev, port, table, ix, &zgid, &zattr,
			 GID_TABLE_WRITE_ACTION_DEL, default_gid);
}

/* rwlock should be read locked */
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
		    const struct ib_gid_attr *val, bool default_gid,
		    unsigned long mask, int *pempty)
{
	int i = 0;
	int found = -1;
	int empty = pempty ? -1 : 0;

	while (i < table->sz && (found < 0 || empty < 0)) {
		struct ib_gid_table_entry *data = &table->data_vec[i];
		struct ib_gid_attr *attr = &data->attr;
		int curr_index = i;

		i++;

		if (data->props & GID_TABLE_ENTRY_INVALID)
			continue;

		if (empty < 0)
			if (!memcmp(&data->gid, &zgid, sizeof(*gid)) &&
			    !memcmp(attr, &zattr, sizeof(*attr)) &&
			    !data->props)
				empty = curr_index;

		if (found >= 0)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
		    attr->gid_type != val->gid_type)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID &&
		    memcmp(gid, &data->gid, sizeof(*gid)))
			continue;

		if (mask & GID_ATTR_FIND_MASK_NETDEV &&
		    attr->ndev != val->ndev)
			continue;

		if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
		    !!(data->props & GID_TABLE_ENTRY_DEFAULT) !=
		    default_gid)
			continue;

		found = curr_index;
	}

	if (pempty)
		*pempty = empty;

	return found;
}

static void make_default_gid(struct net_device *dev, union ib_gid *gid)
{
	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	addrconf_ifid_eui48(&gid->raw[8], dev);
}
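
/*
 * Example (illustrative, with a made-up MAC): for a netdev whose MAC is
 * 02:00:5e:10:20:30, addrconf_ifid_eui48() flips the universal/local bit
 * and inserts ff:fe in the middle, so the default GID above becomes the
 * link-local address fe80:0000:0000:0000:0000:5eff:fe10:2030.
 */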

int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	int ix;
	int ret = 0;
	struct net_device *idev;
	int empty;

	table = ports_table[port - rdma_start_port(ib_dev)];

	if (!memcmp(gid, &zgid, sizeof(*gid)))
		return -EINVAL;

	if (ib_dev->get_netdev) {
		idev = ib_dev->get_netdev(ib_dev, port);
		if (idev && attr->ndev != idev) {
			union ib_gid default_gid;

			/* Adding default GIDs is not permitted */
			make_default_gid(idev, &default_gid);
			if (!memcmp(gid, &default_gid, sizeof(*gid))) {
				dev_put(idev);
				return -EPERM;
			}
		}
		if (idev)
			dev_put(idev);
	}

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	ix = find_gid(table, gid, attr, false, GID_ATTR_FIND_MASK_GID |
		      GID_ATTR_FIND_MASK_GID_TYPE |
		      GID_ATTR_FIND_MASK_NETDEV, &empty);
	if (ix >= 0)
		goto out_unlock;

	if (empty < 0) {
		ret = -ENOSPC;
		goto out_unlock;
	}

	ret = add_gid(ib_dev, port, table, empty, gid, attr, false);
	if (!ret)
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);
	return ret;
}

int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	int ix;

	table = ports_table[port - rdma_start_port(ib_dev)];

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	ix = find_gid(table, gid, attr, false,
		      GID_ATTR_FIND_MASK_GID	  |
		      GID_ATTR_FIND_MASK_GID_TYPE |
		      GID_ATTR_FIND_MASK_NETDEV	  |
		      GID_ATTR_FIND_MASK_DEFAULT,
		      NULL);
	if (ix < 0)
		goto out_unlock;

	if (!del_gid(ib_dev, port, table, ix, false))
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);
	return 0;
}

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
				     struct net_device *ndev)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	int ix;
	bool deleted = false;

	table = ports_table[port - rdma_start_port(ib_dev)];

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	for (ix = 0; ix < table->sz; ix++)
		if (table->data_vec[ix].attr.ndev == ndev)
			if (!del_gid(ib_dev, port, table, ix, false))
				deleted = true;

	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);

	return 0;
}

static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
			      union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;

	table = ports_table[port - rdma_start_port(ib_dev)];

	if (index < 0 || index >= table->sz)
		return -EINVAL;

	if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
		return -EAGAIN;

	memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
	if (attr) {
		memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
		if (attr->ndev)
			dev_hold(attr->ndev);
	}

	return 0;
}

static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
				    const union ib_gid *gid,
				    const struct ib_gid_attr *val,
				    unsigned long mask,
				    u8 *port, u16 *index)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	u8 p;
	int local_index;
	unsigned long flags;

	for (p = 0; p < ib_dev->phys_port_cnt; p++) {
		table = ports_table[p];
		read_lock_irqsave(&table->rwlock, flags);
		local_index = find_gid(table, gid, val, false, mask, NULL);
		if (local_index >= 0) {
			if (index)
				*index = local_index;
			if (port)
				*port = p + rdma_start_port(ib_dev);
			read_unlock_irqrestore(&table->rwlock, flags);
			return 0;
		}
		read_unlock_irqrestore(&table->rwlock, flags);
	}

	return -ENOENT;
}

static int ib_cache_gid_find(struct ib_device *ib_dev,
			     const union ib_gid *gid,
			     enum ib_gid_type gid_type,
			     struct net_device *ndev, u8 *port,
			     u16 *index)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
					mask, port, index);
}

int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
			       const union ib_gid *gid,
			       enum ib_gid_type gid_type,
			       u8 port, struct net_device *ndev,
			       u16 *index)
{
	int local_index;
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
	unsigned long flags;

	if (port < rdma_start_port(ib_dev) ||
	    port > rdma_end_port(ib_dev))
		return -ENOENT;

	table = ports_table[port - rdma_start_port(ib_dev)];

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	read_lock_irqsave(&table->rwlock, flags);
	local_index = find_gid(table, gid, &val, false, mask, NULL);
	if (local_index >= 0) {
		if (index)
			*index = local_index;
		read_unlock_irqrestore(&table->rwlock, flags);
		return 0;
	}

	read_unlock_irqrestore(&table->rwlock, flags);
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_cached_gid_by_port);

/**
 * ib_cache_gid_find_by_filter - Returns the GID table index where a
 * specified GID value occurs
 * @ib_dev: The device to query.
 * @gid: The GID value to search for.
 * @port: The port number of the device where the GID value could be
 *   searched.
 * @filter: The filter function is executed on any matching GID in the table.
 *   If the filter function returns true, the corresponding index is returned;
 *   otherwise, we continue searching the GID table. It's guaranteed that
 *   while filter is executed, ndev field is valid and the structure won't
 *   change. filter is executed in an atomic context. filter must not be NULL.
 * @context: The context passed to the filter function on each candidate.
 * @index: The index into the cached GID table where the GID was found. This
 *   parameter may be NULL.
 *
 * ib_cache_gid_find_by_filter() searches for the specified GID value
 * of which the filter function returns true in the port's GID table.
 * This function is only supported on RoCE ports.
 */
static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
				       const union ib_gid *gid,
				       u8 port,
				       bool (*filter)(const union ib_gid *,
						      const struct ib_gid_attr *,
						      void *),
				       void *context,
				       u16 *index)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	unsigned int i;
	unsigned long flags;
	bool found = false;

	if (!ports_table)
		return -EOPNOTSUPP;

	if (port < rdma_start_port(ib_dev) ||
	    port > rdma_end_port(ib_dev) ||
	    !rdma_protocol_roce(ib_dev, port))
		return -EPROTONOSUPPORT;

	table = ports_table[port - rdma_start_port(ib_dev)];

	read_lock_irqsave(&table->rwlock, flags);
	for (i = 0; i < table->sz; i++) {
		struct ib_gid_attr attr;

		if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
			goto next;

		if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
			goto next;

		memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));

		if (filter(gid, &attr, context))
			found = true;

next:
		if (found)
			break;
	}
	read_unlock_irqrestore(&table->rwlock, flags);

	if (!found)
		return -ENOENT;

	if (index)
		*index = i;
	return 0;
}
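
/*
 * An illustrative filter for the function above (a hypothetical helper,
 * shown only to demonstrate the expected signature; it runs under the
 * table's read lock, so it must not sleep). It matches entries whose
 * netdev is the one passed through the context pointer:
 *
 *	static bool gid_matches_ndev(const union ib_gid *gid,
 *				     const struct ib_gid_attr *attr,
 *				     void *context)
 *	{
 *		return attr->ndev == (struct net_device *)context;
 *	}
 */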

static struct ib_gid_table *alloc_gid_table(int sz)
{
	struct ib_gid_table *table =
		kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);

	if (!table)
		return NULL;

	table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
	if (!table->data_vec)
		goto err_free_table;

	mutex_init(&table->lock);

	table->sz = sz;
	rwlock_init(&table->rwlock);

	return table;

err_free_table:
	kfree(table);
	return NULL;
}

static void release_gid_table(struct ib_gid_table *table)
{
	if (table) {
		kfree(table->data_vec);
		kfree(table);
	}
}

static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
				   struct ib_gid_table *table)
{
	int i;
	bool deleted = false;

	if (!table)
		return;

	write_lock_irq(&table->rwlock);
	for (i = 0; i < table->sz; ++i) {
		if (memcmp(&table->data_vec[i].gid, &zgid,
			   sizeof(table->data_vec[i].gid)))
			if (!del_gid(ib_dev, port, table, i,
				     table->data_vec[i].props &
				     GID_TABLE_ENTRY_DEFAULT))
				deleted = true;
	}
	write_unlock_irq(&table->rwlock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);
}

void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
				  struct net_device *ndev,
				  unsigned long gid_type_mask,
				  enum ib_cache_gid_default_mode mode)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	union ib_gid gid;
	struct ib_gid_attr gid_attr;
	struct ib_gid_attr zattr_type = zattr;
	struct ib_gid_table *table;
	unsigned int gid_type;

	table = ports_table[port - rdma_start_port(ib_dev)];

	make_default_gid(ndev, &gid);
	memset(&gid_attr, 0, sizeof(gid_attr));
	gid_attr.ndev = ndev;

	for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
		int ix;
		union ib_gid current_gid;
		struct ib_gid_attr current_gid_attr = {};

		if (1UL << gid_type & ~gid_type_mask)
			continue;

		gid_attr.gid_type = gid_type;

		mutex_lock(&table->lock);
		write_lock_irq(&table->rwlock);
		ix = find_gid(table, NULL, &gid_attr, true,
			      GID_ATTR_FIND_MASK_GID_TYPE |
			      GID_ATTR_FIND_MASK_DEFAULT,
			      NULL);

		/* Couldn't find default GID location */
		WARN_ON(ix < 0);

		zattr_type.gid_type = gid_type;

		if (!__ib_cache_gid_get(ib_dev, port, ix,
					&current_gid, &current_gid_attr) &&
		    mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
		    !memcmp(&gid, &current_gid, sizeof(gid)) &&
		    !memcmp(&gid_attr, &current_gid_attr, sizeof(gid_attr)))
			goto release;

		if (memcmp(&current_gid, &zgid, sizeof(current_gid)) ||
		    memcmp(&current_gid_attr, &zattr_type,
			   sizeof(current_gid_attr))) {
			if (del_gid(ib_dev, port, table, ix, true)) {
				pr_warn("ib_cache_gid: can't delete index %d for default gid %pI6\n",
					ix, gid.raw);
				goto release;
			} else {
				dispatch_gid_change_event(ib_dev, port);
			}
		}

		if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
			if (add_gid(ib_dev, port, table, ix, &gid, &gid_attr, true))
				pr_warn("ib_cache_gid: unable to add default gid %pI6\n",
					gid.raw);
			else
				dispatch_gid_change_event(ib_dev, port);
		}

release:
		if (current_gid_attr.ndev)
			dev_put(current_gid_attr.ndev);
		write_unlock_irq(&table->rwlock);
		mutex_unlock(&table->lock);
	}
}

static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
				     struct ib_gid_table *table)
{
	unsigned int i;
	unsigned long roce_gid_type_mask;
	unsigned int num_default_gids;
	unsigned int current_gid = 0;

	roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
	num_default_gids = hweight_long(roce_gid_type_mask);
	for (i = 0; i < num_default_gids && i < table->sz; i++) {
		struct ib_gid_table_entry *entry =
			&table->data_vec[i];

		entry->props |= GID_TABLE_ENTRY_DEFAULT;
		current_gid = find_next_bit(&roce_gid_type_mask,
					    BITS_PER_LONG,
					    current_gid);
		entry->attr.gid_type = current_gid++;
	}

	return 0;
}
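
/*
 * Example (illustrative): on a port where roce_gid_type_mask_support()
 * returns 0x3 (both RoCE v1 and RoCE v2 supported), entries 0 and 1 are
 * reserved as the default GIDs for IB_GID_TYPE_IB and
 * IB_GID_TYPE_ROCE_UDP_ENCAP respectively.
 */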

static int _gid_table_setup_one(struct ib_device *ib_dev)
{
	u8 port;
	struct ib_gid_table **table;
	int err = 0;

	table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);

	if (!table) {
		pr_warn("failed to allocate ib gid cache for %s\n",
			ib_dev->name);
		return -ENOMEM;
	}

	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		u8 rdma_port = port + rdma_start_port(ib_dev);

		table[port] =
			alloc_gid_table(
				ib_dev->port_immutable[rdma_port].gid_tbl_len);
		if (!table[port]) {
			err = -ENOMEM;
			goto rollback_table_setup;
		}

		err = gid_table_reserve_default(ib_dev,
						port + rdma_start_port(ib_dev),
						table[port]);
		if (err)
			goto rollback_table_setup;
	}

	ib_dev->cache.gid_cache = table;
	return 0;

rollback_table_setup:
	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table[port]);
		release_gid_table(table[port]);
	}

	kfree(table);
	return err;
}

static void gid_table_release_one(struct ib_device *ib_dev)
{
	struct ib_gid_table **table = ib_dev->cache.gid_cache;
	u8 port;

	if (!table)
		return;

	for (port = 0; port < ib_dev->phys_port_cnt; port++)
		release_gid_table(table[port]);

	kfree(table);
	ib_dev->cache.gid_cache = NULL;
}

static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
	struct ib_gid_table **table = ib_dev->cache.gid_cache;
	u8 port;

	if (!table)
		return;

	for (port = 0; port < ib_dev->phys_port_cnt; port++)
		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table[port]);
}

static int gid_table_setup_one(struct ib_device *ib_dev)
{
	int err;

	err = _gid_table_setup_one(ib_dev);

	if (err)
		return err;

	err = roce_rescan_device(ib_dev);

	if (err) {
		gid_table_cleanup_one(ib_dev);
		gid_table_release_one(ib_dev);
	}

	return err;
}

int ib_get_cached_gid(struct ib_device *device,
		      u8 port_num,
		      int index,
		      union ib_gid *gid,
		      struct ib_gid_attr *gid_attr)
{
	int res;
	unsigned long flags;
	struct ib_gid_table **ports_table = device->cache.gid_cache;
	struct ib_gid_table *table;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	table = ports_table[port_num - rdma_start_port(device)];

	read_lock_irqsave(&table->rwlock, flags);
	res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
	read_unlock_irqrestore(&table->rwlock, flags);

	return res;
}
EXPORT_SYMBOL(ib_get_cached_gid);
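
/*
 * Example (illustrative): __ib_cache_gid_get() takes a reference on the
 * entry's netdev on the caller's behalf, so a caller that passes a
 * gid_attr must drop it when done:
 *
 *	union ib_gid gid;
 *	struct ib_gid_attr attr;
 *
 *	if (!ib_get_cached_gid(device, port, 0, &gid, &attr) && attr.ndev)
 *		dev_put(attr.ndev);
 */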

int ib_find_cached_gid(struct ib_device *device,
		       const union ib_gid *gid,
		       enum ib_gid_type gid_type,
		       struct net_device *ndev,
		       u8 *port_num,
		       u16 *index)
{
	return ib_cache_gid_find(device, gid, gid_type, ndev, port_num, index);
}
EXPORT_SYMBOL(ib_find_cached_gid);

int ib_find_gid_by_filter(struct ib_device *device,
			  const union ib_gid *gid,
			  u8 port_num,
			  bool (*filter)(const union ib_gid *gid,
					 const struct ib_gid_attr *,
					 void *),
			  void *context, u16 *index)
{
	/* Only RoCE GID table supports filter function */
	if (!rdma_cap_roce_gid_table(device, port_num) && filter)
		return -EPROTONOSUPPORT;

	return ib_cache_gid_find_by_filter(device, gid,
					   port_num, filter,
					   context, index);
}
EXPORT_SYMBOL(ib_find_gid_by_filter);

int ib_get_cached_pkey(struct ib_device *device,
		       u8  port_num,
		       int index,
		       u16 *pkey)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int ret = 0;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	if (index < 0 || index >= cache->table_len)
		ret = -EINVAL;
	else
		*pkey = cache->table[index];

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);

int ib_find_cached_pkey(struct ib_device *device,
			u8  port_num,
			u16 pkey,
			u16 *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;
	int partial_ix = -1;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	*index = -1;

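	/*
	 * Bits 0-14 of a PKey hold the base key; bit 15 is the
	 * full-membership flag. Prefer a full-member entry, but fall
	 * back to a limited-member (partial) match if that is all the
	 * table holds.
	 */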
	for (i = 0; i < cache->table_len; ++i)
		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
			if (cache->table[i] & 0x8000) {
				*index = i;
				ret = 0;
				break;
			} else
				partial_ix = i;
		}

	if (ret && partial_ix >= 0) {
		*index = partial_ix;
		ret = 0;
	}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);

int ib_find_exact_cached_pkey(struct ib_device *device,
			      u8  port_num,
			      u16 pkey,
			      u16 *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if (cache->table[i] == pkey) {
			*index = i;
			ret = 0;
			break;
		}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);

int ib_get_cached_lmc(struct ib_device *device,
		      u8 port_num,
		      u8 *lmc)
{
	unsigned long flags;
	int ret = 0;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

static void ib_cache_update(struct ib_device *device,
			    u8 port)
{
	struct ib_port_attr *tprops = NULL;
	struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
	struct ib_gid_cache {
		int table_len;
		union ib_gid table[0];
	} *gid_cache = NULL;
	int i;
	int ret;
	struct ib_gid_table *table;
	struct ib_gid_table **ports_table = device->cache.gid_cache;
	bool use_roce_gid_table =
			rdma_cap_roce_gid_table(device, port);

	if (port < rdma_start_port(device) || port > rdma_end_port(device))
		return;

	table = ports_table[port - rdma_start_port(device)];

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		pr_warn("ib_query_port failed (%d) for %s\n",
			ret, device->name);
		goto err;
	}

	pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
			     sizeof *pkey_cache->table, GFP_KERNEL);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = tprops->pkey_tbl_len;

	if (!use_roce_gid_table) {
		gid_cache = kmalloc(sizeof(*gid_cache) + tprops->gid_tbl_len *
				    sizeof(*gid_cache->table), GFP_KERNEL);
		if (!gid_cache)
			goto err;

		gid_cache->table_len = tprops->gid_tbl_len;
	}

	for (i = 0; i < pkey_cache->table_len; ++i) {
		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
		if (ret) {
			pr_warn("ib_query_pkey failed (%d) for %s (index %d)\n",
				ret, device->name, i);
			goto err;
		}
	}

	if (!use_roce_gid_table) {
		for (i = 0; i < gid_cache->table_len; ++i) {
			ret = ib_query_gid(device, port, i,
					   gid_cache->table + i, NULL);
			if (ret) {
				pr_warn("ib_query_gid failed (%d) for %s (index %d)\n",
					ret, device->name, i);
				goto err;
			}
		}
	}

	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];

	device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
	if (!use_roce_gid_table) {
		write_lock(&table->rwlock);
		for (i = 0; i < gid_cache->table_len; i++) {
			modify_gid(device, port, table, i, gid_cache->table + i,
				   &zattr, false);
		}
		write_unlock(&table->rwlock);
	}

	device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;

	write_unlock_irq(&device->cache.lock);

	kfree(gid_cache);
	kfree(old_pkey_cache);
	kfree(tprops);
	return;

err:
	kfree(pkey_cache);
	kfree(gid_cache);
	kfree(tprops);
}

static void ib_cache_task(struct work_struct *_work)
{
	struct ib_update_work *work =
		container_of(_work, struct ib_update_work, work);

	ib_cache_update(work->device, work->port_num);
	kfree(work);
}

static void ib_cache_event(struct ib_event_handler *handler,
			   struct ib_event *event)
{
	struct ib_update_work *work;

	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER ||
	    event->event == IB_EVENT_GID_CHANGE) {
		work = kmalloc(sizeof *work, GFP_ATOMIC);
		if (work) {
			INIT_WORK(&work->work, ib_cache_task);
			work->device   = event->device;
			work->port_num = event->element.port_num;
			queue_work(ib_wq, &work->work);
		}
	}
}

int ib_cache_setup_one(struct ib_device *device)
{
	int p;
	int err;

	rwlock_init(&device->cache.lock);

	device->cache.pkey_cache =
		kzalloc(sizeof *device->cache.pkey_cache *
			(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
	device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
					  (rdma_end_port(device) -
					   rdma_start_port(device) + 1),
					  GFP_KERNEL);
	if (!device->cache.pkey_cache ||
	    !device->cache.lmc_cache) {
		pr_warn("Couldn't allocate cache for %s\n", device->name);
		return -ENOMEM;
	}

	err = gid_table_setup_one(device);
	if (err)
		/* Allocated memory will be cleaned in the release function */
		return err;

	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
		ib_cache_update(device, p + rdma_start_port(device));

	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
			      device, ib_cache_event);
	err = ib_register_event_handler(&device->cache.event_handler);
	if (err)
		goto err;

	return 0;

err:
	gid_table_cleanup_one(device);
	return err;
}

void ib_cache_release_one(struct ib_device *device)
{
	int p;

	/*
	 * The release function frees all the cache elements.
	 * This function should be called as part of freeing
	 * all the device's resources when the cache can no
	 * longer be accessed.
	 */
	if (device->cache.pkey_cache)
		for (p = 0;
		     p <= rdma_end_port(device) - rdma_start_port(device); ++p)
			kfree(device->cache.pkey_cache[p]);

	gid_table_release_one(device);
	kfree(device->cache.pkey_cache);
	kfree(device->cache.lmc_cache);
}

void ib_cache_cleanup_one(struct ib_device *device)
{
	/* The cleanup function unregisters the event handler,
	 * waits for all in-progress workqueue elements and cleans
	 * up the GID cache. This function should be called after
	 * the device was removed from the devices list and all
	 * clients were removed, so the cache exists but is
	 * non-functional and shouldn't be updated anymore.
	 */
	ib_unregister_event_handler(&device->cache.event_handler);
	flush_workqueue(ib_wq);
	gid_table_cleanup_one(device);
}

void __init ib_cache_setup(void)
{
	roce_gid_mgmt_init();
}

void __exit ib_cache_cleanup(void)
{
	roce_gid_mgmt_cleanup();
}