drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
1 /*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
19 *
20 * GPL HEADER END
21 */
22 /*
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
25 *
26 * Copyright (c) 2011, 2015, Intel Corporation.
27 */
28 /*
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
31 *
32 * lnet/klnds/socklnd/socklnd.c
33 *
34 * Author: Zach Brown <zab@zabbo.net>
35 * Author: Peter J. Braam <braam@clusterfs.com>
36 * Author: Phil Schwan <phil@clusterfs.com>
37 * Author: Eric Barton <eric@bartonsoftware.com>
38 */
39
40 #include "socklnd.h"
41
42 static lnd_t the_ksocklnd;
43 struct ksock_nal_data ksocknal_data;
44
45 static struct ksock_interface *
46 ksocknal_ip2iface(lnet_ni_t *ni, __u32 ip)
47 {
48 struct ksock_net *net = ni->ni_data;
49 int i;
50 struct ksock_interface *iface;
51
52 for (i = 0; i < net->ksnn_ninterfaces; i++) {
53 LASSERT(i < LNET_MAX_INTERFACES);
54 iface = &net->ksnn_interfaces[i];
55
56 if (iface->ksni_ipaddr == ip)
57 return iface;
58 }
59
60 return NULL;
61 }
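/*
 * [Editorial sketch, not in the original source] ksocknal_ip2iface() is a
 * linear scan of the net's interface table, and the returned pointer is
 * only stable while ksnd_global_lock is held, so a caller looks like:
 *
 *	read_lock(&ksocknal_data.ksnd_global_lock);
 *	iface = ksocknal_ip2iface(ni, ip);
 *	if (iface)
 *		CDEBUG(D_NET, "%pI4h: %d routes\n",
 *		       &iface->ksni_ipaddr, iface->ksni_nroutes);
 *	read_unlock(&ksocknal_data.ksnd_global_lock);
 */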
62
63 static struct ksock_route *
64 ksocknal_create_route(__u32 ipaddr, int port)
65 {
66 struct ksock_route *route;
67
68 LIBCFS_ALLOC(route, sizeof(*route));
69 if (!route)
70 return NULL;
71
72 atomic_set(&route->ksnr_refcount, 1);
73 route->ksnr_peer = NULL;
74 route->ksnr_retry_interval = 0; /* OK to connect at any time */
75 route->ksnr_ipaddr = ipaddr;
76 route->ksnr_port = port;
77 route->ksnr_scheduled = 0;
78 route->ksnr_connecting = 0;
79 route->ksnr_connected = 0;
80 route->ksnr_deleted = 0;
81 route->ksnr_conn_count = 0;
82 route->ksnr_share_count = 0;
83
84 return route;
85 }
86
87 void
88 ksocknal_destroy_route(struct ksock_route *route)
89 {
90 LASSERT(!atomic_read(&route->ksnr_refcount));
91
92 if (route->ksnr_peer)
93 ksocknal_peer_decref(route->ksnr_peer);
94
95 LIBCFS_FREE(route, sizeof(*route));
96 }
97
98 static int
99 ksocknal_create_peer(struct ksock_peer **peerp, lnet_ni_t *ni, lnet_process_id_t id)
100 {
101 int cpt = lnet_cpt_of_nid(id.nid);
102 struct ksock_net *net = ni->ni_data;
103 struct ksock_peer *peer;
104
105 LASSERT(id.nid != LNET_NID_ANY);
106 LASSERT(id.pid != LNET_PID_ANY);
107 LASSERT(!in_interrupt());
108
109 LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer));
110 if (!peer)
111 return -ENOMEM;
112
113 peer->ksnp_ni = ni;
114 peer->ksnp_id = id;
115 atomic_set(&peer->ksnp_refcount, 1); /* 1 ref for caller */
116 peer->ksnp_closing = 0;
117 peer->ksnp_accepting = 0;
118 peer->ksnp_proto = NULL;
119 peer->ksnp_last_alive = 0;
120 peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
121
122 INIT_LIST_HEAD(&peer->ksnp_conns);
123 INIT_LIST_HEAD(&peer->ksnp_routes);
124 INIT_LIST_HEAD(&peer->ksnp_tx_queue);
125 INIT_LIST_HEAD(&peer->ksnp_zc_req_list);
126 spin_lock_init(&peer->ksnp_lock);
127
128 spin_lock_bh(&net->ksnn_lock);
129
130 if (net->ksnn_shutdown) {
131 spin_unlock_bh(&net->ksnn_lock);
132
133 LIBCFS_FREE(peer, sizeof(*peer));
134 CERROR("Can't create peer: network shutdown\n");
135 return -ESHUTDOWN;
136 }
137
138 net->ksnn_npeers++;
139
140 spin_unlock_bh(&net->ksnn_lock);
141
142 *peerp = peer;
143 return 0;
144 }
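/*
 * [Editorial note] The ksnn_npeers count incremented above (and
 * decremented in ksocknal_destroy_peer() under the same ksnn_lock),
 * together with the ksnn_shutdown check, is evidently what allows a
 * network shutdown to refuse new peers and wait for existing ones to
 * drain.
 */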
145
146 void
147 ksocknal_destroy_peer(struct ksock_peer *peer)
148 {
149 struct ksock_net *net = peer->ksnp_ni->ni_data;
150
151 CDEBUG(D_NET, "peer %s %p deleted\n",
152 libcfs_id2str(peer->ksnp_id), peer);
153
154 LASSERT(!atomic_read(&peer->ksnp_refcount));
155 LASSERT(!peer->ksnp_accepting);
156 LASSERT(list_empty(&peer->ksnp_conns));
157 LASSERT(list_empty(&peer->ksnp_routes));
158 LASSERT(list_empty(&peer->ksnp_tx_queue));
159 LASSERT(list_empty(&peer->ksnp_zc_req_list));
160
161 LIBCFS_FREE(peer, sizeof(*peer));
162
163 /*
164 * NB a peer's connections and routes keep a reference on their peer
165 * until they are destroyed, so we can be assured that _all_ state to
166 * do with this peer has been cleaned up when its refcount drops to
167 * zero.
168 */
169 spin_lock_bh(&net->ksnn_lock);
170 net->ksnn_npeers--;
171 spin_unlock_bh(&net->ksnn_lock);
172 }
173
174 struct ksock_peer *
175 ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id)
176 {
177 struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
178 struct list_head *tmp;
179 struct ksock_peer *peer;
180
181 list_for_each(tmp, peer_list) {
182 peer = list_entry(tmp, struct ksock_peer, ksnp_list);
183
184 LASSERT(!peer->ksnp_closing);
185
186 if (peer->ksnp_ni != ni)
187 continue;
188
189 if (peer->ksnp_id.nid != id.nid ||
190 peer->ksnp_id.pid != id.pid)
191 continue;
192
193 CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
194 peer, libcfs_id2str(id),
195 atomic_read(&peer->ksnp_refcount));
196 return peer;
197 }
198 return NULL;
199 }
200
201 struct ksock_peer *
202 ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id)
203 {
204 struct ksock_peer *peer;
205
206 read_lock(&ksocknal_data.ksnd_global_lock);
207 peer = ksocknal_find_peer_locked(ni, id);
208 if (peer) /* +1 ref for caller? */
209 ksocknal_peer_addref(peer);
210 read_unlock(&ksocknal_data.ksnd_global_lock);
211
212 return peer;
213 }
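/*
 * [Editorial sketch] The lookup returns with a peer reference held
 * (the "+1 ref for caller" above), so every use must be balanced:
 *
 *	peer = ksocknal_find_peer(ni, id);
 *	if (!peer)
 *		return;
 *	... inspect peer ...
 *	ksocknal_peer_decref(peer);
 */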
214
215 static void
216 ksocknal_unlink_peer_locked(struct ksock_peer *peer)
217 {
218 int i;
219 __u32 ip;
220 struct ksock_interface *iface;
221
222 for (i = 0; i < peer->ksnp_n_passive_ips; i++) {
223 LASSERT(i < LNET_MAX_INTERFACES);
224 ip = peer->ksnp_passive_ips[i];
225
226 iface = ksocknal_ip2iface(peer->ksnp_ni, ip);
227 /*
228 * All IPs in peer->ksnp_passive_ips[] come from the
229 * interface list, therefore the call must succeed.
230 */
231 LASSERT(iface);
232
233 CDEBUG(D_NET, "peer=%p iface=%p ksni_nroutes=%d\n",
234 peer, iface, iface->ksni_nroutes);
235 iface->ksni_npeers--;
236 }
237
238 LASSERT(list_empty(&peer->ksnp_conns));
239 LASSERT(list_empty(&peer->ksnp_routes));
240 LASSERT(!peer->ksnp_closing);
241 peer->ksnp_closing = 1;
242 list_del(&peer->ksnp_list);
243 /* lose peerlist's ref */
244 ksocknal_peer_decref(peer);
245 }
246
247 static int
248 ksocknal_get_peer_info(lnet_ni_t *ni, int index,
249 lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip,
250 int *port, int *conn_count, int *share_count)
251 {
252 struct ksock_peer *peer;
253 struct list_head *ptmp;
254 struct ksock_route *route;
255 struct list_head *rtmp;
256 int i;
257 int j;
258 int rc = -ENOENT;
259
260 read_lock(&ksocknal_data.ksnd_global_lock);
261
262 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
263 list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
264 peer = list_entry(ptmp, struct ksock_peer, ksnp_list);
265
266 if (peer->ksnp_ni != ni)
267 continue;
268
269 if (!peer->ksnp_n_passive_ips &&
270 list_empty(&peer->ksnp_routes)) {
271 if (index-- > 0)
272 continue;
273
274 *id = peer->ksnp_id;
275 *myip = 0;
276 *peer_ip = 0;
277 *port = 0;
278 *conn_count = 0;
279 *share_count = 0;
280 rc = 0;
281 goto out;
282 }
283
284 for (j = 0; j < peer->ksnp_n_passive_ips; j++) {
285 if (index-- > 0)
286 continue;
287
288 *id = peer->ksnp_id;
289 *myip = peer->ksnp_passive_ips[j];
290 *peer_ip = 0;
291 *port = 0;
292 *conn_count = 0;
293 *share_count = 0;
294 rc = 0;
295 goto out;
296 }
297
298 list_for_each(rtmp, &peer->ksnp_routes) {
299 if (index-- > 0)
300 continue;
301
302 route = list_entry(rtmp, struct ksock_route,
303 ksnr_list);
304
305 *id = peer->ksnp_id;
306 *myip = route->ksnr_myipaddr;
307 *peer_ip = route->ksnr_ipaddr;
308 *port = route->ksnr_port;
309 *conn_count = route->ksnr_conn_count;
310 *share_count = route->ksnr_share_count;
311 rc = 0;
312 goto out;
313 }
314 }
315 }
316 out:
317 read_unlock(&ksocknal_data.ksnd_global_lock);
318 return rc;
319 }
320
321 static void
322 ksocknal_associate_route_conn_locked(struct ksock_route *route, struct ksock_conn *conn)
323 {
324 struct ksock_peer *peer = route->ksnr_peer;
325 int type = conn->ksnc_type;
326 struct ksock_interface *iface;
327
328 conn->ksnc_route = route;
329 ksocknal_route_addref(route);
330
331 if (route->ksnr_myipaddr != conn->ksnc_myipaddr) {
332 if (!route->ksnr_myipaddr) {
333 /* route wasn't bound locally yet (the initial route) */
334 CDEBUG(D_NET, "Binding %s %pI4h to %pI4h\n",
335 libcfs_id2str(peer->ksnp_id),
336 &route->ksnr_ipaddr,
337 &conn->ksnc_myipaddr);
338 } else {
339 CDEBUG(D_NET, "Rebinding %s %pI4h from %pI4h to %pI4h\n",
340 libcfs_id2str(peer->ksnp_id),
341 &route->ksnr_ipaddr,
342 &route->ksnr_myipaddr,
343 &conn->ksnc_myipaddr);
344
345 iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
346 route->ksnr_myipaddr);
347 if (iface)
348 iface->ksni_nroutes--;
349 }
350 route->ksnr_myipaddr = conn->ksnc_myipaddr;
351 iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
352 route->ksnr_myipaddr);
353 if (iface)
354 iface->ksni_nroutes++;
355 }
356
357 route->ksnr_connected |= (1 << type);
358 route->ksnr_conn_count++;
359
360 /*
361 * Successful connection => further attempts can
362 * proceed immediately
363 */
364 route->ksnr_retry_interval = 0;
365 }
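/*
 * [Editorial note] ksnr_connected is a per-type bitmask: the bit for this
 * connection type is set here and cleared again in
 * ksocknal_close_conn_locked() once no other conn of the same type
 * remains on the route.
 */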
366
367 static void
368 ksocknal_add_route_locked(struct ksock_peer *peer, struct ksock_route *route)
369 {
370 struct list_head *tmp;
371 struct ksock_conn *conn;
372 struct ksock_route *route2;
373
374 LASSERT(!peer->ksnp_closing);
375 LASSERT(!route->ksnr_peer);
376 LASSERT(!route->ksnr_scheduled);
377 LASSERT(!route->ksnr_connecting);
378 LASSERT(!route->ksnr_connected);
379
380 /* LASSERT(unique) */
381 list_for_each(tmp, &peer->ksnp_routes) {
382 route2 = list_entry(tmp, struct ksock_route, ksnr_list);
383
384 if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
385 CERROR("Duplicate route %s %pI4h\n",
386 libcfs_id2str(peer->ksnp_id),
387 &route->ksnr_ipaddr);
388 LBUG();
389 }
390 }
391
392 route->ksnr_peer = peer;
393 ksocknal_peer_addref(peer);
394 /* peer's routelist takes over my ref on 'route' */
395 list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
396
397 list_for_each(tmp, &peer->ksnp_conns) {
398 conn = list_entry(tmp, struct ksock_conn, ksnc_list);
399
400 if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
401 continue;
402
403 ksocknal_associate_route_conn_locked(route, conn);
404 /* keep going (typed routes) */
405 }
406 }
407
408 static void
409 ksocknal_del_route_locked(struct ksock_route *route)
410 {
411 struct ksock_peer *peer = route->ksnr_peer;
412 struct ksock_interface *iface;
413 struct ksock_conn *conn;
414 struct list_head *ctmp;
415 struct list_head *cnxt;
416
417 LASSERT(!route->ksnr_deleted);
418
419 /* Close associated conns */
420 list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) {
421 conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
422
423 if (conn->ksnc_route != route)
424 continue;
425
426 ksocknal_close_conn_locked(conn, 0);
427 }
428
429 if (route->ksnr_myipaddr) {
430 iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
431 route->ksnr_myipaddr);
432 if (iface)
433 iface->ksni_nroutes--;
434 }
435
436 route->ksnr_deleted = 1;
437 list_del(&route->ksnr_list);
438 ksocknal_route_decref(route); /* drop peer's ref */
439
440 if (list_empty(&peer->ksnp_routes) &&
441 list_empty(&peer->ksnp_conns)) {
442 /*
443 * I've just removed the last route to a peer with no active
444 * connections
445 */
446 ksocknal_unlink_peer_locked(peer);
447 }
448 }
449
450 int
451 ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
452 {
453 struct list_head *tmp;
454 struct ksock_peer *peer;
455 struct ksock_peer *peer2;
456 struct ksock_route *route;
457 struct ksock_route *route2;
458 int rc;
459
460 if (id.nid == LNET_NID_ANY ||
461 id.pid == LNET_PID_ANY)
462 return -EINVAL;
463
464 /* Have a brand new peer ready... */
465 rc = ksocknal_create_peer(&peer, ni, id);
466 if (rc)
467 return rc;
468
469 route = ksocknal_create_route(ipaddr, port);
470 if (!route) {
471 ksocknal_peer_decref(peer);
472 return -ENOMEM;
473 }
474
475 write_lock_bh(&ksocknal_data.ksnd_global_lock);
476
477 /* always called with a ref on ni, so shutdown can't have started */
478 LASSERT(!((struct ksock_net *)ni->ni_data)->ksnn_shutdown);
479
480 peer2 = ksocknal_find_peer_locked(ni, id);
481 if (peer2) {
482 ksocknal_peer_decref(peer);
483 peer = peer2;
484 } else {
485 /* peer table takes my ref on peer */
486 list_add_tail(&peer->ksnp_list,
487 ksocknal_nid2peerlist(id.nid));
488 }
489
490 route2 = NULL;
491 list_for_each(tmp, &peer->ksnp_routes) {
492 route2 = list_entry(tmp, struct ksock_route, ksnr_list);
493
494 if (route2->ksnr_ipaddr == ipaddr)
495 break;
496
497 route2 = NULL;
498 }
499 if (!route2) {
500 ksocknal_add_route_locked(peer, route);
501 route->ksnr_share_count++;
502 } else {
503 ksocknal_route_decref(route);
504 route2->ksnr_share_count++;
505 }
506
507 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
508
509 return 0;
510 }
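/*
 * [Editorial note] This is the worker behind IOC_LIBCFS_ADD_PEER in
 * ksocknal_ctl() below, and ksocknal_query() also calls it to create a
 * peer/route pair on demand.
 */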
511
512 static void
513 ksocknal_del_peer_locked(struct ksock_peer *peer, __u32 ip)
514 {
515 struct ksock_conn *conn;
516 struct ksock_route *route;
517 struct list_head *tmp;
518 struct list_head *nxt;
519 int nshared;
520
521 LASSERT(!peer->ksnp_closing);
522
523 /* Extra ref prevents peer disappearing until I'm done with it */
524 ksocknal_peer_addref(peer);
525
526 list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
527 route = list_entry(tmp, struct ksock_route, ksnr_list);
528
529 /* skip unless wildcard (!ip) or the route's address matches */
530 if (ip && route->ksnr_ipaddr != ip)
531 continue;
532
533 route->ksnr_share_count = 0;
534 /* This deletes associated conns too */
535 ksocknal_del_route_locked(route);
536 }
537
538 nshared = 0;
539 list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
540 route = list_entry(tmp, struct ksock_route, ksnr_list);
541 nshared += route->ksnr_share_count;
542 }
543
544 if (!nshared) {
545 /*
546 * remove everything else if there are no explicit entries
547 * left
548 */
549 list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
550 route = list_entry(tmp, struct ksock_route, ksnr_list);
551
552 /* we should only be removing auto-entries */
553 LASSERT(!route->ksnr_share_count);
554 ksocknal_del_route_locked(route);
555 }
556
557 list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
558 conn = list_entry(tmp, struct ksock_conn, ksnc_list);
559
560 ksocknal_close_conn_locked(conn, 0);
561 }
562 }
563
564 ksocknal_peer_decref(peer);
565 /* NB peer unlinks itself when last conn/route is removed */
566 }
567
568 static int
569 ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
570 {
571 LIST_HEAD(zombies);
572 struct list_head *ptmp;
573 struct list_head *pnxt;
574 struct ksock_peer *peer;
575 int lo;
576 int hi;
577 int i;
578 int rc = -ENOENT;
579
580 write_lock_bh(&ksocknal_data.ksnd_global_lock);
581
582 if (id.nid != LNET_NID_ANY) {
583 lo = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
584 hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
585 } else {
586 lo = 0;
587 hi = ksocknal_data.ksnd_peer_hash_size - 1;
588 }
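/*
 * [Editorial note] For a specific NID, lo == hi: both expressions index
 * the single hash chain ksocknal_nid2peerlist() maps the NID to, so the
 * loop below walks exactly one chain; LNET_NID_ANY sweeps the whole
 * table.
 */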
589
590 for (i = lo; i <= hi; i++) {
591 list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {
592 peer = list_entry(ptmp, struct ksock_peer, ksnp_list);
593
594 if (peer->ksnp_ni != ni)
595 continue;
596
597 if (!((id.nid == LNET_NID_ANY || peer->ksnp_id.nid == id.nid) &&
598 (id.pid == LNET_PID_ANY || peer->ksnp_id.pid == id.pid)))
599 continue;
600
601 ksocknal_peer_addref(peer); /* a ref for me... */
602
603 ksocknal_del_peer_locked(peer, ip);
604
605 if (peer->ksnp_closing &&
606 !list_empty(&peer->ksnp_tx_queue)) {
607 LASSERT(list_empty(&peer->ksnp_conns));
608 LASSERT(list_empty(&peer->ksnp_routes));
609
610 list_splice_init(&peer->ksnp_tx_queue,
611 &zombies);
612 }
613
614 ksocknal_peer_decref(peer); /* ...till here */
615
616 rc = 0; /* matched! */
617 }
618 }
619
620 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
621
622 ksocknal_txlist_done(ni, &zombies, 1);
623
624 return rc;
625 }
626
627 static struct ksock_conn *
628 ksocknal_get_conn_by_idx(lnet_ni_t *ni, int index)
629 {
630 struct ksock_peer *peer;
631 struct list_head *ptmp;
632 struct ksock_conn *conn;
633 struct list_head *ctmp;
634 int i;
635
636 read_lock(&ksocknal_data.ksnd_global_lock);
637
638 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
639 list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
640 peer = list_entry(ptmp, struct ksock_peer, ksnp_list);
641
642 LASSERT(!peer->ksnp_closing);
643
644 if (peer->ksnp_ni != ni)
645 continue;
646
647 list_for_each(ctmp, &peer->ksnp_conns) {
648 if (index-- > 0)
649 continue;
650
651 conn = list_entry(ctmp, struct ksock_conn,
652 ksnc_list);
653 ksocknal_conn_addref(conn);
654 read_unlock(&ksocknal_data.ksnd_global_lock);
655 return conn;
656 }
657 }
658 }
659
660 read_unlock(&ksocknal_data.ksnd_global_lock);
661 return NULL;
662 }
663
664 static struct ksock_sched *
665 ksocknal_choose_scheduler_locked(unsigned int cpt)
666 {
667 struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt];
668 struct ksock_sched *sched;
669 int i;
670
671 LASSERT(info->ksi_nthreads > 0);
672
673 sched = &info->ksi_scheds[0];
674 /*
675 * NB: it's safe so far, but info->ksi_nthreads could be changed
676 * at runtime when we have dynamic LNet configuration, then we
677 * need to take care of this.
678 */
679 for (i = 1; i < info->ksi_nthreads; i++) {
680 if (sched->kss_nconns > info->ksi_scheds[i].kss_nconns)
681 sched = &info->ksi_scheds[i];
682 }
683
684 return sched;
685 }
686
687 static int
688 ksocknal_local_ipvec(lnet_ni_t *ni, __u32 *ipaddrs)
689 {
690 struct ksock_net *net = ni->ni_data;
691 int i;
692 int nip;
693
694 read_lock(&ksocknal_data.ksnd_global_lock);
695
696 nip = net->ksnn_ninterfaces;
697 LASSERT(nip <= LNET_MAX_INTERFACES);
698
699 /*
700 * Only offer interfaces for additional connections if I have
701 * more than one.
702 */
703 if (nip < 2) {
704 read_unlock(&ksocknal_data.ksnd_global_lock);
705 return 0;
706 }
707
708 for (i = 0; i < nip; i++) {
709 ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr;
710 LASSERT(ipaddrs[i]);
711 }
712
713 read_unlock(&ksocknal_data.ksnd_global_lock);
714 return nip;
715 }
716
717 static int
718 ksocknal_match_peerip(struct ksock_interface *iface, __u32 *ips, int nips)
719 {
720 int best_netmatch = 0;
721 int best_xor = 0;
722 int best = -1;
723 int this_xor;
724 int this_netmatch;
725 int i;
726
727 for (i = 0; i < nips; i++) {
728 if (!ips[i])
729 continue;
730
731 this_xor = ips[i] ^ iface->ksni_ipaddr;
732 this_netmatch = !(this_xor & iface->ksni_netmask) ? 1 : 0;
733
734 if (!(best < 0 ||
735 best_netmatch < this_netmatch ||
736 (best_netmatch == this_netmatch &&
737 best_xor > this_xor)))
738 continue;
739
740 best = i;
741 best_netmatch = this_netmatch;
742 best_xor = this_xor;
743 }
744
745 LASSERT(best >= 0);
746 return best;
747 }
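/*
 * [Editorial example, made-up addresses] With an interface at
 * 192.168.1.10/24, a peer IP of 192.168.1.20 XORs to bits that all fall
 * outside the netmask, so this_netmatch == 1 and it beats any off-subnet
 * candidate; among same-subnet candidates the smallest XOR ("closest"
 * address) wins.
 */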
748
749 static int
750 ksocknal_select_ips(struct ksock_peer *peer, __u32 *peerips, int n_peerips)
751 {
752 rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
753 struct ksock_net *net = peer->ksnp_ni->ni_data;
754 struct ksock_interface *iface;
755 struct ksock_interface *best_iface;
756 int n_ips;
757 int i;
758 int j;
759 int k;
760 __u32 ip;
761 __u32 xor;
762 int this_netmatch;
763 int best_netmatch;
764 int best_npeers;
765
766 /*
767 * CAVEAT EMPTOR: We do all our interface matching with an
768 * exclusive hold of global lock at IRQ priority. We're only
769 * expecting to be dealing with small numbers of interfaces, so the
770 * O(n**3)-ness shouldn't matter
771 */
772 /*
773 * Also note that I'm not going to return more than n_peerips
774 * interfaces, even if I have more myself
775 */
776 write_lock_bh(global_lock);
777
778 LASSERT(n_peerips <= LNET_MAX_INTERFACES);
779 LASSERT(net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
780
781 /*
782 * Only match interfaces for additional connections
783 * if I have > 1 interface
784 */
785 n_ips = (net->ksnn_ninterfaces < 2) ? 0 :
786 min(n_peerips, net->ksnn_ninterfaces);
787
788 for (i = 0; peer->ksnp_n_passive_ips < n_ips; i++) {
789 /* ^ yes really... */
790
791 /*
792 * If we have any new interfaces, first tick off all the
793 * peer IPs that match old interfaces, then choose new
794 * interfaces to match the remaining peer IPS.
795 * We don't forget interfaces we've stopped using; we might
796 * start using them again...
797 */
798 if (i < peer->ksnp_n_passive_ips) {
799 /* Old interface. */
800 ip = peer->ksnp_passive_ips[i];
801 best_iface = ksocknal_ip2iface(peer->ksnp_ni, ip);
802
803 /* peer passive ips are kept up to date */
804 LASSERT(best_iface);
805 } else {
806 /* choose a new interface */
807 LASSERT(i == peer->ksnp_n_passive_ips);
808
809 best_iface = NULL;
810 best_netmatch = 0;
811 best_npeers = 0;
812
813 for (j = 0; j < net->ksnn_ninterfaces; j++) {
814 iface = &net->ksnn_interfaces[j];
815 ip = iface->ksni_ipaddr;
816
817 for (k = 0; k < peer->ksnp_n_passive_ips; k++)
818 if (peer->ksnp_passive_ips[k] == ip)
819 break;
820
821 if (k < peer->ksnp_n_passive_ips) /* using it already */
822 continue;
823
824 k = ksocknal_match_peerip(iface, peerips, n_peerips);
825 xor = ip ^ peerips[k];
826 this_netmatch = !(xor & iface->ksni_netmask) ? 1 : 0;
827
828 if (!(!best_iface ||
829 best_netmatch < this_netmatch ||
830 (best_netmatch == this_netmatch &&
831 best_npeers > iface->ksni_npeers)))
832 continue;
833
834 best_iface = iface;
835 best_netmatch = this_netmatch;
836 best_npeers = iface->ksni_npeers;
837 }
838
839 LASSERT(best_iface);
840
841 best_iface->ksni_npeers++;
842 ip = best_iface->ksni_ipaddr;
843 peer->ksnp_passive_ips[i] = ip;
844 peer->ksnp_n_passive_ips = i + 1;
845 }
846
847 /* mark the best matching peer IP used */
848 j = ksocknal_match_peerip(best_iface, peerips, n_peerips);
849 peerips[j] = 0;
850 }
851
852 /* Overwrite input peer IP addresses */
853 memcpy(peerips, peer->ksnp_passive_ips, n_ips * sizeof(*peerips));
854
855 write_unlock_bh(global_lock);
856
857 return n_ips;
858 }
859
860 static void
861 ksocknal_create_routes(struct ksock_peer *peer, int port,
862 __u32 *peer_ipaddrs, int npeer_ipaddrs)
863 {
864 struct ksock_route *newroute = NULL;
865 rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
866 lnet_ni_t *ni = peer->ksnp_ni;
867 struct ksock_net *net = ni->ni_data;
868 struct list_head *rtmp;
869 struct ksock_route *route;
870 struct ksock_interface *iface;
871 struct ksock_interface *best_iface;
872 int best_netmatch;
873 int this_netmatch;
874 int best_nroutes;
875 int i;
876 int j;
877
878 /*
879 * CAVEAT EMPTOR: We do all our interface matching with an
880 * exclusive hold of global lock at IRQ priority. We're only
881 * expecting to be dealing with small numbers of interfaces, so the
882 * O(n**3)-ness here shouldn't matter
883 */
884 write_lock_bh(global_lock);
885
886 if (net->ksnn_ninterfaces < 2) {
887 /*
888 * Only create additional connections
889 * if I have > 1 interface
890 */
891 write_unlock_bh(global_lock);
892 return;
893 }
894
895 LASSERT(npeer_ipaddrs <= LNET_MAX_INTERFACES);
896
897 for (i = 0; i < npeer_ipaddrs; i++) {
898 if (newroute) {
899 newroute->ksnr_ipaddr = peer_ipaddrs[i];
900 } else {
901 write_unlock_bh(global_lock);
902
903 newroute = ksocknal_create_route(peer_ipaddrs[i], port);
904 if (!newroute)
905 return;
906
907 write_lock_bh(global_lock);
908 }
909
910 if (peer->ksnp_closing) {
911 /* peer got closed under me */
912 break;
913 }
914
915 /* Already got a route? */
916 route = NULL;
917 list_for_each(rtmp, &peer->ksnp_routes) {
918 route = list_entry(rtmp, struct ksock_route, ksnr_list);
919
920 if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
921 break;
922
923 route = NULL;
924 }
925 if (route)
926 continue;
927
928 best_iface = NULL;
929 best_nroutes = 0;
930 best_netmatch = 0;
931
932 LASSERT(net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
933
934 /* Select interface to connect from */
935 for (j = 0; j < net->ksnn_ninterfaces; j++) {
936 iface = &net->ksnn_interfaces[j];
937
938 /* Using this interface already? */
939 list_for_each(rtmp, &peer->ksnp_routes) {
940 route = list_entry(rtmp, struct ksock_route,
941 ksnr_list);
942
943 if (route->ksnr_myipaddr == iface->ksni_ipaddr)
944 break;
945
946 route = NULL;
947 }
948 if (route)
949 continue;
950
951 this_netmatch = (!((iface->ksni_ipaddr ^
952 newroute->ksnr_ipaddr) &
953 iface->ksni_netmask)) ? 1 : 0;
954
955 if (!(!best_iface ||
956 best_netmatch < this_netmatch ||
957 (best_netmatch == this_netmatch &&
958 best_nroutes > iface->ksni_nroutes)))
959 continue;
960
961 best_iface = iface;
962 best_netmatch = this_netmatch;
963 best_nroutes = iface->ksni_nroutes;
964 }
965
966 if (!best_iface)
967 continue;
968
969 newroute->ksnr_myipaddr = best_iface->ksni_ipaddr;
970 best_iface->ksni_nroutes++;
971
972 ksocknal_add_route_locked(peer, newroute);
973 newroute = NULL;
974 }
975
976 write_unlock_bh(global_lock);
977 if (newroute)
978 ksocknal_route_decref(newroute);
979 }
980
981 int
982 ksocknal_accept(lnet_ni_t *ni, struct socket *sock)
983 {
984 struct ksock_connreq *cr;
985 int rc;
986 __u32 peer_ip;
987 int peer_port;
988
989 rc = lnet_sock_getaddr(sock, 1, &peer_ip, &peer_port);
990 LASSERT(!rc); /* we succeeded before */
991
992 LIBCFS_ALLOC(cr, sizeof(*cr));
993 if (!cr) {
994 LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from %pI4h: memory exhausted\n",
995 &peer_ip);
996 return -ENOMEM;
997 }
998
999 lnet_ni_addref(ni);
1000 cr->ksncr_ni = ni;
1001 cr->ksncr_sock = sock;
1002
1003 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
1004
1005 list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
1006 wake_up(&ksocknal_data.ksnd_connd_waitq);
1007
1008 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
1009 return 0;
1010 }
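/*
 * [Editorial note] No handshaking happens here: the request is queued on
 * ksnd_connd_connreqs and a connd thread is woken to do the work;
 * presumably it enters the passive path of ksocknal_create_conn() below
 * (route == NULL, SOCKLND_CONN_NONE) to process the queued socket.
 */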
1011
1012 static int
1013 ksocknal_connecting(struct ksock_peer *peer, __u32 ipaddr)
1014 {
1015 struct ksock_route *route;
1016
1017 list_for_each_entry(route, &peer->ksnp_routes, ksnr_list) {
1018 if (route->ksnr_ipaddr == ipaddr)
1019 return route->ksnr_connecting;
1020 }
1021 return 0;
1022 }
1023
1024 int
1025 ksocknal_create_conn(lnet_ni_t *ni, struct ksock_route *route,
1026 struct socket *sock, int type)
1027 {
1028 rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
1029 LIST_HEAD(zombies);
1030 lnet_process_id_t peerid;
1031 struct list_head *tmp;
1032 __u64 incarnation;
1033 struct ksock_conn *conn;
1034 struct ksock_conn *conn2;
1035 struct ksock_peer *peer = NULL;
1036 struct ksock_peer *peer2;
1037 struct ksock_sched *sched;
1038 ksock_hello_msg_t *hello;
1039 int cpt;
1040 struct ksock_tx *tx;
1041 struct ksock_tx *txtmp;
1042 int rc;
1043 int active;
1044 char *warn = NULL;
1045
1046 active = !!route;
1047
1048 LASSERT(active == (type != SOCKLND_CONN_NONE));
1049
1050 LIBCFS_ALLOC(conn, sizeof(*conn));
1051 if (!conn) {
1052 rc = -ENOMEM;
1053 goto failed_0;
1054 }
1055
1056 conn->ksnc_peer = NULL;
1057 conn->ksnc_route = NULL;
1058 conn->ksnc_sock = sock;
1059 /*
1060 * Two refs: one for the conn itself, plus an extra ref that prevents
1061 * the socket being closed before the connection is established
1062 */
1063 atomic_set(&conn->ksnc_sock_refcount, 2);
1064 conn->ksnc_type = type;
1065 ksocknal_lib_save_callback(sock, conn);
1066 atomic_set(&conn->ksnc_conn_refcount, 1); /* 1 ref for me */
1067
1068 conn->ksnc_rx_ready = 0;
1069 conn->ksnc_rx_scheduled = 0;
1070
1071 INIT_LIST_HEAD(&conn->ksnc_tx_queue);
1072 conn->ksnc_tx_ready = 0;
1073 conn->ksnc_tx_scheduled = 0;
1074 conn->ksnc_tx_carrier = NULL;
1075 atomic_set(&conn->ksnc_tx_nob, 0);
1076
1077 LIBCFS_ALLOC(hello, offsetof(ksock_hello_msg_t,
1078 kshm_ips[LNET_MAX_INTERFACES]));
1079 if (!hello) {
1080 rc = -ENOMEM;
1081 goto failed_1;
1082 }
1083
1084 /* stash conn's local and remote addrs */
1085 rc = ksocknal_lib_get_conn_addrs(conn);
1086 if (rc)
1087 goto failed_1;
1088
1089 /*
1090 * Find out/confirm peer's NID and connection type and get the
1091 * vector of interfaces she's willing to let me connect to.
1092 * Passive connections use the listener timeout since the peer sends
1093 * eagerly
1094 */
1095 if (active) {
1096 peer = route->ksnr_peer;
1097 LASSERT(ni == peer->ksnp_ni);
1098
1099 /* Active connection sends HELLO eagerly */
1100 hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips);
1101 peerid = peer->ksnp_id;
1102
1103 write_lock_bh(global_lock);
1104 conn->ksnc_proto = peer->ksnp_proto;
1105 write_unlock_bh(global_lock);
1106
1107 if (!conn->ksnc_proto) {
1108 conn->ksnc_proto = &ksocknal_protocol_v3x;
1109 #if SOCKNAL_VERSION_DEBUG
1110 if (*ksocknal_tunables.ksnd_protocol == 2)
1111 conn->ksnc_proto = &ksocknal_protocol_v2x;
1112 else if (*ksocknal_tunables.ksnd_protocol == 1)
1113 conn->ksnc_proto = &ksocknal_protocol_v1x;
1114 #endif
1115 }
1116
1117 rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
1118 if (rc)
1119 goto failed_1;
1120 } else {
1121 peerid.nid = LNET_NID_ANY;
1122 peerid.pid = LNET_PID_ANY;
1123
1124 /* Passive, get protocol from peer */
1125 conn->ksnc_proto = NULL;
1126 }
1127
1128 rc = ksocknal_recv_hello(ni, conn, hello, &peerid, &incarnation);
1129 if (rc < 0)
1130 goto failed_1;
1131
1132 LASSERT(!rc || active);
1133 LASSERT(conn->ksnc_proto);
1134 LASSERT(peerid.nid != LNET_NID_ANY);
1135
1136 cpt = lnet_cpt_of_nid(peerid.nid);
1137
1138 if (active) {
1139 ksocknal_peer_addref(peer);
1140 write_lock_bh(global_lock);
1141 } else {
1142 rc = ksocknal_create_peer(&peer, ni, peerid);
1143 if (rc)
1144 goto failed_1;
1145
1146 write_lock_bh(global_lock);
1147
1148 /* called with a ref on ni, so shutdown can't have started */
1149 LASSERT(!((struct ksock_net *)ni->ni_data)->ksnn_shutdown);
1150
1151 peer2 = ksocknal_find_peer_locked(ni, peerid);
1152 if (!peer2) {
1153 /*
1154 * NB this puts an "empty" peer in the peer
1155 * table (which takes my ref)
1156 */
1157 list_add_tail(&peer->ksnp_list,
1158 ksocknal_nid2peerlist(peerid.nid));
1159 } else {
1160 ksocknal_peer_decref(peer);
1161 peer = peer2;
1162 }
1163
1164 /* +1 ref for me */
1165 ksocknal_peer_addref(peer);
1166 peer->ksnp_accepting++;
1167
1168 /*
1169 * Am I already connecting to this guy? Resolve in
1170 * favour of higher NID...
1171 */
1172 if (peerid.nid < ni->ni_nid &&
1173 ksocknal_connecting(peer, conn->ksnc_ipaddr)) {
1174 rc = EALREADY;
1175 warn = "connection race resolution";
1176 goto failed_2;
1177 }
1178 }
1179
1180 if (peer->ksnp_closing ||
1181 (active && route->ksnr_deleted)) {
1182 /* peer/route got closed under me */
1183 rc = -ESTALE;
1184 warn = "peer/route removed";
1185 goto failed_2;
1186 }
1187
1188 if (!peer->ksnp_proto) {
1189 /*
1190 * Never connected before.
1191 * NB recv_hello may have returned EPROTO to signal my peer
1192 * wants a different protocol than the one I asked for.
1193 */
1194 LASSERT(list_empty(&peer->ksnp_conns));
1195
1196 peer->ksnp_proto = conn->ksnc_proto;
1197 peer->ksnp_incarnation = incarnation;
1198 }
1199
1200 if (peer->ksnp_proto != conn->ksnc_proto ||
1201 peer->ksnp_incarnation != incarnation) {
1202 /* Peer rebooted or I've got the wrong protocol version */
1203 ksocknal_close_peer_conns_locked(peer, 0, 0);
1204
1205 peer->ksnp_proto = NULL;
1206 rc = ESTALE;
1207 warn = peer->ksnp_incarnation != incarnation ?
1208 "peer rebooted" :
1209 "wrong proto version";
1210 goto failed_2;
1211 }
1212
1213 switch (rc) {
1214 default:
1215 LBUG();
1216 case 0:
1217 break;
1218 case EALREADY:
1219 warn = "lost conn race";
1220 goto failed_2;
1221 case EPROTO:
1222 warn = "retry with different protocol version";
1223 goto failed_2;
1224 }
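/*
 * [Editorial note] Note the sign convention: negative rc values are hard
 * failures, positive ones (EALREADY, EPROTO, ESTALE) are soft failures.
 * The failed_2 path below replies with SOCKLND_CONN_NONE when rc > 0 so
 * the peer can retry, and only logs at error level when rc < 0.
 */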
1225
1226 /*
1227 * Refuse to duplicate an existing connection, unless this is a
1228 * loopback connection
1229 */
1230 if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
1231 list_for_each(tmp, &peer->ksnp_conns) {
1232 conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
1233
1234 if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
1235 conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
1236 conn2->ksnc_type != conn->ksnc_type)
1237 continue;
1238
1239 /*
1240 * Reply on a passive connection attempt so the peer
1241 * realises we're connected.
1242 */
1243 LASSERT(!rc);
1244 if (!active)
1245 rc = EALREADY;
1246
1247 warn = "duplicate";
1248 goto failed_2;
1249 }
1250 }
1251
1252 /*
1253 * If the connection created by this route didn't bind to the IP
1254 * address the route connected to, the connection/route matching
1255 * code below probably isn't going to work.
1256 */
1257 if (active &&
1258 route->ksnr_ipaddr != conn->ksnc_ipaddr) {
1259 CERROR("Route %s %pI4h connected to %pI4h\n",
1260 libcfs_id2str(peer->ksnp_id),
1261 &route->ksnr_ipaddr,
1262 &conn->ksnc_ipaddr);
1263 }
1264
1265 /*
1266 * Search for a route corresponding to the new connection and
1267 * create an association. This allows incoming connections created
1268 * by routes in my peer to match my own route entries so I don't
1269 * continually create duplicate routes.
1270 */
1271 list_for_each(tmp, &peer->ksnp_routes) {
1272 route = list_entry(tmp, struct ksock_route, ksnr_list);
1273
1274 if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
1275 continue;
1276
1277 ksocknal_associate_route_conn_locked(route, conn);
1278 break;
1279 }
1280
1281 conn->ksnc_peer = peer; /* conn takes my ref on peer */
1282 peer->ksnp_last_alive = cfs_time_current();
1283 peer->ksnp_send_keepalive = 0;
1284 peer->ksnp_error = 0;
1285
1286 sched = ksocknal_choose_scheduler_locked(cpt);
1287 sched->kss_nconns++;
1288 conn->ksnc_scheduler = sched;
1289
1290 conn->ksnc_tx_last_post = cfs_time_current();
1291 /* Set the deadline for the outgoing HELLO to drain */
1292 conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
1293 conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
1294 mb(); /* order with adding to peer's conn list */
1295
1296 list_add(&conn->ksnc_list, &peer->ksnp_conns);
1297 ksocknal_conn_addref(conn);
1298
1299 ksocknal_new_packet(conn, 0);
1300
1301 conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);
1302
1303 /* Take packets blocking for this connection. */
1304 list_for_each_entry_safe(tx, txtmp, &peer->ksnp_tx_queue, tx_list) {
1305 if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) == SOCKNAL_MATCH_NO)
1306 continue;
1307
1308 list_del(&tx->tx_list);
1309 ksocknal_queue_tx_locked(tx, conn);
1310 }
1311
1312 write_unlock_bh(global_lock);
1313
1314 /*
1315 * We've now got a new connection. Any errors from here on are just
1316 * like "normal" comms errors and we close the connection normally.
1317 * NB (a) we still have to send the reply HELLO for passive
1318 * connections,
1319 * (b) normal I/O on the conn is blocked until I setup and call the
1320 * socket callbacks.
1321 */
1322 CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d incarnation:%lld sched[%d:%d]\n",
1323 libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
1324 &conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
1325 conn->ksnc_port, incarnation, cpt,
1326 (int)(sched - &sched->kss_info->ksi_scheds[0]));
1327
1328 if (active) {
1329 /* additional routes after interface exchange? */
1330 ksocknal_create_routes(peer, conn->ksnc_port,
1331 hello->kshm_ips, hello->kshm_nips);
1332 } else {
1333 hello->kshm_nips = ksocknal_select_ips(peer, hello->kshm_ips,
1334 hello->kshm_nips);
1335 rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
1336 }
1337
1338 LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t,
1339 kshm_ips[LNET_MAX_INTERFACES]));
1340
1341 /*
1342 * setup the socket AFTER I've received hello (it disables
1343 * SO_LINGER). I might call back to the acceptor who may want
1344 * to send a protocol version response and then close the
1345 * socket; this ensures the socket only tears down after the
1346 * response has been sent.
1347 */
1348 if (!rc)
1349 rc = ksocknal_lib_setup_sock(sock);
1350
1351 write_lock_bh(global_lock);
1352
1353 /* NB my callbacks block while I hold ksnd_global_lock */
1354 ksocknal_lib_set_callback(sock, conn);
1355
1356 if (!active)
1357 peer->ksnp_accepting--;
1358
1359 write_unlock_bh(global_lock);
1360
1361 if (rc) {
1362 write_lock_bh(global_lock);
1363 if (!conn->ksnc_closing) {
1364 /* could be closed by another thread */
1365 ksocknal_close_conn_locked(conn, rc);
1366 }
1367 write_unlock_bh(global_lock);
1368 } else if (!ksocknal_connsock_addref(conn)) {
1369 /* Allow I/O to proceed. */
1370 ksocknal_read_callback(conn);
1371 ksocknal_write_callback(conn);
1372 ksocknal_connsock_decref(conn);
1373 }
1374
1375 ksocknal_connsock_decref(conn);
1376 ksocknal_conn_decref(conn);
1377 return rc;
1378
1379 failed_2:
1380 if (!peer->ksnp_closing &&
1381 list_empty(&peer->ksnp_conns) &&
1382 list_empty(&peer->ksnp_routes)) {
1383 list_add(&zombies, &peer->ksnp_tx_queue);
1384 list_del_init(&peer->ksnp_tx_queue);
1385 ksocknal_unlink_peer_locked(peer);
1386 }
1387
1388 write_unlock_bh(global_lock);
1389
1390 if (warn) {
1391 if (rc < 0)
1392 CERROR("Not creating conn %s type %d: %s\n",
1393 libcfs_id2str(peerid), conn->ksnc_type, warn);
1394 else
1395 CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
1396 libcfs_id2str(peerid), conn->ksnc_type, warn);
1397 }
1398
1399 if (!active) {
1400 if (rc > 0) {
1401 /*
1402 * Request a retry by replying with CONN_NONE;
1403 * ksnc_proto has already been set
1404 */
1405 conn->ksnc_type = SOCKLND_CONN_NONE;
1406 hello->kshm_nips = 0;
1407 ksocknal_send_hello(ni, conn, peerid.nid, hello);
1408 }
1409
1410 write_lock_bh(global_lock);
1411 peer->ksnp_accepting--;
1412 write_unlock_bh(global_lock);
1413 }
1414
1415 ksocknal_txlist_done(ni, &zombies, 1);
1416 ksocknal_peer_decref(peer);
1417
1418 failed_1:
1419 if (hello)
1420 LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t,
1421 kshm_ips[LNET_MAX_INTERFACES]));
1422
1423 LIBCFS_FREE(conn, sizeof(*conn));
1424
1425 failed_0:
1426 sock_release(sock);
1427 return rc;
1428 }
1429
1430 void
1431 ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
1432 {
1433 /*
1434 * This just does the immediate housekeeping, and queues the
1435 * connection for the reaper to terminate.
1436 * Caller holds ksnd_global_lock exclusively in irq context
1437 */
1438 struct ksock_peer *peer = conn->ksnc_peer;
1439 struct ksock_route *route;
1440 struct ksock_conn *conn2;
1441 struct list_head *tmp;
1442
1443 LASSERT(!peer->ksnp_error);
1444 LASSERT(!conn->ksnc_closing);
1445 conn->ksnc_closing = 1;
1446
1447 /* ksnd_deathrow_conns takes over peer's ref */
1448 list_del(&conn->ksnc_list);
1449
1450 route = conn->ksnc_route;
1451 if (route) {
1452 /* dissociate conn from route... */
1453 LASSERT(!route->ksnr_deleted);
1454 LASSERT(route->ksnr_connected & (1 << conn->ksnc_type));
1455
1456 conn2 = NULL;
1457 list_for_each(tmp, &peer->ksnp_conns) {
1458 conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
1459
1460 if (conn2->ksnc_route == route &&
1461 conn2->ksnc_type == conn->ksnc_type)
1462 break;
1463
1464 conn2 = NULL;
1465 }
1466 if (!conn2)
1467 route->ksnr_connected &= ~(1 << conn->ksnc_type);
1468
1469 conn->ksnc_route = NULL;
1470
1471 ksocknal_route_decref(route); /* drop conn's ref on route */
1472 }
1473
1474 if (list_empty(&peer->ksnp_conns)) {
1475 /* No more connections to this peer */
1476
1477 if (!list_empty(&peer->ksnp_tx_queue)) {
1478 struct ksock_tx *tx;
1479
1480 LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
1481
1482 /*
1483 * throw them onto the last connection;
1484 * these TXs will be sent to /dev/null by the scheduler
1485 */
1486 list_for_each_entry(tx, &peer->ksnp_tx_queue,
1487 tx_list)
1488 ksocknal_tx_prep(conn, tx);
1489
1490 spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
1491 list_splice_init(&peer->ksnp_tx_queue,
1492 &conn->ksnc_tx_queue);
1493 spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
1494 }
1495
1496 peer->ksnp_proto = NULL; /* renegotiate protocol version */
1497 peer->ksnp_error = error; /* stash last conn close reason */
1498
1499 if (list_empty(&peer->ksnp_routes)) {
1500 /*
1501 * I've just closed last conn belonging to a
1502 * peer with no routes to it
1503 */
1504 ksocknal_unlink_peer_locked(peer);
1505 }
1506 }
1507
1508 spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1509
1510 list_add_tail(&conn->ksnc_list,
1511 &ksocknal_data.ksnd_deathrow_conns);
1512 wake_up(&ksocknal_data.ksnd_reaper_waitq);
1513
1514 spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1515 }
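/*
 * [Editorial summary] Teardown is two-stage: this function flags
 * ksnc_closing and moves the conn to ksnd_deathrow_conns for the reaper;
 * ksocknal_terminate_conn() then detaches the socket callbacks, and once
 * ksnc_conn_refcount drops to zero the conn lands on ksnd_zombie_conns
 * and is freed by ksocknal_destroy_conn().
 */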
1516
1517 void
1518 ksocknal_peer_failed(struct ksock_peer *peer)
1519 {
1520 int notify = 0;
1521 unsigned long last_alive = 0;
1522
1523 /*
1524 * There has been a connection failure or comms error; but I'll only
1525 * tell LNET I think the peer is dead if it's to another kernel and
1526 * there are no connections or connection attempts in existence.
1527 */
1528 read_lock(&ksocknal_data.ksnd_global_lock);
1529
1530 if (!(peer->ksnp_id.pid & LNET_PID_USERFLAG) &&
1531 list_empty(&peer->ksnp_conns) &&
1532 !peer->ksnp_accepting &&
1533 !ksocknal_find_connecting_route_locked(peer)) {
1534 notify = 1;
1535 last_alive = peer->ksnp_last_alive;
1536 }
1537
1538 read_unlock(&ksocknal_data.ksnd_global_lock);
1539
1540 if (notify)
1541 lnet_notify(peer->ksnp_ni, peer->ksnp_id.nid, 0,
1542 last_alive);
1543 }
1544
1545 void
1546 ksocknal_finalize_zcreq(struct ksock_conn *conn)
1547 {
1548 struct ksock_peer *peer = conn->ksnc_peer;
1549 struct ksock_tx *tx;
1550 struct ksock_tx *temp;
1551 struct ksock_tx *tmp;
1552 LIST_HEAD(zlist);
1553
1554 /*
1555 * NB safe to finalize TXs because closing of socket will
1556 * abort all buffered data
1557 */
1558 LASSERT(!conn->ksnc_sock);
1559
1560 spin_lock(&peer->ksnp_lock);
1561
1562 list_for_each_entry_safe(tx, tmp, &peer->ksnp_zc_req_list, tx_zc_list) {
1563 if (tx->tx_conn != conn)
1564 continue;
1565
1566 LASSERT(tx->tx_msg.ksm_zc_cookies[0]);
1567
1568 tx->tx_msg.ksm_zc_cookies[0] = 0;
1569 tx->tx_zc_aborted = 1; /* mark it as not-acked */
1570 list_del(&tx->tx_zc_list);
1571 list_add(&tx->tx_zc_list, &zlist);
1572 }
1573
1574 spin_unlock(&peer->ksnp_lock);
1575
1576 list_for_each_entry_safe(tx, temp, &zlist, tx_zc_list) {
1577 list_del(&tx->tx_zc_list);
1578 ksocknal_tx_decref(tx);
1579 }
1580 }
1581
1582 void
1583 ksocknal_terminate_conn(struct ksock_conn *conn)
1584 {
1585 /*
1586 * This gets called by the reaper (guaranteed thread context) to
1587 * disengage the socket from its callbacks and close it.
1588 * ksnc_refcount will eventually hit zero, and then the reaper will
1589 * destroy it.
1590 */
1591 struct ksock_peer *peer = conn->ksnc_peer;
1592 struct ksock_sched *sched = conn->ksnc_scheduler;
1593 int failed = 0;
1594
1595 LASSERT(conn->ksnc_closing);
1596
1597 /* wake up the scheduler to "send" all remaining packets to /dev/null */
1598 spin_lock_bh(&sched->kss_lock);
1599
1600 /* a closing conn is always ready to tx */
1601 conn->ksnc_tx_ready = 1;
1602
1603 if (!conn->ksnc_tx_scheduled &&
1604 !list_empty(&conn->ksnc_tx_queue)) {
1605 list_add_tail(&conn->ksnc_tx_list,
1606 &sched->kss_tx_conns);
1607 conn->ksnc_tx_scheduled = 1;
1608 /* extra ref for scheduler */
1609 ksocknal_conn_addref(conn);
1610
1611 wake_up(&sched->kss_waitq);
1612 }
1613
1614 spin_unlock_bh(&sched->kss_lock);
1615
1616 /* serialise with callbacks */
1617 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1618
1619 ksocknal_lib_reset_callback(conn->ksnc_sock, conn);
1620
1621 /*
1622 * OK, so this conn may not be completely disengaged from its
1623 * scheduler yet, but it _has_ committed to terminate...
1624 */
1625 conn->ksnc_scheduler->kss_nconns--;
1626
1627 if (peer->ksnp_error) {
1628 /* peer's last conn closed in error */
1629 LASSERT(list_empty(&peer->ksnp_conns));
1630 failed = 1;
1631 peer->ksnp_error = 0; /* avoid multiple notifications */
1632 }
1633
1634 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1635
1636 if (failed)
1637 ksocknal_peer_failed(peer);
1638
1639 /*
1640 * The socket is closed on the final put; either here, or in
1641 * ksocknal_{send,recv}msg(). Since we set up the linger2 option
1642 * when the connection was established, this will close the socket
1643 * immediately, aborting anything buffered in it. Any hung
1644 * zero-copy transmits will therefore complete in finite time.
1645 */
1646 ksocknal_connsock_decref(conn);
1647 }
1648
1649 void
1650 ksocknal_queue_zombie_conn(struct ksock_conn *conn)
1651 {
1652 /* Queue the conn for the reaper to destroy */
1653
1654 LASSERT(!atomic_read(&conn->ksnc_conn_refcount));
1655 spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1656
1657 list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
1658 wake_up(&ksocknal_data.ksnd_reaper_waitq);
1659
1660 spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1661 }
1662
1663 void
1664 ksocknal_destroy_conn(struct ksock_conn *conn)
1665 {
1666 unsigned long last_rcv;
1667
1668 /* Final coup-de-grace of the reaper */
1669 CDEBUG(D_NET, "connection %p\n", conn);
1670
1671 LASSERT(!atomic_read(&conn->ksnc_conn_refcount));
1672 LASSERT(!atomic_read(&conn->ksnc_sock_refcount));
1673 LASSERT(!conn->ksnc_sock);
1674 LASSERT(!conn->ksnc_route);
1675 LASSERT(!conn->ksnc_tx_scheduled);
1676 LASSERT(!conn->ksnc_rx_scheduled);
1677 LASSERT(list_empty(&conn->ksnc_tx_queue));
1678
1679 /* complete current receive if any */
1680 switch (conn->ksnc_rx_state) {
1681 case SOCKNAL_RX_LNET_PAYLOAD:
1682 last_rcv = conn->ksnc_rx_deadline -
1683 cfs_time_seconds(*ksocknal_tunables.ksnd_timeout);
1684 CERROR("Completing partial receive from %s[%d], ip %pI4h:%d, with error, wanted: %d, left: %d, last alive is %ld secs ago\n",
1685 libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
1686 &conn->ksnc_ipaddr, conn->ksnc_port,
1687 conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
1688 cfs_duration_sec(cfs_time_sub(cfs_time_current(),
1689 last_rcv)));
1690 lnet_finalize(conn->ksnc_peer->ksnp_ni,
1691 conn->ksnc_cookie, -EIO);
1692 break;
1693 case SOCKNAL_RX_LNET_HEADER:
1694 if (conn->ksnc_rx_started)
1695 CERROR("Incomplete receive of lnet header from %s, ip %pI4h:%d, with error, protocol: %d.x.\n",
1696 libcfs_id2str(conn->ksnc_peer->ksnp_id),
1697 &conn->ksnc_ipaddr, conn->ksnc_port,
1698 conn->ksnc_proto->pro_version);
1699 break;
1700 case SOCKNAL_RX_KSM_HEADER:
1701 if (conn->ksnc_rx_started)
1702 CERROR("Incomplete receive of ksock message from %s, ip %pI4h:%d, with error, protocol: %d.x.\n",
1703 libcfs_id2str(conn->ksnc_peer->ksnp_id),
1704 &conn->ksnc_ipaddr, conn->ksnc_port,
1705 conn->ksnc_proto->pro_version);
1706 break;
1707 case SOCKNAL_RX_SLOP:
1708 if (conn->ksnc_rx_started)
1709 CERROR("Incomplete receive of slops from %s, ip %pI4h:%d, with error\n",
1710 libcfs_id2str(conn->ksnc_peer->ksnp_id),
1711 &conn->ksnc_ipaddr, conn->ksnc_port);
1712 break;
1713 default:
1714 LBUG();
1715 break;
1716 }
1717
1718 ksocknal_peer_decref(conn->ksnc_peer);
1719
1720 LIBCFS_FREE(conn, sizeof(*conn));
1721 }
1722
1723 int
1724 ksocknal_close_peer_conns_locked(struct ksock_peer *peer, __u32 ipaddr, int why)
1725 {
1726 struct ksock_conn *conn;
1727 struct list_head *ctmp;
1728 struct list_head *cnxt;
1729 int count = 0;
1730
1731 list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) {
1732 conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
1733
1734 if (!ipaddr || conn->ksnc_ipaddr == ipaddr) {
1735 count++;
1736 ksocknal_close_conn_locked(conn, why);
1737 }
1738 }
1739
1740 return count;
1741 }
1742
1743 int
1744 ksocknal_close_conn_and_siblings(struct ksock_conn *conn, int why)
1745 {
1746 struct ksock_peer *peer = conn->ksnc_peer;
1747 __u32 ipaddr = conn->ksnc_ipaddr;
1748 int count;
1749
1750 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1751
1752 count = ksocknal_close_peer_conns_locked(peer, ipaddr, why);
1753
1754 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1755
1756 return count;
1757 }
1758
1759 int
1760 ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr)
1761 {
1762 struct ksock_peer *peer;
1763 struct list_head *ptmp;
1764 struct list_head *pnxt;
1765 int lo;
1766 int hi;
1767 int i;
1768 int count = 0;
1769
1770 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1771
1772 if (id.nid != LNET_NID_ANY) {
1773 lo = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
1774 hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
1775 } else {
1776 lo = 0;
1777 hi = ksocknal_data.ksnd_peer_hash_size - 1;
1778 }
1779
1780 for (i = lo; i <= hi; i++) {
1781 list_for_each_safe(ptmp, pnxt,
1782 &ksocknal_data.ksnd_peers[i]) {
1783 peer = list_entry(ptmp, struct ksock_peer, ksnp_list);
1784
1785 if (!((id.nid == LNET_NID_ANY || id.nid == peer->ksnp_id.nid) &&
1786 (id.pid == LNET_PID_ANY || id.pid == peer->ksnp_id.pid)))
1787 continue;
1788
1789 count += ksocknal_close_peer_conns_locked(peer, ipaddr, 0);
1790 }
1791 }
1792
1793 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1794
1795 /* wildcards always succeed */
1796 if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || !ipaddr)
1797 return 0;
1798
1799 if (!count)
1800 return -ENOENT;
1801 else
1802 return 0;
1803 }
1804
1805 void
1806 ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive)
1807 {
1808 /*
1809 * The router is telling me she's been notified of a change in
1810 * gateway state...
1811 */
1812 lnet_process_id_t id = {0};
1813
1814 id.nid = gw_nid;
1815 id.pid = LNET_PID_ANY;
1816
1817 CDEBUG(D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid),
1818 alive ? "up" : "down");
1819
1820 if (!alive) {
1821 /* If the gateway crashed, close all open connections... */
1822 ksocknal_close_matching_conns(id, 0);
1823 return;
1824 }
1825
1826 /*
1827 * ...otherwise do nothing. We can only establish new connections
1828 * if we have autoroutes, and these connect on demand.
1829 */
1830 }
1831
1832 void
1833 ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
1834 {
1835 int connect = 1;
1836 unsigned long last_alive = 0;
1837 unsigned long now = cfs_time_current();
1838 struct ksock_peer *peer = NULL;
1839 rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
1840 lnet_process_id_t id = {
1841 .nid = nid,
1842 .pid = LNET_PID_LUSTRE,
1843 };
1844
1845 read_lock(glock);
1846
1847 peer = ksocknal_find_peer_locked(ni, id);
1848 if (peer) {
1849 struct list_head *tmp;
1850 struct ksock_conn *conn;
1851 int bufnob;
1852
1853 list_for_each(tmp, &peer->ksnp_conns) {
1854 conn = list_entry(tmp, struct ksock_conn, ksnc_list);
1855 bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
1856
1857 if (bufnob < conn->ksnc_tx_bufnob) {
1858 /* something got ACKed */
1859 conn->ksnc_tx_deadline =
1860 cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
1861 peer->ksnp_last_alive = now;
1862 conn->ksnc_tx_bufnob = bufnob;
1863 }
1864 }
1865
1866 last_alive = peer->ksnp_last_alive;
1867 if (!ksocknal_find_connectable_route_locked(peer))
1868 connect = 0;
1869 }
1870
1871 read_unlock(glock);
1872
1873 if (last_alive)
1874 *when = last_alive;
1875
1876 CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago, connect %d\n",
1877 libcfs_nid2str(nid), peer,
1878 last_alive ? cfs_duration_sec(now - last_alive) : -1,
1879 connect);
1880
1881 if (!connect)
1882 return;
1883
1884 ksocknal_add_peer(ni, id, LNET_NIDADDR(nid), lnet_acceptor_port());
1885
1886 write_lock_bh(glock);
1887
1888 peer = ksocknal_find_peer_locked(ni, id);
1889 if (peer)
1890 ksocknal_launch_all_connections_locked(peer);
1891
1892 write_unlock_bh(glock);
1893 }
1894
1895 static void
1896 ksocknal_push_peer(struct ksock_peer *peer)
1897 {
1898 int index;
1899 int i;
1900 struct list_head *tmp;
1901 struct ksock_conn *conn;
1902
1903 for (index = 0; ; index++) {
1904 read_lock(&ksocknal_data.ksnd_global_lock);
1905
1906 i = 0;
1907 conn = NULL;
1908
1909 list_for_each(tmp, &peer->ksnp_conns) {
1910 if (i++ == index) {
1911 conn = list_entry(tmp, struct ksock_conn,
1912 ksnc_list);
1913 ksocknal_conn_addref(conn);
1914 break;
1915 }
1916 }
1917
1918 read_unlock(&ksocknal_data.ksnd_global_lock);
1919
1920 if (!conn)
1921 break;
1922
1923 ksocknal_lib_push_conn(conn);
1924 ksocknal_conn_decref(conn);
1925 }
1926 }
1927
1928 static int ksocknal_push(lnet_ni_t *ni, lnet_process_id_t id)
1929 {
1930 struct list_head *start;
1931 struct list_head *end;
1932 struct list_head *tmp;
1933 int rc = -ENOENT;
1934 unsigned int hsize = ksocknal_data.ksnd_peer_hash_size;
1935
1936 if (id.nid == LNET_NID_ANY) {
1937 start = &ksocknal_data.ksnd_peers[0];
1938 end = &ksocknal_data.ksnd_peers[hsize - 1];
1939 } else {
1940 start = ksocknal_nid2peerlist(id.nid);
1941 end = ksocknal_nid2peerlist(id.nid);
1942 }
1943
1944 for (tmp = start; tmp <= end; tmp++) {
1945 int peer_off; /* searching offset in peer hash table */
1946
1947 for (peer_off = 0; ; peer_off++) {
1948 struct ksock_peer *peer;
1949 int i = 0;
1950
1951 read_lock(&ksocknal_data.ksnd_global_lock);
1952 list_for_each_entry(peer, tmp, ksnp_list) {
1953 if (!((id.nid == LNET_NID_ANY ||
1954 id.nid == peer->ksnp_id.nid) &&
1955 (id.pid == LNET_PID_ANY ||
1956 id.pid == peer->ksnp_id.pid)))
1957 continue;
1958
1959 if (i++ == peer_off) {
1960 ksocknal_peer_addref(peer);
1961 break;
1962 }
1963 }
1964 read_unlock(&ksocknal_data.ksnd_global_lock);
1965
1966 if (!i) /* no match */
1967 break;
1968
1969 rc = 0;
1970 ksocknal_push_peer(peer);
1971 ksocknal_peer_decref(peer);
1972 }
1973 }
1974 return rc;
1975 }
1976
1977 static int
1978 ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
1979 {
1980 struct ksock_net *net = ni->ni_data;
1981 struct ksock_interface *iface;
1982 int rc;
1983 int i;
1984 int j;
1985 struct list_head *ptmp;
1986 struct ksock_peer *peer;
1987 struct list_head *rtmp;
1988 struct ksock_route *route;
1989
1990 if (!ipaddress || !netmask)
1991 return -EINVAL;
1992
1993 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1994
1995 iface = ksocknal_ip2iface(ni, ipaddress);
1996 if (iface) {
1997 /* silently ignore dups */
1998 rc = 0;
1999 } else if (net->ksnn_ninterfaces == LNET_MAX_INTERFACES) {
2000 rc = -ENOSPC;
2001 } else {
2002 iface = &net->ksnn_interfaces[net->ksnn_ninterfaces++];
2003
2004 iface->ksni_ipaddr = ipaddress;
2005 iface->ksni_netmask = netmask;
2006 iface->ksni_nroutes = 0;
2007 iface->ksni_npeers = 0;
2008
2009 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
2010 list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
2011 peer = list_entry(ptmp, struct ksock_peer,
2012 ksnp_list);
2013
2014 for (j = 0; j < peer->ksnp_n_passive_ips; j++)
2015 if (peer->ksnp_passive_ips[j] == ipaddress)
2016 iface->ksni_npeers++;
2017
2018 list_for_each(rtmp, &peer->ksnp_routes) {
2019 route = list_entry(rtmp, struct ksock_route,
2020 ksnr_list);
2021
2022 if (route->ksnr_myipaddr == ipaddress)
2023 iface->ksni_nroutes++;
2024 }
2025 }
2026 }
2027
2028 rc = 0;
2029 /* NB only new connections will pay attention to the new interface! */
2030 }
2031
2032 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
2033
2034 return rc;
2035 }
2036
2037 static void
2038 ksocknal_peer_del_interface_locked(struct ksock_peer *peer, __u32 ipaddr)
2039 {
2040 struct list_head *tmp;
2041 struct list_head *nxt;
2042 struct ksock_route *route;
2043 struct ksock_conn *conn;
2044 int i;
2045 int j;
2046
2047 for (i = 0; i < peer->ksnp_n_passive_ips; i++)
2048 if (peer->ksnp_passive_ips[i] == ipaddr) {
2049 for (j = i + 1; j < peer->ksnp_n_passive_ips; j++)
2050 peer->ksnp_passive_ips[j - 1] =
2051 peer->ksnp_passive_ips[j];
2052 peer->ksnp_n_passive_ips--;
2053 break;
2054 }
2055
2056 list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
2057 route = list_entry(tmp, struct ksock_route, ksnr_list);
2058
2059 if (route->ksnr_myipaddr != ipaddr)
2060 continue;
2061
2062 if (route->ksnr_share_count) {
2063 /* Manually created; keep, but unbind */
2064 route->ksnr_myipaddr = 0;
2065 } else {
2066 ksocknal_del_route_locked(route);
2067 }
2068 }
2069
2070 list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
2071 conn = list_entry(tmp, struct ksock_conn, ksnc_list);
2072
2073 if (conn->ksnc_myipaddr == ipaddr)
2074 ksocknal_close_conn_locked(conn, 0);
2075 }
2076 }
2077
2078 static int
2079 ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
2080 {
2081 struct ksock_net *net = ni->ni_data;
2082 int rc = -ENOENT;
2083 struct list_head *tmp;
2084 struct list_head *nxt;
2085 struct ksock_peer *peer;
2086 __u32 this_ip;
2087 int i;
2088 int j;
2089
2090 write_lock_bh(&ksocknal_data.ksnd_global_lock);
2091
2092 for (i = 0; i < net->ksnn_ninterfaces; i++) {
2093 this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
2094
2095 if (ipaddress && ipaddress != this_ip)
2096 continue;
2097
2098 rc = 0;
2099
2100 for (j = i + 1; j < net->ksnn_ninterfaces; j++)
2101 net->ksnn_interfaces[j - 1] =
2102 net->ksnn_interfaces[j];
2103
2104 net->ksnn_interfaces--;
/* re-scan this slot: the shift above moved the next entry into it,
 * so a wildcard delete (ipaddress == 0) would otherwise skip every
 * other interface */
i--;
2105
2106 for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
2107 list_for_each_safe(tmp, nxt,
2108 &ksocknal_data.ksnd_peers[j]) {
2109 peer = list_entry(tmp, struct ksock_peer, ksnp_list);
2110
2111 if (peer->ksnp_ni != ni)
2112 continue;
2113
2114 ksocknal_peer_del_interface_locked(peer, this_ip);
2115 }
2116 }
2117 }
2118
2119 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
2120
2121 return rc;
2122 }
2123
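/* lnd_ctl handler: dispatches the IOC_LIBCFS_* ioctls for interface,
 * peer and connection management on this NI.
 */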
2124 int
2125 ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
2126 {
2127 lnet_process_id_t id = {0};
2128 struct libcfs_ioctl_data *data = arg;
2129 int rc;
2130
2131 switch (cmd) {
2132 case IOC_LIBCFS_GET_INTERFACE: {
2133 struct ksock_net *net = ni->ni_data;
2134 struct ksock_interface *iface;
2135
2136 read_lock(&ksocknal_data.ksnd_global_lock);
2137
2138 if (data->ioc_count >= (__u32)net->ksnn_ninterfaces) {
2139 rc = -ENOENT;
2140 } else {
2141 rc = 0;
2142 iface = &net->ksnn_interfaces[data->ioc_count];
2143
2144 data->ioc_u32[0] = iface->ksni_ipaddr;
2145 data->ioc_u32[1] = iface->ksni_netmask;
2146 data->ioc_u32[2] = iface->ksni_npeers;
2147 data->ioc_u32[3] = iface->ksni_nroutes;
2148 }
2149
2150 read_unlock(&ksocknal_data.ksnd_global_lock);
2151 return rc;
2152 }
2153
2154 case IOC_LIBCFS_ADD_INTERFACE:
2155 return ksocknal_add_interface(ni,
2156 data->ioc_u32[0], /* IP address */
2157 data->ioc_u32[1]); /* net mask */
2158
2159 case IOC_LIBCFS_DEL_INTERFACE:
2160 return ksocknal_del_interface(ni,
2161 data->ioc_u32[0]); /* IP address */
2162
2163 case IOC_LIBCFS_GET_PEER: {
2164 __u32 myip = 0;
2165 __u32 ip = 0;
2166 int port = 0;
2167 int conn_count = 0;
2168 int share_count = 0;
2169
2170 rc = ksocknal_get_peer_info(ni, data->ioc_count,
2171 &id, &myip, &ip, &port,
2172 &conn_count, &share_count);
2173 if (rc)
2174 return rc;
2175
2176 data->ioc_nid = id.nid;
2177 data->ioc_count = share_count;
2178 data->ioc_u32[0] = ip;
2179 data->ioc_u32[1] = port;
2180 data->ioc_u32[2] = myip;
2181 data->ioc_u32[3] = conn_count;
2182 data->ioc_u32[4] = id.pid;
2183 return 0;
2184 }
2185
2186 case IOC_LIBCFS_ADD_PEER:
2187 id.nid = data->ioc_nid;
2188 id.pid = LNET_PID_LUSTRE;
2189 return ksocknal_add_peer(ni, id,
2190 data->ioc_u32[0], /* IP */
2191 data->ioc_u32[1]); /* port */
2192
2193 case IOC_LIBCFS_DEL_PEER:
2194 id.nid = data->ioc_nid;
2195 id.pid = LNET_PID_ANY;
2196 return ksocknal_del_peer(ni, id,
2197 data->ioc_u32[0]); /* IP */
2198
2199 case IOC_LIBCFS_GET_CONN: {
2200 int txmem;
2201 int rxmem;
2202 int nagle;
2203 struct ksock_conn *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
2204
2205 if (!conn)
2206 return -ENOENT;
2207
2208 ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle);
2209
2210 data->ioc_count = txmem;
2211 data->ioc_nid = conn->ksnc_peer->ksnp_id.nid;
2212 data->ioc_flags = nagle;
2213 data->ioc_u32[0] = conn->ksnc_ipaddr;
2214 data->ioc_u32[1] = conn->ksnc_port;
2215 data->ioc_u32[2] = conn->ksnc_myipaddr;
2216 data->ioc_u32[3] = conn->ksnc_type;
2217 data->ioc_u32[4] = conn->ksnc_scheduler->kss_info->ksi_cpt;
2218 data->ioc_u32[5] = rxmem;
2219 data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
2220 ksocknal_conn_decref(conn);
2221 return 0;
2222 }
2223
2224 case IOC_LIBCFS_CLOSE_CONNECTION:
2225 id.nid = data->ioc_nid;
2226 id.pid = LNET_PID_ANY;
2227 return ksocknal_close_matching_conns(id,
2228 data->ioc_u32[0]);
2229
2230 case IOC_LIBCFS_REGISTER_MYNID:
2231 /* Ignore if this is a noop */
2232 if (data->ioc_nid == ni->ni_nid)
2233 return 0;
2234
2235 CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
2236 libcfs_nid2str(data->ioc_nid),
2237 libcfs_nid2str(ni->ni_nid));
2238 return -EINVAL;
2239
2240 case IOC_LIBCFS_PUSH_CONNECTION:
2241 id.nid = data->ioc_nid;
2242 id.pid = LNET_PID_ANY;
2243 return ksocknal_push(ni, id);
2244
2245 default:
2246 return -EINVAL;
2247 }
2248 /* not reached */
2249 }
2250
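/* Release memory allocated at startup: the per-CPT scheduler arrays,
 * the peer hash table and any idle noop txs still on the free list.
 */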
2251 static void
2252 ksocknal_free_buffers(void)
2253 {
2254 LASSERT(!atomic_read(&ksocknal_data.ksnd_nactive_txs));
2255
2256 if (ksocknal_data.ksnd_sched_info) {
2257 struct ksock_sched_info *info;
2258 int i;
2259
2260 cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
2261 if (info->ksi_scheds) {
2262 LIBCFS_FREE(info->ksi_scheds,
2263 info->ksi_nthreads_max *
2264 sizeof(info->ksi_scheds[0]));
2265 }
2266 }
2267 cfs_percpt_free(ksocknal_data.ksnd_sched_info);
2268 }
2269
2270 LIBCFS_FREE(ksocknal_data.ksnd_peers,
2271 sizeof(struct list_head) *
2272 ksocknal_data.ksnd_peer_hash_size);
2273
2274 spin_lock(&ksocknal_data.ksnd_tx_lock);
2275
2276 if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
2277 struct list_head zlist;
2278 struct ksock_tx *tx;
2279 struct ksock_tx *temp;
2280
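/* Move the whole idle-noop list onto the local zlist so the txs can
 * be freed after dropping the lock: adding zlist as a node and then
 * deleting the old head leaves zlist as the new list head (the same
 * effect as list_splice_init() onto an initialised list).
 */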
2281 list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
2282 list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
2283 spin_unlock(&ksocknal_data.ksnd_tx_lock);
2284
2285 list_for_each_entry_safe(tx, temp, &zlist, tx_list) {
2286 list_del(&tx->tx_list);
2287 LIBCFS_FREE(tx, tx->tx_desc_size);
2288 }
2289 } else {
2290 spin_unlock(&ksocknal_data.ksnd_tx_lock);
2291 }
2292 }
2293
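/* Tear down global state once the last net is gone: assert that all
 * peers, conns and queues have drained, flag shutdown, wake every
 * connd, reaper and scheduler thread, then wait for them to exit
 * before freeing the remaining buffers.
 */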
2294 static void
2295 ksocknal_base_shutdown(void)
2296 {
2297 struct ksock_sched_info *info;
2298 struct ksock_sched *sched;
2299 int i;
2300 int j;
2301
2302 LASSERT(!ksocknal_data.ksnd_nnets);
2303
2304 switch (ksocknal_data.ksnd_init) {
2305 default:
2306 LASSERT(0);
2307
2308 case SOCKNAL_INIT_ALL:
2309 case SOCKNAL_INIT_DATA:
2310 LASSERT(ksocknal_data.ksnd_peers);
2311 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
2312 LASSERT(list_empty(&ksocknal_data.ksnd_peers[i]));
2313
2314 LASSERT(list_empty(&ksocknal_data.ksnd_nets));
2315 LASSERT(list_empty(&ksocknal_data.ksnd_enomem_conns));
2316 LASSERT(list_empty(&ksocknal_data.ksnd_zombie_conns));
2317 LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs));
2318 LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes));
2319
2320 if (ksocknal_data.ksnd_sched_info) {
2321 cfs_percpt_for_each(info, i,
2322 ksocknal_data.ksnd_sched_info) {
2323 if (!info->ksi_scheds)
2324 continue;
2325
2326 for (j = 0; j < info->ksi_nthreads_max; j++) {
2327 sched = &info->ksi_scheds[j];
2328 LASSERT(list_empty(
2329 &sched->kss_tx_conns));
2330 LASSERT(list_empty(
2331 &sched->kss_rx_conns));
2332 LASSERT(list_empty(
2333 &sched->kss_zombie_noop_txs));
2334 LASSERT(!sched->kss_nconns);
2335 }
2336 }
2337 }
2338
2339 /* flag threads to terminate; wake and wait for them to die */
2340 ksocknal_data.ksnd_shuttingdown = 1;
2341 wake_up_all(&ksocknal_data.ksnd_connd_waitq);
2342 wake_up_all(&ksocknal_data.ksnd_reaper_waitq);
2343
2344 if (ksocknal_data.ksnd_sched_info) {
2345 cfs_percpt_for_each(info, i,
2346 ksocknal_data.ksnd_sched_info) {
2347 if (!info->ksi_scheds)
2348 continue;
2349
2350 for (j = 0; j < info->ksi_nthreads_max; j++) {
2351 sched = &info->ksi_scheds[j];
2352 wake_up_all(&sched->kss_waitq);
2353 }
2354 }
2355 }
2356
2357 i = 4;
2358 read_lock(&ksocknal_data.ksnd_global_lock);
2359 while (ksocknal_data.ksnd_nthreads) {
2360 i++;
2361 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
2362 "waiting for %d threads to terminate\n",
2363 ksocknal_data.ksnd_nthreads);
2364 read_unlock(&ksocknal_data.ksnd_global_lock);
2365 set_current_state(TASK_UNINTERRUPTIBLE);
2366 schedule_timeout(cfs_time_seconds(1));
2367 read_lock(&ksocknal_data.ksnd_global_lock);
2368 }
2369 read_unlock(&ksocknal_data.ksnd_global_lock);
2370
2371 ksocknal_free_buffers();
2372
2373 ksocknal_data.ksnd_init = SOCKNAL_INIT_NOTHING;
2374 break;
2375 }
2376
2377 module_put(THIS_MODULE);
2378 }
2379
2380 static __u64
2381 ksocknal_new_incarnation(void)
2382 {
2383 /* The incarnation number is the time this instance started up; it
2384 * identifies this particular instance of the socknal.
2385 */
2386 return ktime_get_ns();
2387 }
2388
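/* One-time global initialisation: allocate the peer hash table and
 * the per-CPT scheduler info, then spawn the connd and reaper
 * threads.  Scheduler threads themselves are started lazily, per net.
 */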
2389 static int
2390 ksocknal_base_startup(void)
2391 {
2392 struct ksock_sched_info *info;
2393 int rc;
2394 int i;
2395
2396 LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
2397 LASSERT(!ksocknal_data.ksnd_nnets);
2398
2399 memset(&ksocknal_data, 0, sizeof(ksocknal_data)); /* zero pointers */
2400
2401 ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE;
2402 LIBCFS_ALLOC(ksocknal_data.ksnd_peers,
2403 sizeof(struct list_head) *
2404 ksocknal_data.ksnd_peer_hash_size);
2405 if (!ksocknal_data.ksnd_peers)
2406 return -ENOMEM;
2407
2408 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
2409 INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);
2410
2411 rwlock_init(&ksocknal_data.ksnd_global_lock);
2412 INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
2413
2414 spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
2415 INIT_LIST_HEAD(&ksocknal_data.ksnd_enomem_conns);
2416 INIT_LIST_HEAD(&ksocknal_data.ksnd_zombie_conns);
2417 INIT_LIST_HEAD(&ksocknal_data.ksnd_deathrow_conns);
2418 init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq);
2419
2420 spin_lock_init(&ksocknal_data.ksnd_connd_lock);
2421 INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_connreqs);
2422 INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_routes);
2423 init_waitqueue_head(&ksocknal_data.ksnd_connd_waitq);
2424
2425 spin_lock_init(&ksocknal_data.ksnd_tx_lock);
2426 INIT_LIST_HEAD(&ksocknal_data.ksnd_idle_noop_txs);
2427
2428 /* NB the memset above zeroed the whole of ksocknal_data */
2429
2430 /* flag lists/ptrs/locks initialised */
2431 ksocknal_data.ksnd_init = SOCKNAL_INIT_DATA;
2432 try_module_get(THIS_MODULE);
2433
2434 ksocknal_data.ksnd_sched_info = cfs_percpt_alloc(lnet_cpt_table(),
2435 sizeof(*info));
2436 if (!ksocknal_data.ksnd_sched_info)
2437 goto failed;
2438
2439 cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
2440 struct ksock_sched *sched;
2441 int nthrs;
2442
2443 nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
2444 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2445 nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
2446 } else {
2447 /*
2448 * cap at half of the CPUs; assume the other half should
2449 * be reserved for upper-layer modules
2450 */
2451 nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2452 }
2453
2454 info->ksi_nthreads_max = nthrs;
2455 info->ksi_cpt = i;
2456
2457 LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i,
2458 info->ksi_nthreads_max * sizeof(*sched));
2459 if (!info->ksi_scheds)
2460 goto failed;
2461
2462 for (; nthrs > 0; nthrs--) {
2463 sched = &info->ksi_scheds[nthrs - 1];
2464
2465 sched->kss_info = info;
2466 spin_lock_init(&sched->kss_lock);
2467 INIT_LIST_HEAD(&sched->kss_rx_conns);
2468 INIT_LIST_HEAD(&sched->kss_tx_conns);
2469 INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
2470 init_waitqueue_head(&sched->kss_waitq);
2471 }
2472 }
2473
2474 ksocknal_data.ksnd_connd_starting = 0;
2475 ksocknal_data.ksnd_connd_failed_stamp = 0;
2476 ksocknal_data.ksnd_connd_starting_stamp = ktime_get_real_seconds();
2477 /*
2478 * must have at least 2 connds to remain responsive to accepts while
2479 * connecting
2480 */
2481 if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
2482 *ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1;
2483
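/* NB this aliases the pointer itself rather than copying the value:
 * from here on ksnd_nconnds_max reads back whatever ksnd_nconnds
 * holds, so the maximum always tracks the configured connd count.
 */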
2484 if (*ksocknal_tunables.ksnd_nconnds_max <
2485 *ksocknal_tunables.ksnd_nconnds) {
2486 ksocknal_tunables.ksnd_nconnds_max =
2487 ksocknal_tunables.ksnd_nconnds;
2488 }
2489
2490 for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
2491 char name[16];
2492
2493 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2494 ksocknal_data.ksnd_connd_starting++;
2495 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2496
2497 snprintf(name, sizeof(name), "socknal_cd%02d", i);
2498 rc = ksocknal_thread_start(ksocknal_connd,
2499 (void *)((ulong_ptr_t)i), name);
2500 if (rc) {
2501 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2502 ksocknal_data.ksnd_connd_starting--;
2503 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2504 CERROR("Can't spawn socknal connd: %d\n", rc);
2505 goto failed;
2506 }
2507 }
2508
2509 rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper");
2510 if (rc) {
2511 CERROR("Can't spawn socknal reaper: %d\n", rc);
2512 goto failed;
2513 }
2514
2515 /* flag everything initialised */
2516 ksocknal_data.ksnd_init = SOCKNAL_INIT_ALL;
2517
2518 return 0;
2519
2520 failed:
2521 ksocknal_base_shutdown();
2522 return -ENETDOWN;
2523 }
2524
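/* Shutdown aid: dump one peer still attached to @ni, along with its
 * routes and conns, so a stuck "waiting for peers to disconnect"
 * loop can be diagnosed.
 */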
2525 static void
2526 ksocknal_debug_peerhash(lnet_ni_t *ni)
2527 {
2528 struct ksock_peer *peer = NULL;
2529 struct list_head *tmp;
2530 int i;
2531
2532 read_lock(&ksocknal_data.ksnd_global_lock);
2533
2534 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
2535 list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) {
2536 peer = list_entry(tmp, struct ksock_peer, ksnp_list);
2537
2538 if (peer->ksnp_ni == ni)
2539 break;
2540
2541 peer = NULL;
2542 }
/* stop at the first match; scanning a later non-empty
 * bucket would reset peer to NULL again */
if (peer)
break;
2543 }
2544
2545 if (peer) {
2546 struct ksock_route *route;
2547 struct ksock_conn *conn;
2548
2549 CWARN("Active peer on shutdown: %s, ref %d, scnt %d, closing %d, accepting %d, err %d, zcookie %llu, txq %d, zc_req %d\n",
2550 libcfs_id2str(peer->ksnp_id),
2551 atomic_read(&peer->ksnp_refcount),
2552 peer->ksnp_sharecount, peer->ksnp_closing,
2553 peer->ksnp_accepting, peer->ksnp_error,
2554 peer->ksnp_zc_next_cookie,
2555 !list_empty(&peer->ksnp_tx_queue),
2556 !list_empty(&peer->ksnp_zc_req_list));
2557
2558 list_for_each(tmp, &peer->ksnp_routes) {
2559 route = list_entry(tmp, struct ksock_route, ksnr_list);
2560 CWARN("Route: ref %d, schd %d, conn %d, cnted %d, del %d\n",
2561 atomic_read(&route->ksnr_refcount),
2562 route->ksnr_scheduled, route->ksnr_connecting,
2563 route->ksnr_connected, route->ksnr_deleted);
2564 }
2565
2566 list_for_each(tmp, &peer->ksnp_conns) {
2567 conn = list_entry(tmp, struct ksock_conn, ksnc_list);
2568 CWARN("Conn: ref %d, sref %d, t %d, c %d\n",
2569 atomic_read(&conn->ksnc_conn_refcount),
2570 atomic_read(&conn->ksnc_sock_refcount),
2571 conn->ksnc_type, conn->ksnc_closing);
2572 }
2573 }
2574
2575 read_unlock(&ksocknal_data.ksnd_global_lock);
2576 }
2577
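/* lnd_shutdown: flag the net as shutting down, delete every peer on
 * it, then wait (dumping peer state via ksocknal_debug_peerhash())
 * until all peer state is gone before freeing the net and, if it was
 * the last one, shutting down the module-global state too.
 */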
2578 void
2579 ksocknal_shutdown(lnet_ni_t *ni)
2580 {
2581 struct ksock_net *net = ni->ni_data;
2582 int i;
2583 lnet_process_id_t anyid = {0};
2584
2585 anyid.nid = LNET_NID_ANY;
2586 anyid.pid = LNET_PID_ANY;
2587
2588 LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
2589 LASSERT(ksocknal_data.ksnd_nnets > 0);
2590
2591 spin_lock_bh(&net->ksnn_lock);
2592 net->ksnn_shutdown = 1; /* prevent new peers */
2593 spin_unlock_bh(&net->ksnn_lock);
2594
2595 /* Delete all peers */
2596 ksocknal_del_peer(ni, anyid, 0);
2597
2598 /* Wait for all peer state to clean up */
2599 i = 2;
2600 spin_lock_bh(&net->ksnn_lock);
2601 while (net->ksnn_npeers) {
2602 spin_unlock_bh(&net->ksnn_lock);
2603
2604 i++;
2605 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
2606 "waiting for %d peers to disconnect\n",
2607 net->ksnn_npeers);
2608 set_current_state(TASK_UNINTERRUPTIBLE);
2609 schedule_timeout(cfs_time_seconds(1));
2610
2611 ksocknal_debug_peerhash(ni);
2612
2613 spin_lock_bh(&net->ksnn_lock);
2614 }
2615 spin_unlock_bh(&net->ksnn_lock);
2616
2617 for (i = 0; i < net->ksnn_ninterfaces; i++) {
2618 LASSERT(!net->ksnn_interfaces[i].ksni_npeers);
2619 LASSERT(!net->ksnn_interfaces[i].ksni_nroutes);
2620 }
2621
2622 list_del(&net->ksnn_list);
2623 LIBCFS_FREE(net, sizeof(*net));
2624
2625 ksocknal_data.ksnd_nnets--;
2626 if (!ksocknal_data.ksnd_nnets)
2627 ksocknal_base_shutdown();
2628 }
2629
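/* Autoconfiguration: fill ksnn_interfaces[] with every usable (up,
 * non-loopback) IP interface on the node and return how many were
 * found.  NB the caller currently keeps only the first one.
 */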
2630 static int
2631 ksocknal_enumerate_interfaces(struct ksock_net *net)
2632 {
2633 char **names;
2634 int i;
2635 int j;
2636 int rc;
2637 int n;
2638
2639 n = lnet_ipif_enumerate(&names);
2640 if (n <= 0) {
2641 CERROR("Can't enumerate interfaces: %d\n", n);
2642 return n;
2643 }
2644
2645 for (i = j = 0; i < n; i++) {
2646 int up;
2647 __u32 ip;
2648 __u32 mask;
2649
2650 if (!strcmp(names[i], "lo")) /* skip the loopback IF */
2651 continue;
2652
2653 rc = lnet_ipif_query(names[i], &up, &ip, &mask);
2654 if (rc) {
2655 CWARN("Can't get interface %s info: %d\n",
2656 names[i], rc);
2657 continue;
2658 }
2659
2660 if (!up) {
2661 CWARN("Ignoring interface %s (down)\n",
2662 names[i]);
2663 continue;
2664 }
2665
2666 if (j == LNET_MAX_INTERFACES) {
2667 CWARN("Ignoring interface %s (too many interfaces)\n",
2668 names[i]);
2669 continue;
2670 }
2671
2672 net->ksnn_interfaces[j].ksni_ipaddr = ip;
2673 net->ksnn_interfaces[j].ksni_netmask = mask;
2674 strlcpy(net->ksnn_interfaces[j].ksni_name,
2675 names[i], sizeof(net->ksnn_interfaces[j].ksni_name));
2676 j++;
2677 }
2678
2679 lnet_ipif_free_enumeration(names, n);
2680
2681 if (!j)
2682 CERROR("Can't find any usable interfaces\n");
2683
2684 return j;
2685 }
2686
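/* Count how many of this net's interfaces (ignoring any ":alias"
 * suffix) are not already used by another net; a non-zero result
 * means extra scheduler threads may be needed.
 */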
2687 static int
2688 ksocknal_search_new_ipif(struct ksock_net *net)
2689 {
2690 int new_ipif = 0;
2691 int i;
2692
2693 for (i = 0; i < net->ksnn_ninterfaces; i++) {
2694 char *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
2695 char *colon = strchr(ifnam, ':');
2696 int found = 0;
2697 struct ksock_net *tmp;
2698 int j;
2699
2700 if (colon) /* ignore alias device */
2701 *colon = 0;
2702
2703 list_for_each_entry(tmp, &ksocknal_data.ksnd_nets, ksnn_list) {
2704 for (j = 0; !found && j < tmp->ksnn_ninterfaces; j++) {
2705 char *ifnam2 =
2706 &tmp->ksnn_interfaces[j].ksni_name[0];
2707 char *colon2 = strchr(ifnam2, ':');
2708
2709 if (colon2)
2710 *colon2 = 0;
2711
2712 found = !strcmp(ifnam, ifnam2);
2713 if (colon2)
2714 *colon2 = ':';
2715 }
2716 if (found)
2717 break;
2718 }
2719
2720 new_ipif += !found;
2721 if (colon)
2722 *colon = ':';
2723 }
2724
2725 return new_ipif;
2726 }
2727
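/* Bring the scheduler pool for one CPT up to strength: size the pool
 * on first use, or add up to two threads when a new interface
 * appears, never exceeding ksi_nthreads_max.
 */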
2728 static int
2729 ksocknal_start_schedulers(struct ksock_sched_info *info)
2730 {
2731 int nthrs;
2732 int rc = 0;
2733 int i;
2734
2735 if (!info->ksi_nthreads) {
2736 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2737 nthrs = info->ksi_nthreads_max;
2738 } else {
2739 nthrs = cfs_cpt_weight(lnet_cpt_table(),
2740 info->ksi_cpt);
2741 nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2742 nthrs = min(SOCKNAL_NSCHEDS_HIGH, nthrs);
2743 }
2744 nthrs = min(nthrs, info->ksi_nthreads_max);
2745 } else {
2746 LASSERT(info->ksi_nthreads <= info->ksi_nthreads_max);
2747 /* add up to two more threads if there is a new interface */
2748 nthrs = min(2, info->ksi_nthreads_max - info->ksi_nthreads);
2749 }
2750
2751 for (i = 0; i < nthrs; i++) {
2752 long id;
2753 char name[20];
2754 struct ksock_sched *sched;
2755
2756 id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i);
2757 sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
2758 snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
2759 info->ksi_cpt, (int)(sched - &info->ksi_scheds[0]));
2760
2761 rc = ksocknal_thread_start(ksocknal_scheduler,
2762 (void *)id, name);
2763 if (!rc)
2764 continue;
2765
2766 CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
2767 info->ksi_cpt, info->ksi_nthreads + i, rc);
2768 break;
2769 }
2770
2771 info->ksi_nthreads += i;
2772 return rc;
2773 }
2774
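/* Ensure each CPT this net will use has scheduler threads running;
 * CPTs that already have threads are skipped unless the net brings a
 * new interface.
 */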
2775 static int
2776 ksocknal_net_start_threads(struct ksock_net *net, __u32 *cpts, int ncpts)
2777 {
2778 int newif = ksocknal_search_new_ipif(net);
2779 int rc;
2780 int i;
2781
2782 LASSERT(ncpts > 0 && ncpts <= cfs_cpt_number(lnet_cpt_table()));
2783
2784 for (i = 0; i < ncpts; i++) {
2785 struct ksock_sched_info *info;
2786 int cpt = !cpts ? i : cpts[i];
2787
2788 LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
2789 info = ksocknal_data.ksnd_sched_info[cpt];
2790
2791 if (!newif && info->ksi_nthreads > 0)
2792 continue;
2793
2794 rc = ksocknal_start_schedulers(info);
2795 if (rc)
2796 return rc;
2797 }
2798 return 0;
2799 }
2800
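/* lnd_startup: bring up the global state on first use, bind the net
 * to its IP interfaces (auto-enumerated, or listed in
 * ni_interfaces[]), start scheduler threads and derive the NID from
 * the first interface's address.
 */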
2801 int
2802 ksocknal_startup(lnet_ni_t *ni)
2803 {
2804 struct ksock_net *net;
2805 int rc;
2806 int i;
2807
2808 LASSERT(ni->ni_lnd == &the_ksocklnd);
2809
2810 if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) {
2811 rc = ksocknal_base_startup();
2812 if (rc)
2813 return rc;
2814 }
2815
2816 LIBCFS_ALLOC(net, sizeof(*net));
2817 if (!net)
2818 goto fail_0;
2819
2820 spin_lock_init(&net->ksnn_lock);
2821 net->ksnn_incarnation = ksocknal_new_incarnation();
2822 ni->ni_data = net;
2823 ni->ni_peertimeout = *ksocknal_tunables.ksnd_peertimeout;
2824 ni->ni_maxtxcredits = *ksocknal_tunables.ksnd_credits;
2825 ni->ni_peertxcredits = *ksocknal_tunables.ksnd_peertxcredits;
2826 ni->ni_peerrtrcredits = *ksocknal_tunables.ksnd_peerrtrcredits;
2827
2828 if (!ni->ni_interfaces[0]) {
2829 rc = ksocknal_enumerate_interfaces(net);
2830 if (rc <= 0)
2831 goto fail_1;
2832
2833 net->ksnn_ninterfaces = 1;
2834 } else {
2835 for (i = 0; i < LNET_MAX_INTERFACES; i++) {
2836 int up;
2837
2838 if (!ni->ni_interfaces[i])
2839 break;
2840
2841 rc = lnet_ipif_query(ni->ni_interfaces[i], &up,
2842 &net->ksnn_interfaces[i].ksni_ipaddr,
2843 &net->ksnn_interfaces[i].ksni_netmask);
2844
2845 if (rc) {
2846 CERROR("Can't get interface %s info: %d\n",
2847 ni->ni_interfaces[i], rc);
2848 goto fail_1;
2849 }
2850
2851 if (!up) {
2852 CERROR("Interface %s is down\n",
2853 ni->ni_interfaces[i]);
2854 goto fail_1;
2855 }
2856
2857 strlcpy(net->ksnn_interfaces[i].ksni_name,
2858 ni->ni_interfaces[i],
2859 sizeof(net->ksnn_interfaces[i].ksni_name));
2860 }
2861 net->ksnn_ninterfaces = i;
2862 }
2863
2864 /* start the threads before adding the net to ksocknal_data.ksnd_nets */
2865 rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts);
2866 if (rc)
2867 goto fail_1;
2868
2869 ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
2870 net->ksnn_interfaces[0].ksni_ipaddr);
2871 list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets);
2872
2873 ksocknal_data.ksnd_nnets++;
2874
2875 return 0;
2876
2877 fail_1:
2878 LIBCFS_FREE(net, sizeof(*net));
2879 fail_0:
2880 if (!ksocknal_data.ksnd_nnets)
2881 ksocknal_base_shutdown();
2882
2883 return -ENETDOWN;
2884 }
2885
2886 static void __exit ksocklnd_exit(void)
2887 {
2888 lnet_unregister_lnd(&the_ksocklnd);
2889 }
2890
2891 static int __init ksocklnd_init(void)
2892 {
2893 int rc;
2894
2895 /* check that the ksnr_connected/connecting fields are large enough */
2896 CLASSERT(SOCKLND_CONN_NTYPES <= 4);
2897 CLASSERT(SOCKLND_CONN_ACK == SOCKLND_CONN_BULK_IN);
2898
2899 /* initialize the_ksocklnd */
2900 the_ksocklnd.lnd_type = SOCKLND;
2901 the_ksocklnd.lnd_startup = ksocknal_startup;
2902 the_ksocklnd.lnd_shutdown = ksocknal_shutdown;
2903 the_ksocklnd.lnd_ctl = ksocknal_ctl;
2904 the_ksocklnd.lnd_send = ksocknal_send;
2905 the_ksocklnd.lnd_recv = ksocknal_recv;
2906 the_ksocklnd.lnd_notify = ksocknal_notify;
2907 the_ksocklnd.lnd_query = ksocknal_query;
2908 the_ksocklnd.lnd_accept = ksocknal_accept;
2909
2910 rc = ksocknal_tunables_init();
2911 if (rc)
2912 return rc;
2913
2914 lnet_register_lnd(&the_ksocklnd);
2915
2916 return 0;
2917 }
2918
2919 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
2920 MODULE_DESCRIPTION("TCP Socket LNet Network Driver");
2921 MODULE_VERSION("2.7.0");
2922 MODULE_LICENSE("GPL");
2923
2924 module_init(ksocklnd_init);
2925 module_exit(ksocklnd_exit);