/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/klnds/socklnd/socklnd.c
 *
 * Author: Zach Brown <zab@zabbo.net>
 * Author: Peter J. Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 * Author: Eric Barton <eric@bartonsoftware.com>
 */

#include "socklnd.h"

static lnd_t the_ksocklnd;
struct ksock_nal_data ksocknal_data;

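/* Look up the local interface on @ni that is configured with address @ip;
 * returns NULL if no interface matches. */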
static struct ksock_interface *
ksocknal_ip2iface(lnet_ni_t *ni, __u32 ip)
{
	struct ksock_net *net = ni->ni_data;
	int i;
	struct ksock_interface *iface;

	for (i = 0; i < net->ksnn_ninterfaces; i++) {
		LASSERT(i < LNET_MAX_INTERFACES);
		iface = &net->ksnn_interfaces[i];

		if (iface->ksni_ipaddr == ip)
			return iface;
	}

	return NULL;
}

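/* Allocate a route to @ipaddr:@port, initialised with a single reference
 * that is owned by the caller. */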
static struct ksock_route *
ksocknal_create_route(__u32 ipaddr, int port)
{
	struct ksock_route *route;

	LIBCFS_ALLOC(route, sizeof(*route));
	if (!route)
		return NULL;

	atomic_set(&route->ksnr_refcount, 1);
	route->ksnr_peer = NULL;
	route->ksnr_retry_interval = 0;	/* OK to connect at any time */
	route->ksnr_ipaddr = ipaddr;
	route->ksnr_port = port;
	route->ksnr_scheduled = 0;
	route->ksnr_connecting = 0;
	route->ksnr_connected = 0;
	route->ksnr_deleted = 0;
	route->ksnr_conn_count = 0;
	route->ksnr_share_count = 0;

	return route;
}

void
ksocknal_destroy_route(struct ksock_route *route)
{
	LASSERT(!atomic_read(&route->ksnr_refcount));

	if (route->ksnr_peer)
		ksocknal_peer_decref(route->ksnr_peer);

	LIBCFS_FREE(route, sizeof(*route));
}

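/* Allocate a peer for @id on the CPT derived from its NID. Fails with
 * -ESHUTDOWN if the network is already shutting down; on success the caller
 * owns the initial reference. */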
static int
ksocknal_create_peer(struct ksock_peer **peerp, lnet_ni_t *ni, lnet_process_id_t id)
{
	int cpt = lnet_cpt_of_nid(id.nid);
	struct ksock_net *net = ni->ni_data;
	struct ksock_peer *peer;

	LASSERT(id.nid != LNET_NID_ANY);
	LASSERT(id.pid != LNET_PID_ANY);
	LASSERT(!in_interrupt());

	LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer));
	if (!peer)
		return -ENOMEM;

	peer->ksnp_ni = ni;
	peer->ksnp_id = id;
	atomic_set(&peer->ksnp_refcount, 1);	/* 1 ref for caller */
	peer->ksnp_closing = 0;
	peer->ksnp_accepting = 0;
	peer->ksnp_proto = NULL;
	peer->ksnp_last_alive = 0;
	peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;

	INIT_LIST_HEAD(&peer->ksnp_conns);
	INIT_LIST_HEAD(&peer->ksnp_routes);
	INIT_LIST_HEAD(&peer->ksnp_tx_queue);
	INIT_LIST_HEAD(&peer->ksnp_zc_req_list);
	spin_lock_init(&peer->ksnp_lock);

	spin_lock_bh(&net->ksnn_lock);

	if (net->ksnn_shutdown) {
		spin_unlock_bh(&net->ksnn_lock);

		LIBCFS_FREE(peer, sizeof(*peer));
		CERROR("Can't create peer: network shutdown\n");
		return -ESHUTDOWN;
	}

	net->ksnn_npeers++;

	spin_unlock_bh(&net->ksnn_lock);

	*peerp = peer;
	return 0;
}

void
ksocknal_destroy_peer(struct ksock_peer *peer)
{
	struct ksock_net *net = peer->ksnp_ni->ni_data;

	CDEBUG(D_NET, "peer %s %p deleted\n",
	       libcfs_id2str(peer->ksnp_id), peer);

	LASSERT(!atomic_read(&peer->ksnp_refcount));
	LASSERT(!peer->ksnp_accepting);
	LASSERT(list_empty(&peer->ksnp_conns));
	LASSERT(list_empty(&peer->ksnp_routes));
	LASSERT(list_empty(&peer->ksnp_tx_queue));
	LASSERT(list_empty(&peer->ksnp_zc_req_list));

	LIBCFS_FREE(peer, sizeof(*peer));

	/*
	 * NB a peer's connections and routes keep a reference on their peer
	 * until they are destroyed, so we can be assured that _all_ state to
	 * do with this peer has been cleaned up when its refcount drops to
	 * zero.
	 */
	spin_lock_bh(&net->ksnn_lock);
	net->ksnn_npeers--;
	spin_unlock_bh(&net->ksnn_lock);
}

struct ksock_peer *
ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id)
{
	struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
	struct list_head *tmp;
	struct ksock_peer *peer;

	list_for_each(tmp, peer_list) {
		peer = list_entry(tmp, struct ksock_peer, ksnp_list);

		LASSERT(!peer->ksnp_closing);

		if (peer->ksnp_ni != ni)
			continue;

		if (peer->ksnp_id.nid != id.nid ||
		    peer->ksnp_id.pid != id.pid)
			continue;

		CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
		       peer, libcfs_id2str(id),
		       atomic_read(&peer->ksnp_refcount));
		return peer;
	}
	return NULL;
}

struct ksock_peer *
ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id)
{
	struct ksock_peer *peer;

	read_lock(&ksocknal_data.ksnd_global_lock);
	peer = ksocknal_find_peer_locked(ni, id);
	if (peer)		/* +1 ref for caller? */
		ksocknal_peer_addref(peer);
	read_unlock(&ksocknal_data.ksnd_global_lock);

	return peer;
}

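/* Remove @peer from the peer table: release its claims on the passive
 * interfaces it was using, mark it closing and drop the peer list's ref.
 * Caller holds ksnd_global_lock exclusively. */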
static void
ksocknal_unlink_peer_locked(struct ksock_peer *peer)
{
	int i;
	__u32 ip;
	struct ksock_interface *iface;

	for (i = 0; i < peer->ksnp_n_passive_ips; i++) {
		LASSERT(i < LNET_MAX_INTERFACES);
		ip = peer->ksnp_passive_ips[i];

		iface = ksocknal_ip2iface(peer->ksnp_ni, ip);
		/*
		 * All IPs in peer->ksnp_passive_ips[] come from the
		 * interface list, therefore the call must succeed.
		 */
		LASSERT(iface);

		CDEBUG(D_NET, "peer=%p iface=%p ksni_nroutes=%d\n",
		       peer, iface, iface->ksni_nroutes);
		iface->ksni_npeers--;
	}

	LASSERT(list_empty(&peer->ksnp_conns));
	LASSERT(list_empty(&peer->ksnp_routes));
	LASSERT(!peer->ksnp_closing);
	peer->ksnp_closing = 1;
	list_del(&peer->ksnp_list);
	/* lose peerlist's ref */
	ksocknal_peer_decref(peer);
}

static int
ksocknal_get_peer_info(lnet_ni_t *ni, int index,
		       lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip,
		       int *port, int *conn_count, int *share_count)
{
	struct ksock_peer *peer;
	struct list_head *ptmp;
	struct ksock_route *route;
	struct list_head *rtmp;
	int i;
	int j;
	int rc = -ENOENT;

	read_lock(&ksocknal_data.ksnd_global_lock);

	for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
		list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
			peer = list_entry(ptmp, struct ksock_peer, ksnp_list);

			if (peer->ksnp_ni != ni)
				continue;

			if (!peer->ksnp_n_passive_ips &&
			    list_empty(&peer->ksnp_routes)) {
				if (index-- > 0)
					continue;

				*id = peer->ksnp_id;
				*myip = 0;
				*peer_ip = 0;
				*port = 0;
				*conn_count = 0;
				*share_count = 0;
				rc = 0;
				goto out;
			}

			for (j = 0; j < peer->ksnp_n_passive_ips; j++) {
				if (index-- > 0)
					continue;

				*id = peer->ksnp_id;
				*myip = peer->ksnp_passive_ips[j];
				*peer_ip = 0;
				*port = 0;
				*conn_count = 0;
				*share_count = 0;
				rc = 0;
				goto out;
			}

			list_for_each(rtmp, &peer->ksnp_routes) {
				if (index-- > 0)
					continue;

				route = list_entry(rtmp, struct ksock_route,
						   ksnr_list);

				*id = peer->ksnp_id;
				*myip = route->ksnr_myipaddr;
				*peer_ip = route->ksnr_ipaddr;
				*port = route->ksnr_port;
				*conn_count = route->ksnr_conn_count;
				*share_count = route->ksnr_share_count;
				rc = 0;
				goto out;
			}
		}
	}
 out:
	read_unlock(&ksocknal_data.ksnd_global_lock);
	return rc;
}

static void
ksocknal_associate_route_conn_locked(struct ksock_route *route, struct ksock_conn *conn)
{
	struct ksock_peer *peer = route->ksnr_peer;
	int type = conn->ksnc_type;
	struct ksock_interface *iface;

	conn->ksnc_route = route;
	ksocknal_route_addref(route);

	if (route->ksnr_myipaddr != conn->ksnc_myipaddr) {
		if (!route->ksnr_myipaddr) {
			/* route wasn't bound locally yet (the initial route) */
			CDEBUG(D_NET, "Binding %s %pI4h to %pI4h\n",
			       libcfs_id2str(peer->ksnp_id),
			       &route->ksnr_ipaddr,
			       &conn->ksnc_myipaddr);
		} else {
			CDEBUG(D_NET, "Rebinding %s %pI4h from %pI4h to %pI4h\n",
			       libcfs_id2str(peer->ksnp_id),
			       &route->ksnr_ipaddr,
			       &route->ksnr_myipaddr,
			       &conn->ksnc_myipaddr);

			iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
						  route->ksnr_myipaddr);
			if (iface)
				iface->ksni_nroutes--;
		}
		route->ksnr_myipaddr = conn->ksnc_myipaddr;
		iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
					  route->ksnr_myipaddr);
		if (iface)
			iface->ksni_nroutes++;
	}

	route->ksnr_connected |= (1 << type);
	route->ksnr_conn_count++;

	/*
	 * Successful connection => further attempts can
	 * proceed immediately
	 */
	route->ksnr_retry_interval = 0;
}

static void
ksocknal_add_route_locked(struct ksock_peer *peer, struct ksock_route *route)
{
	struct list_head *tmp;
	struct ksock_conn *conn;
	struct ksock_route *route2;

	LASSERT(!peer->ksnp_closing);
	LASSERT(!route->ksnr_peer);
	LASSERT(!route->ksnr_scheduled);
	LASSERT(!route->ksnr_connecting);
	LASSERT(!route->ksnr_connected);

	/* LASSERT(unique) */
	list_for_each(tmp, &peer->ksnp_routes) {
		route2 = list_entry(tmp, struct ksock_route, ksnr_list);

		if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
			CERROR("Duplicate route %s %pI4h\n",
			       libcfs_id2str(peer->ksnp_id),
			       &route->ksnr_ipaddr);
			LBUG();
		}
	}

	route->ksnr_peer = peer;
	ksocknal_peer_addref(peer);
	/* peer's routelist takes over my ref on 'route' */
	list_add_tail(&route->ksnr_list, &peer->ksnp_routes);

	list_for_each(tmp, &peer->ksnp_conns) {
		conn = list_entry(tmp, struct ksock_conn, ksnc_list);

		if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
			continue;

		ksocknal_associate_route_conn_locked(route, conn);
		/* keep going (typed routes) */
	}
}

static void
ksocknal_del_route_locked(struct ksock_route *route)
{
	struct ksock_peer *peer = route->ksnr_peer;
	struct ksock_interface *iface;
	struct ksock_conn *conn;
	struct list_head *ctmp;
	struct list_head *cnxt;

	LASSERT(!route->ksnr_deleted);

	/* Close associated conns */
	list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) {
		conn = list_entry(ctmp, struct ksock_conn, ksnc_list);

		if (conn->ksnc_route != route)
			continue;

		ksocknal_close_conn_locked(conn, 0);
	}

	if (route->ksnr_myipaddr) {
		iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
					  route->ksnr_myipaddr);
		if (iface)
			iface->ksni_nroutes--;
	}

	route->ksnr_deleted = 1;
	list_del(&route->ksnr_list);
	ksocknal_route_decref(route);	/* drop peer's ref */

	if (list_empty(&peer->ksnp_routes) &&
	    list_empty(&peer->ksnp_conns)) {
		/*
		 * I've just removed the last route to a peer with no active
		 * connections
		 */
		ksocknal_unlink_peer_locked(peer);
	}
}

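/* Create (or find) the peer @id and add an explicit route to @ipaddr:@port,
 * bumping the route's share count if such a route already exists. */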
int
ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
{
	struct list_head *tmp;
	struct ksock_peer *peer;
	struct ksock_peer *peer2;
	struct ksock_route *route;
	struct ksock_route *route2;
	int rc;

	if (id.nid == LNET_NID_ANY ||
	    id.pid == LNET_PID_ANY)
		return -EINVAL;

	/* Have a brand new peer ready... */
	rc = ksocknal_create_peer(&peer, ni, id);
	if (rc)
		return rc;

	route = ksocknal_create_route(ipaddr, port);
	if (!route) {
		ksocknal_peer_decref(peer);
		return -ENOMEM;
	}

	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	/* always called with a ref on ni, so shutdown can't have started */
	LASSERT(!((struct ksock_net *)ni->ni_data)->ksnn_shutdown);

	peer2 = ksocknal_find_peer_locked(ni, id);
	if (peer2) {
		ksocknal_peer_decref(peer);
		peer = peer2;
	} else {
		/* peer table takes my ref on peer */
		list_add_tail(&peer->ksnp_list,
			      ksocknal_nid2peerlist(id.nid));
	}

	route2 = NULL;
	list_for_each(tmp, &peer->ksnp_routes) {
		route2 = list_entry(tmp, struct ksock_route, ksnr_list);

		if (route2->ksnr_ipaddr == ipaddr)
			break;

		route2 = NULL;
	}
	if (!route2) {
		ksocknal_add_route_locked(peer, route);
		route->ksnr_share_count++;
	} else {
		ksocknal_route_decref(route);
		route2->ksnr_share_count++;
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	return 0;
}

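/* Delete all routes to @peer whose IP matches @ip (0 matches any). If no
 * shared routes remain afterwards, the remaining auto-created routes and all
 * connections are removed too. Caller holds ksnd_global_lock exclusively. */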
static void
ksocknal_del_peer_locked(struct ksock_peer *peer, __u32 ip)
{
	struct ksock_conn *conn;
	struct ksock_route *route;
	struct list_head *tmp;
	struct list_head *nxt;
	int nshared;

	LASSERT(!peer->ksnp_closing);

	/* Extra ref prevents peer disappearing until I'm done with it */
	ksocknal_peer_addref(peer);

	list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
		route = list_entry(tmp, struct ksock_route, ksnr_list);

		/* no match */
		if (!(!ip || route->ksnr_ipaddr == ip))
			continue;

		route->ksnr_share_count = 0;
		/* This deletes associated conns too */
		ksocknal_del_route_locked(route);
	}

	nshared = 0;
	list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
		route = list_entry(tmp, struct ksock_route, ksnr_list);
		nshared += route->ksnr_share_count;
	}

	if (!nshared) {
		/*
		 * remove everything else if there are no explicit entries
		 * left
		 */
		list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
			route = list_entry(tmp, struct ksock_route, ksnr_list);

			/* we should only be removing auto-entries */
			LASSERT(!route->ksnr_share_count);
			ksocknal_del_route_locked(route);
		}

		list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
			conn = list_entry(tmp, struct ksock_conn, ksnc_list);

			ksocknal_close_conn_locked(conn, 0);
		}
	}

	ksocknal_peer_decref(peer);
	/* NB peer unlinks itself when last conn/route is removed */
}

static int
ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
{
	LIST_HEAD(zombies);
	struct list_head *ptmp;
	struct list_head *pnxt;
	struct ksock_peer *peer;
	int lo;
	int hi;
	int i;
	int rc = -ENOENT;

	write_lock_bh(&ksocknal_data.ksnd_global_lock);

	if (id.nid != LNET_NID_ANY) {
		lo = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
		hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
	} else {
		lo = 0;
		hi = ksocknal_data.ksnd_peer_hash_size - 1;
	}

	for (i = lo; i <= hi; i++) {
		list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {
			peer = list_entry(ptmp, struct ksock_peer, ksnp_list);

			if (peer->ksnp_ni != ni)
				continue;

			if (!((id.nid == LNET_NID_ANY || peer->ksnp_id.nid == id.nid) &&
			      (id.pid == LNET_PID_ANY || peer->ksnp_id.pid == id.pid)))
				continue;

			ksocknal_peer_addref(peer);	/* a ref for me... */

			ksocknal_del_peer_locked(peer, ip);

			if (peer->ksnp_closing &&
			    !list_empty(&peer->ksnp_tx_queue)) {
				LASSERT(list_empty(&peer->ksnp_conns));
				LASSERT(list_empty(&peer->ksnp_routes));

				list_splice_init(&peer->ksnp_tx_queue,
						 &zombies);
			}

			ksocknal_peer_decref(peer);	/* ...till here */

			rc = 0;	/* matched! */
		}
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	ksocknal_txlist_done(ni, &zombies, 1);

	return rc;
}

ff13fd40 627static struct ksock_conn *
d4de2ab8 628ksocknal_get_conn_by_idx(lnet_ni_t *ni, int index)
d7e09d03 629{
ff13fd40 630 struct ksock_peer *peer;
97d10d0a 631 struct list_head *ptmp;
ff13fd40 632 struct ksock_conn *conn;
97d10d0a
MS
633 struct list_head *ctmp;
634 int i;
d7e09d03
PT
635
636 read_lock(&ksocknal_data.ksnd_global_lock);
637
638 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
d4de2ab8 639 list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
ff13fd40 640 peer = list_entry(ptmp, struct ksock_peer, ksnp_list);
d7e09d03 641
d4de2ab8 642 LASSERT(!peer->ksnp_closing);
d7e09d03
PT
643
644 if (peer->ksnp_ni != ni)
645 continue;
646
d4de2ab8 647 list_for_each(ctmp, &peer->ksnp_conns) {
d7e09d03
PT
648 if (index-- > 0)
649 continue;
650
ff13fd40 651 conn = list_entry(ctmp, struct ksock_conn,
c314c319 652 ksnc_list);
d7e09d03 653 ksocknal_conn_addref(conn);
4b18c358 654 read_unlock(&ksocknal_data.ksnd_global_lock);
a1f659d4 655 return conn;
d7e09d03
PT
656 }
657 }
658 }
659
660 read_unlock(&ksocknal_data.ksnd_global_lock);
a1f659d4 661 return NULL;
d7e09d03
PT
662}
663
ff13fd40 664static struct ksock_sched *
d7e09d03
PT
665ksocknal_choose_scheduler_locked(unsigned int cpt)
666{
667 struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt];
ff13fd40 668 struct ksock_sched *sched;
97d10d0a 669 int i;
d7e09d03
PT
670
671 LASSERT(info->ksi_nthreads > 0);
672
673 sched = &info->ksi_scheds[0];
674 /*
675 * NB: it's safe so far, but info->ksi_nthreads could be changed
676 * at runtime when we have dynamic LNet configuration, then we
677 * need to take care of this.
678 */
679 for (i = 1; i < info->ksi_nthreads; i++) {
680 if (sched->kss_nconns > info->ksi_scheds[i].kss_nconns)
681 sched = &info->ksi_scheds[i];
682 }
683
684 return sched;
685}
686
0b913529 687static int
d4de2ab8 688ksocknal_local_ipvec(lnet_ni_t *ni, __u32 *ipaddrs)
d7e09d03 689{
ff13fd40 690 struct ksock_net *net = ni->ni_data;
97d10d0a
MS
691 int i;
692 int nip;
d7e09d03
PT
693
694 read_lock(&ksocknal_data.ksnd_global_lock);
695
696 nip = net->ksnn_ninterfaces;
d4de2ab8 697 LASSERT(nip <= LNET_MAX_INTERFACES);
d7e09d03 698
4420cfd3
JS
699 /*
700 * Only offer interfaces for additional connections if I have
701 * more than one.
702 */
d7e09d03
PT
703 if (nip < 2) {
704 read_unlock(&ksocknal_data.ksnd_global_lock);
705 return 0;
706 }
707
708 for (i = 0; i < nip; i++) {
709 ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr;
5fd88337 710 LASSERT(ipaddrs[i]);
d7e09d03
PT
711 }
712
713 read_unlock(&ksocknal_data.ksnd_global_lock);
a1f659d4 714 return nip;
d7e09d03
PT
715}
716
0b913529 717static int
ff13fd40 718ksocknal_match_peerip(struct ksock_interface *iface, __u32 *ips, int nips)
d7e09d03 719{
97d10d0a
MS
720 int best_netmatch = 0;
721 int best_xor = 0;
722 int best = -1;
723 int this_xor;
724 int this_netmatch;
725 int i;
d7e09d03
PT
726
727 for (i = 0; i < nips; i++) {
5fd88337 728 if (!ips[i])
d7e09d03
PT
729 continue;
730
b6ee3824 731 this_xor = ips[i] ^ iface->ksni_ipaddr;
5fd88337 732 this_netmatch = !(this_xor & iface->ksni_netmask) ? 1 : 0;
d7e09d03
PT
733
734 if (!(best < 0 ||
735 best_netmatch < this_netmatch ||
736 (best_netmatch == this_netmatch &&
737 best_xor > this_xor)))
738 continue;
739
740 best = i;
741 best_netmatch = this_netmatch;
742 best_xor = this_xor;
743 }
744
d4de2ab8 745 LASSERT(best >= 0);
a1f659d4 746 return best;
d7e09d03
PT
747}
748
0b913529 749static int
ff13fd40 750ksocknal_select_ips(struct ksock_peer *peer, __u32 *peerips, int n_peerips)
d7e09d03 751{
97d10d0a 752 rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
ff13fd40
JS
753 struct ksock_net *net = peer->ksnp_ni->ni_data;
754 struct ksock_interface *iface;
755 struct ksock_interface *best_iface;
97d10d0a
MS
756 int n_ips;
757 int i;
758 int j;
759 int k;
760 __u32 ip;
761 __u32 xor;
762 int this_netmatch;
763 int best_netmatch;
764 int best_npeers;
d7e09d03 765
4420cfd3
JS
766 /*
767 * CAVEAT EMPTOR: We do all our interface matching with an
d7e09d03
PT
768 * exclusive hold of global lock at IRQ priority. We're only
769 * expecting to be dealing with small numbers of interfaces, so the
4420cfd3
JS
770 * O(n**3)-ness shouldn't matter
771 */
772 /*
773 * Also note that I'm not going to return more than n_peerips
774 * interfaces, even if I have more myself
775 */
d7e09d03
PT
776 write_lock_bh(global_lock);
777
d4de2ab8
HE
778 LASSERT(n_peerips <= LNET_MAX_INTERFACES);
779 LASSERT(net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
d7e09d03 780
4420cfd3
JS
781 /*
782 * Only match interfaces for additional connections
783 * if I have > 1 interface
784 */
d7e09d03 785 n_ips = (net->ksnn_ninterfaces < 2) ? 0 :
0c575417 786 min(n_peerips, net->ksnn_ninterfaces);
d7e09d03
PT
787
788 for (i = 0; peer->ksnp_n_passive_ips < n_ips; i++) {
789 /* ^ yes really... */
790
4420cfd3
JS
791 /*
792 * If we have any new interfaces, first tick off all the
d7e09d03
PT
793 * peer IPs that match old interfaces, then choose new
794 * interfaces to match the remaining peer IPS.
795 * We don't forget interfaces we've stopped using; we might
4420cfd3
JS
796 * start using them again...
797 */
d7e09d03
PT
798 if (i < peer->ksnp_n_passive_ips) {
799 /* Old interface. */
800 ip = peer->ksnp_passive_ips[i];
801 best_iface = ksocknal_ip2iface(peer->ksnp_ni, ip);
802
2b51cb03
DE
803 /* peer passive ips are kept up to date */
804 LASSERT(best_iface);
d7e09d03
PT
805 } else {
806 /* choose a new interface */
d4de2ab8 807 LASSERT(i == peer->ksnp_n_passive_ips);
d7e09d03
PT
808
809 best_iface = NULL;
810 best_netmatch = 0;
811 best_npeers = 0;
812
813 for (j = 0; j < net->ksnn_ninterfaces; j++) {
814 iface = &net->ksnn_interfaces[j];
815 ip = iface->ksni_ipaddr;
816
817 for (k = 0; k < peer->ksnp_n_passive_ips; k++)
818 if (peer->ksnp_passive_ips[k] == ip)
819 break;
820
821 if (k < peer->ksnp_n_passive_ips) /* using it already */
822 continue;
823
824 k = ksocknal_match_peerip(iface, peerips, n_peerips);
b6ee3824 825 xor = ip ^ peerips[k];
5fd88337 826 this_netmatch = !(xor & iface->ksni_netmask) ? 1 : 0;
d7e09d03 827
06ace26e 828 if (!(!best_iface ||
d7e09d03
PT
829 best_netmatch < this_netmatch ||
830 (best_netmatch == this_netmatch &&
831 best_npeers > iface->ksni_npeers)))
832 continue;
833
834 best_iface = iface;
835 best_netmatch = this_netmatch;
836 best_npeers = iface->ksni_npeers;
837 }
838
2b51cb03
DE
839 LASSERT(best_iface);
840
d7e09d03
PT
841 best_iface->ksni_npeers++;
842 ip = best_iface->ksni_ipaddr;
843 peer->ksnp_passive_ips[i] = ip;
51078e25 844 peer->ksnp_n_passive_ips = i + 1;
d7e09d03
PT
845 }
846
d7e09d03
PT
847 /* mark the best matching peer IP used */
848 j = ksocknal_match_peerip(best_iface, peerips, n_peerips);
849 peerips[j] = 0;
850 }
851
852 /* Overwrite input peer IP addresses */
853 memcpy(peerips, peer->ksnp_passive_ips, n_ips * sizeof(*peerips));
854
855 write_unlock_bh(global_lock);
856
a1f659d4 857 return n_ips;
d7e09d03
PT
858}
859
0b913529 860static void
ff13fd40 861ksocknal_create_routes(struct ksock_peer *peer, int port,
d7e09d03
PT
862 __u32 *peer_ipaddrs, int npeer_ipaddrs)
863{
ff13fd40 864 struct ksock_route *newroute = NULL;
97d10d0a
MS
865 rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
866 lnet_ni_t *ni = peer->ksnp_ni;
ff13fd40 867 struct ksock_net *net = ni->ni_data;
97d10d0a 868 struct list_head *rtmp;
ff13fd40
JS
869 struct ksock_route *route;
870 struct ksock_interface *iface;
871 struct ksock_interface *best_iface;
97d10d0a
MS
872 int best_netmatch;
873 int this_netmatch;
874 int best_nroutes;
875 int i;
876 int j;
d7e09d03 877
4420cfd3
JS
878 /*
879 * CAVEAT EMPTOR: We do all our interface matching with an
d7e09d03
PT
880 * exclusive hold of global lock at IRQ priority. We're only
881 * expecting to be dealing with small numbers of interfaces, so the
4420cfd3
JS
882 * O(n**3)-ness here shouldn't matter
883 */
d7e09d03
PT
884 write_lock_bh(global_lock);
885
886 if (net->ksnn_ninterfaces < 2) {
4420cfd3
JS
887 /*
888 * Only create additional connections
889 * if I have > 1 interface
890 */
d7e09d03
PT
891 write_unlock_bh(global_lock);
892 return;
893 }
894
d4de2ab8 895 LASSERT(npeer_ipaddrs <= LNET_MAX_INTERFACES);
d7e09d03
PT
896
897 for (i = 0; i < npeer_ipaddrs; i++) {
06ace26e 898 if (newroute) {
d7e09d03
PT
899 newroute->ksnr_ipaddr = peer_ipaddrs[i];
900 } else {
901 write_unlock_bh(global_lock);
902
903 newroute = ksocknal_create_route(peer_ipaddrs[i], port);
06ace26e 904 if (!newroute)
d7e09d03
PT
905 return;
906
907 write_lock_bh(global_lock);
908 }
909
910 if (peer->ksnp_closing) {
911 /* peer got closed under me */
912 break;
913 }
914
915 /* Already got a route? */
916 route = NULL;
917 list_for_each(rtmp, &peer->ksnp_routes) {
ff13fd40 918 route = list_entry(rtmp, struct ksock_route, ksnr_list);
d7e09d03
PT
919
920 if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
921 break;
922
923 route = NULL;
924 }
06ace26e 925 if (route)
d7e09d03
PT
926 continue;
927
928 best_iface = NULL;
929 best_nroutes = 0;
930 best_netmatch = 0;
931
d4de2ab8 932 LASSERT(net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
d7e09d03
PT
933
934 /* Select interface to connect from */
935 for (j = 0; j < net->ksnn_ninterfaces; j++) {
936 iface = &net->ksnn_interfaces[j];
937
938 /* Using this interface already? */
939 list_for_each(rtmp, &peer->ksnp_routes) {
ff13fd40 940 route = list_entry(rtmp, struct ksock_route,
c314c319 941 ksnr_list);
d7e09d03
PT
942
943 if (route->ksnr_myipaddr == iface->ksni_ipaddr)
944 break;
945
946 route = NULL;
947 }
06ace26e 948 if (route)
d7e09d03
PT
949 continue;
950
5fd88337 951 this_netmatch = (!((iface->ksni_ipaddr ^
d7e09d03 952 newroute->ksnr_ipaddr) &
5fd88337 953 iface->ksni_netmask)) ? 1 : 0;
d7e09d03 954
06ace26e 955 if (!(!best_iface ||
d7e09d03
PT
956 best_netmatch < this_netmatch ||
957 (best_netmatch == this_netmatch &&
958 best_nroutes > iface->ksni_nroutes)))
959 continue;
960
961 best_iface = iface;
962 best_netmatch = this_netmatch;
963 best_nroutes = iface->ksni_nroutes;
964 }
965
06ace26e 966 if (!best_iface)
d7e09d03
PT
967 continue;
968
969 newroute->ksnr_myipaddr = best_iface->ksni_ipaddr;
970 best_iface->ksni_nroutes++;
971
972 ksocknal_add_route_locked(peer, newroute);
973 newroute = NULL;
974 }
975
976 write_unlock_bh(global_lock);
06ace26e 977 if (newroute)
d7e09d03
PT
978 ksocknal_route_decref(newroute);
979}
980
981int
d4de2ab8 982ksocknal_accept(lnet_ni_t *ni, struct socket *sock)
d7e09d03 983{
ff13fd40 984 struct ksock_connreq *cr;
97d10d0a
MS
985 int rc;
986 __u32 peer_ip;
987 int peer_port;
d7e09d03 988
1ad6a73e 989 rc = lnet_sock_getaddr(sock, 1, &peer_ip, &peer_port);
5fd88337 990 LASSERT(!rc); /* we succeeded before */
d7e09d03
PT
991
992 LIBCFS_ALLOC(cr, sizeof(*cr));
06ace26e 993 if (!cr) {
2d00bd17 994 LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from %pI4h: memory exhausted\n",
5e8f6920 995 &peer_ip);
d7e09d03
PT
996 return -ENOMEM;
997 }
998
999 lnet_ni_addref(ni);
1000 cr->ksncr_ni = ni;
1001 cr->ksncr_sock = sock;
1002
1003 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
1004
1005 list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
1006 wake_up(&ksocknal_data.ksnd_connd_waitq);
1007
1008 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
1009 return 0;
1010}
1011
0b913529 1012static int
ff13fd40 1013ksocknal_connecting(struct ksock_peer *peer, __u32 ipaddr)
d7e09d03 1014{
ff13fd40 1015 struct ksock_route *route;
d7e09d03 1016
d4de2ab8 1017 list_for_each_entry(route, &peer->ksnp_routes, ksnr_list) {
d7e09d03
PT
1018 if (route->ksnr_ipaddr == ipaddr)
1019 return route->ksnr_connecting;
1020 }
1021 return 0;
1022}
1023
1024int
ff13fd40 1025ksocknal_create_conn(lnet_ni_t *ni, struct ksock_route *route,
c314c319 1026 struct socket *sock, int type)
d7e09d03 1027{
97d10d0a 1028 rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
d4de2ab8 1029 LIST_HEAD(zombies);
97d10d0a
MS
1030 lnet_process_id_t peerid;
1031 struct list_head *tmp;
1032 __u64 incarnation;
ff13fd40
JS
1033 struct ksock_conn *conn;
1034 struct ksock_conn *conn2;
1035 struct ksock_peer *peer = NULL;
1036 struct ksock_peer *peer2;
1037 struct ksock_sched *sched;
d7e09d03 1038 ksock_hello_msg_t *hello;
97d10d0a 1039 int cpt;
ff13fd40
JS
1040 struct ksock_tx *tx;
1041 struct ksock_tx *txtmp;
97d10d0a
MS
1042 int rc;
1043 int active;
1044 char *warn = NULL;
d7e09d03 1045
06ace26e 1046 active = !!route;
d7e09d03 1047
d4de2ab8 1048 LASSERT(active == (type != SOCKLND_CONN_NONE));
d7e09d03
PT
1049
1050 LIBCFS_ALLOC(conn, sizeof(*conn));
06ace26e 1051 if (!conn) {
d7e09d03
PT
1052 rc = -ENOMEM;
1053 goto failed_0;
1054 }
1055
d7e09d03
PT
1056 conn->ksnc_peer = NULL;
1057 conn->ksnc_route = NULL;
1058 conn->ksnc_sock = sock;
4420cfd3
JS
1059 /*
1060 * 2 ref, 1 for conn, another extra ref prevents socket
1061 * being closed before establishment of connection
1062 */
d4de2ab8 1063 atomic_set(&conn->ksnc_sock_refcount, 2);
d7e09d03
PT
1064 conn->ksnc_type = type;
1065 ksocknal_lib_save_callback(sock, conn);
d4de2ab8 1066 atomic_set(&conn->ksnc_conn_refcount, 1); /* 1 ref for me */
d7e09d03
PT
1067
1068 conn->ksnc_rx_ready = 0;
1069 conn->ksnc_rx_scheduled = 0;
1070
d4de2ab8 1071 INIT_LIST_HEAD(&conn->ksnc_tx_queue);
d7e09d03
PT
1072 conn->ksnc_tx_ready = 0;
1073 conn->ksnc_tx_scheduled = 0;
1074 conn->ksnc_tx_carrier = NULL;
d4de2ab8 1075 atomic_set(&conn->ksnc_tx_nob, 0);
d7e09d03
PT
1076
1077 LIBCFS_ALLOC(hello, offsetof(ksock_hello_msg_t,
1078 kshm_ips[LNET_MAX_INTERFACES]));
06ace26e 1079 if (!hello) {
d7e09d03
PT
1080 rc = -ENOMEM;
1081 goto failed_1;
1082 }
1083
1084 /* stash conn's local and remote addrs */
d4de2ab8 1085 rc = ksocknal_lib_get_conn_addrs(conn);
5fd88337 1086 if (rc)
d7e09d03
PT
1087 goto failed_1;
1088
4420cfd3
JS
1089 /*
1090 * Find out/confirm peer's NID and connection type and get the
d7e09d03
PT
1091 * vector of interfaces she's willing to let me connect to.
1092 * Passive connections use the listener timeout since the peer sends
4420cfd3
JS
1093 * eagerly
1094 */
d7e09d03
PT
1095 if (active) {
1096 peer = route->ksnr_peer;
1097 LASSERT(ni == peer->ksnp_ni);
1098
1099 /* Active connection sends HELLO eagerly */
1100 hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips);
1101 peerid = peer->ksnp_id;
1102
1103 write_lock_bh(global_lock);
1104 conn->ksnc_proto = peer->ksnp_proto;
1105 write_unlock_bh(global_lock);
1106
06ace26e 1107 if (!conn->ksnc_proto) {
d7e09d03
PT
1108 conn->ksnc_proto = &ksocknal_protocol_v3x;
1109#if SOCKNAL_VERSION_DEBUG
1110 if (*ksocknal_tunables.ksnd_protocol == 2)
1111 conn->ksnc_proto = &ksocknal_protocol_v2x;
1112 else if (*ksocknal_tunables.ksnd_protocol == 1)
1113 conn->ksnc_proto = &ksocknal_protocol_v1x;
1114#endif
1115 }
1116
d4de2ab8 1117 rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
5fd88337 1118 if (rc)
d7e09d03
PT
1119 goto failed_1;
1120 } else {
1121 peerid.nid = LNET_NID_ANY;
1122 peerid.pid = LNET_PID_ANY;
1123
1124 /* Passive, get protocol from peer */
1125 conn->ksnc_proto = NULL;
1126 }
1127
d4de2ab8 1128 rc = ksocknal_recv_hello(ni, conn, hello, &peerid, &incarnation);
d7e09d03
PT
1129 if (rc < 0)
1130 goto failed_1;
1131
5fd88337 1132 LASSERT(!rc || active);
06ace26e 1133 LASSERT(conn->ksnc_proto);
d4de2ab8 1134 LASSERT(peerid.nid != LNET_NID_ANY);
d7e09d03
PT
1135
1136 cpt = lnet_cpt_of_nid(peerid.nid);
1137
1138 if (active) {
1139 ksocknal_peer_addref(peer);
1140 write_lock_bh(global_lock);
1141 } else {
1142 rc = ksocknal_create_peer(&peer, ni, peerid);
5fd88337 1143 if (rc)
d7e09d03
PT
1144 goto failed_1;
1145
1146 write_lock_bh(global_lock);
1147
1148 /* called with a ref on ni, so shutdown can't have started */
9797fb0e 1149 LASSERT(!((struct ksock_net *)ni->ni_data)->ksnn_shutdown);
d7e09d03
PT
1150
1151 peer2 = ksocknal_find_peer_locked(ni, peerid);
06ace26e 1152 if (!peer2) {
4420cfd3
JS
1153 /*
1154 * NB this puts an "empty" peer in the peer
1155 * table (which takes my ref)
1156 */
d7e09d03 1157 list_add_tail(&peer->ksnp_list,
c314c319 1158 ksocknal_nid2peerlist(peerid.nid));
d7e09d03
PT
1159 } else {
1160 ksocknal_peer_decref(peer);
1161 peer = peer2;
1162 }
1163
1164 /* +1 ref for me */
1165 ksocknal_peer_addref(peer);
1166 peer->ksnp_accepting++;
1167
4420cfd3
JS
1168 /*
1169 * Am I already connecting to this guy? Resolve in
1170 * favour of higher NID...
1171 */
d7e09d03
PT
1172 if (peerid.nid < ni->ni_nid &&
1173 ksocknal_connecting(peer, conn->ksnc_ipaddr)) {
1174 rc = EALREADY;
1175 warn = "connection race resolution";
1176 goto failed_2;
1177 }
1178 }
1179
1180 if (peer->ksnp_closing ||
1181 (active && route->ksnr_deleted)) {
1182 /* peer/route got closed under me */
1183 rc = -ESTALE;
1184 warn = "peer/route removed";
1185 goto failed_2;
1186 }
1187
06ace26e 1188 if (!peer->ksnp_proto) {
4420cfd3
JS
1189 /*
1190 * Never connected before.
d7e09d03
PT
1191 * NB recv_hello may have returned EPROTO to signal my peer
1192 * wants a different protocol than the one I asked for.
1193 */
d4de2ab8 1194 LASSERT(list_empty(&peer->ksnp_conns));
d7e09d03
PT
1195
1196 peer->ksnp_proto = conn->ksnc_proto;
1197 peer->ksnp_incarnation = incarnation;
1198 }
1199
1200 if (peer->ksnp_proto != conn->ksnc_proto ||
1201 peer->ksnp_incarnation != incarnation) {
1202 /* Peer rebooted or I've got the wrong protocol version */
1203 ksocknal_close_peer_conns_locked(peer, 0, 0);
1204
1205 peer->ksnp_proto = NULL;
1206 rc = ESTALE;
1207 warn = peer->ksnp_incarnation != incarnation ?
1208 "peer rebooted" :
1209 "wrong proto version";
1210 goto failed_2;
1211 }
1212
1213 switch (rc) {
1214 default:
1215 LBUG();
1216 case 0:
1217 break;
1218 case EALREADY:
1219 warn = "lost conn race";
1220 goto failed_2;
1221 case EPROTO:
1222 warn = "retry with different protocol version";
1223 goto failed_2;
1224 }
1225
4420cfd3
JS
1226 /*
1227 * Refuse to duplicate an existing connection, unless this is a
1228 * loopback connection
1229 */
d7e09d03
PT
1230 if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
1231 list_for_each(tmp, &peer->ksnp_conns) {
ff13fd40 1232 conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
d7e09d03
PT
1233
1234 if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
1235 conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
1236 conn2->ksnc_type != conn->ksnc_type)
1237 continue;
1238
4420cfd3
JS
1239 /*
1240 * Reply on a passive connection attempt so the peer
1241 * realises we're connected.
1242 */
5fd88337 1243 LASSERT(!rc);
d7e09d03
PT
1244 if (!active)
1245 rc = EALREADY;
1246
1247 warn = "duplicate";
1248 goto failed_2;
1249 }
1250 }
1251
4420cfd3
JS
1252 /*
1253 * If the connection created by this route didn't bind to the IP
d7e09d03 1254 * address the route connected to, the connection/route matching
4420cfd3
JS
1255 * code below probably isn't going to work.
1256 */
d7e09d03
PT
1257 if (active &&
1258 route->ksnr_ipaddr != conn->ksnc_ipaddr) {
5e8f6920 1259 CERROR("Route %s %pI4h connected to %pI4h\n",
d7e09d03 1260 libcfs_id2str(peer->ksnp_id),
5e8f6920
PT
1261 &route->ksnr_ipaddr,
1262 &conn->ksnc_ipaddr);
d7e09d03
PT
1263 }
1264
4420cfd3
JS
1265 /*
1266 * Search for a route corresponding to the new connection and
d7e09d03
PT
1267 * create an association. This allows incoming connections created
1268 * by routes in my peer to match my own route entries so I don't
4420cfd3
JS
1269 * continually create duplicate routes.
1270 */
d4de2ab8 1271 list_for_each(tmp, &peer->ksnp_routes) {
ff13fd40 1272 route = list_entry(tmp, struct ksock_route, ksnr_list);
d7e09d03
PT
1273
1274 if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
1275 continue;
1276
1277 ksocknal_associate_route_conn_locked(route, conn);
1278 break;
1279 }
1280
1281 conn->ksnc_peer = peer; /* conn takes my ref on peer */
1282 peer->ksnp_last_alive = cfs_time_current();
1283 peer->ksnp_send_keepalive = 0;
1284 peer->ksnp_error = 0;
1285
1286 sched = ksocknal_choose_scheduler_locked(cpt);
1287 sched->kss_nconns++;
1288 conn->ksnc_scheduler = sched;
1289
1290 conn->ksnc_tx_last_post = cfs_time_current();
1291 /* Set the deadline for the outgoing HELLO to drain */
fb4a1539 1292 conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
d7e09d03
PT
1293 conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
1294 mb(); /* order with adding to peer's conn list */
1295
d4de2ab8 1296 list_add(&conn->ksnc_list, &peer->ksnp_conns);
d7e09d03
PT
1297 ksocknal_conn_addref(conn);
1298
1299 ksocknal_new_packet(conn, 0);
1300
1301 conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);
1302
1303 /* Take packets blocking for this connection. */
1304 list_for_each_entry_safe(tx, txtmp, &peer->ksnp_tx_queue, tx_list) {
1305 if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) == SOCKNAL_MATCH_NO)
1306 continue;
1307
d4de2ab8
HE
1308 list_del(&tx->tx_list);
1309 ksocknal_queue_tx_locked(tx, conn);
d7e09d03
PT
1310 }
1311
1312 write_unlock_bh(global_lock);
1313
4420cfd3
JS
1314 /*
1315 * We've now got a new connection. Any errors from here on are just
d7e09d03
PT
1316 * like "normal" comms errors and we close the connection normally.
1317 * NB (a) we still have to send the reply HELLO for passive
1318 * connections,
1319 * (b) normal I/O on the conn is blocked until I setup and call the
1320 * socket callbacks.
1321 */
2d00bd17 1322 CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d incarnation:%lld sched[%d:%d]\n",
d7e09d03 1323 libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
5e8f6920 1324 &conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
d7e09d03
PT
1325 conn->ksnc_port, incarnation, cpt,
1326 (int)(sched - &sched->kss_info->ksi_scheds[0]));
1327
1328 if (active) {
1329 /* additional routes after interface exchange? */
1330 ksocknal_create_routes(peer, conn->ksnc_port,
1331 hello->kshm_ips, hello->kshm_nips);
1332 } else {
1333 hello->kshm_nips = ksocknal_select_ips(peer, hello->kshm_ips,
1334 hello->kshm_nips);
1335 rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
1336 }
1337
1338 LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t,
1339 kshm_ips[LNET_MAX_INTERFACES]));
1340
4420cfd3
JS
1341 /*
1342 * setup the socket AFTER I've received hello (it disables
d7e09d03
PT
1343 * SO_LINGER). I might call back to the acceptor who may want
1344 * to send a protocol version response and then close the
1345 * socket; this ensures the socket only tears down after the
4420cfd3
JS
1346 * response has been sent.
1347 */
5fd88337 1348 if (!rc)
d7e09d03
PT
1349 rc = ksocknal_lib_setup_sock(sock);
1350
1351 write_lock_bh(global_lock);
1352
1353 /* NB my callbacks block while I hold ksnd_global_lock */
1354 ksocknal_lib_set_callback(sock, conn);
1355
1356 if (!active)
1357 peer->ksnp_accepting--;
1358
1359 write_unlock_bh(global_lock);
1360
5fd88337 1361 if (rc) {
d7e09d03
PT
1362 write_lock_bh(global_lock);
1363 if (!conn->ksnc_closing) {
1364 /* could be closed by another thread */
1365 ksocknal_close_conn_locked(conn, rc);
1366 }
1367 write_unlock_bh(global_lock);
5fd88337 1368 } else if (!ksocknal_connsock_addref(conn)) {
d7e09d03
PT
1369 /* Allow I/O to proceed. */
1370 ksocknal_read_callback(conn);
1371 ksocknal_write_callback(conn);
1372 ksocknal_connsock_decref(conn);
1373 }
1374
1375 ksocknal_connsock_decref(conn);
1376 ksocknal_conn_decref(conn);
1377 return rc;
1378
1379 failed_2:
1380 if (!peer->ksnp_closing &&
d4de2ab8
HE
1381 list_empty(&peer->ksnp_conns) &&
1382 list_empty(&peer->ksnp_routes)) {
d7e09d03
PT
1383 list_add(&zombies, &peer->ksnp_tx_queue);
1384 list_del_init(&peer->ksnp_tx_queue);
1385 ksocknal_unlink_peer_locked(peer);
1386 }
1387
1388 write_unlock_bh(global_lock);
1389
06ace26e 1390 if (warn) {
d7e09d03
PT
1391 if (rc < 0)
1392 CERROR("Not creating conn %s type %d: %s\n",
1393 libcfs_id2str(peerid), conn->ksnc_type, warn);
1394 else
1395 CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
c314c319 1396 libcfs_id2str(peerid), conn->ksnc_type, warn);
d7e09d03
PT
1397 }
1398
1399 if (!active) {
1400 if (rc > 0) {
4420cfd3
JS
1401 /*
1402 * Request retry by replying with CONN_NONE
1403 * ksnc_proto has been set already
1404 */
d7e09d03
PT
1405 conn->ksnc_type = SOCKLND_CONN_NONE;
1406 hello->kshm_nips = 0;
1407 ksocknal_send_hello(ni, conn, peerid.nid, hello);
1408 }
1409
1410 write_lock_bh(global_lock);
1411 peer->ksnp_accepting--;
1412 write_unlock_bh(global_lock);
1413 }
1414
1415 ksocknal_txlist_done(ni, &zombies, 1);
1416 ksocknal_peer_decref(peer);
1417
73092892 1418failed_1:
06ace26e 1419 if (hello)
d7e09d03
PT
1420 LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t,
1421 kshm_ips[LNET_MAX_INTERFACES]));
1422
d4de2ab8 1423 LIBCFS_FREE(conn, sizeof(*conn));
d7e09d03 1424
73092892 1425failed_0:
e52fc91d 1426 sock_release(sock);
d7e09d03
PT
1427 return rc;
1428}
1429
1430void
ff13fd40 1431ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
d7e09d03 1432{
4420cfd3
JS
1433 /*
1434 * This just does the immmediate housekeeping, and queues the
d7e09d03 1435 * connection for the reaper to terminate.
4420cfd3
JS
1436 * Caller holds ksnd_global_lock exclusively in irq context
1437 */
ff13fd40
JS
1438 struct ksock_peer *peer = conn->ksnc_peer;
1439 struct ksock_route *route;
1440 struct ksock_conn *conn2;
97d10d0a 1441 struct list_head *tmp;
d7e09d03 1442
5fd88337 1443 LASSERT(!peer->ksnp_error);
d4de2ab8 1444 LASSERT(!conn->ksnc_closing);
d7e09d03
PT
1445 conn->ksnc_closing = 1;
1446
1447 /* ksnd_deathrow_conns takes over peer's ref */
d4de2ab8 1448 list_del(&conn->ksnc_list);
d7e09d03
PT
1449
1450 route = conn->ksnc_route;
06ace26e 1451 if (route) {
d7e09d03 1452 /* dissociate conn from route... */
d4de2ab8 1453 LASSERT(!route->ksnr_deleted);
5fd88337 1454 LASSERT(route->ksnr_connected & (1 << conn->ksnc_type));
d7e09d03
PT
1455
1456 conn2 = NULL;
1457 list_for_each(tmp, &peer->ksnp_conns) {
ff13fd40 1458 conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
d7e09d03
PT
1459
1460 if (conn2->ksnc_route == route &&
1461 conn2->ksnc_type == conn->ksnc_type)
1462 break;
1463
1464 conn2 = NULL;
1465 }
06ace26e 1466 if (!conn2)
d7e09d03
PT
1467 route->ksnr_connected &= ~(1 << conn->ksnc_type);
1468
1469 conn->ksnc_route = NULL;
1470
d7e09d03
PT
1471 ksocknal_route_decref(route); /* drop conn's ref on route */
1472 }
1473
d4de2ab8 1474 if (list_empty(&peer->ksnp_conns)) {
d7e09d03
PT
1475 /* No more connections to this peer */
1476
1477 if (!list_empty(&peer->ksnp_tx_queue)) {
ff13fd40 1478 struct ksock_tx *tx;
d7e09d03 1479
d4de2ab8 1480 LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
d7e09d03 1481
4420cfd3
JS
1482 /*
1483 * throw them to the last connection...,
1484 * these TXs will be send to /dev/null by scheduler
1485 */
d7e09d03 1486 list_for_each_entry(tx, &peer->ksnp_tx_queue,
c314c319 1487 tx_list)
d7e09d03
PT
1488 ksocknal_tx_prep(conn, tx);
1489
1490 spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
1491 list_splice_init(&peer->ksnp_tx_queue,
c314c319 1492 &conn->ksnc_tx_queue);
d7e09d03
PT
1493 spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
1494 }
1495
1496 peer->ksnp_proto = NULL; /* renegotiate protocol version */
1497 peer->ksnp_error = error; /* stash last conn close reason */
1498
d4de2ab8 1499 if (list_empty(&peer->ksnp_routes)) {
4420cfd3
JS
1500 /*
1501 * I've just closed last conn belonging to a
1502 * peer with no routes to it
1503 */
d4de2ab8 1504 ksocknal_unlink_peer_locked(peer);
d7e09d03
PT
1505 }
1506 }
1507
1508 spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1509
1510 list_add_tail(&conn->ksnc_list,
c314c319 1511 &ksocknal_data.ksnd_deathrow_conns);
d7e09d03
PT
1512 wake_up(&ksocknal_data.ksnd_reaper_waitq);
1513
1514 spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1515}
1516
1517void
ff13fd40 1518ksocknal_peer_failed(struct ksock_peer *peer)
d7e09d03 1519{
97d10d0a 1520 int notify = 0;
a649ad1d 1521 unsigned long last_alive = 0;
d7e09d03 1522
4420cfd3
JS
1523 /*
1524 * There has been a connection failure or comms error; but I'll only
d7e09d03 1525 * tell LNET I think the peer is dead if it's to another kernel and
4420cfd3
JS
1526 * there are no connections or connection attempts in existence.
1527 */
d7e09d03
PT
1528 read_lock(&ksocknal_data.ksnd_global_lock);
1529
5fd88337 1530 if (!(peer->ksnp_id.pid & LNET_PID_USERFLAG) &&
d7e09d03 1531 list_empty(&peer->ksnp_conns) &&
5fd88337 1532 !peer->ksnp_accepting &&
06ace26e 1533 !ksocknal_find_connecting_route_locked(peer)) {
d7e09d03
PT
1534 notify = 1;
1535 last_alive = peer->ksnp_last_alive;
1536 }
1537
1538 read_unlock(&ksocknal_data.ksnd_global_lock);
1539
1540 if (notify)
d4de2ab8 1541 lnet_notify(peer->ksnp_ni, peer->ksnp_id.nid, 0,
c314c319 1542 last_alive);
d7e09d03
PT
1543}
1544
1545void
ff13fd40 1546ksocknal_finalize_zcreq(struct ksock_conn *conn)
d7e09d03 1547{
ff13fd40
JS
1548 struct ksock_peer *peer = conn->ksnc_peer;
1549 struct ksock_tx *tx;
1550 struct ksock_tx *temp;
1551 struct ksock_tx *tmp;
d4de2ab8 1552 LIST_HEAD(zlist);
d7e09d03 1553
4420cfd3
JS
1554 /*
1555 * NB safe to finalize TXs because closing of socket will
1556 * abort all buffered data
1557 */
06ace26e 1558 LASSERT(!conn->ksnc_sock);
d7e09d03
PT
1559
1560 spin_lock(&peer->ksnp_lock);
1561
1562 list_for_each_entry_safe(tx, tmp, &peer->ksnp_zc_req_list, tx_zc_list) {
1563 if (tx->tx_conn != conn)
1564 continue;
1565
5fd88337 1566 LASSERT(tx->tx_msg.ksm_zc_cookies[0]);
d7e09d03
PT
1567
1568 tx->tx_msg.ksm_zc_cookies[0] = 0;
1569 tx->tx_zc_aborted = 1; /* mark it as not-acked */
1570 list_del(&tx->tx_zc_list);
1571 list_add(&tx->tx_zc_list, &zlist);
1572 }
1573
1574 spin_unlock(&peer->ksnp_lock);
1575
2aff15d4 1576 list_for_each_entry_safe(tx, temp, &zlist, tx_zc_list) {
d7e09d03
PT
1577 list_del(&tx->tx_zc_list);
1578 ksocknal_tx_decref(tx);
1579 }
1580}
1581
1582void
ff13fd40 1583ksocknal_terminate_conn(struct ksock_conn *conn)
d7e09d03 1584{
4420cfd3
JS
1585 /*
1586 * This gets called by the reaper (guaranteed thread context) to
d7e09d03
PT
1587 * disengage the socket from its callbacks and close it.
1588 * ksnc_refcount will eventually hit zero, and then the reaper will
4420cfd3
JS
1589 * destroy it.
1590 */
ff13fd40
JS
1591 struct ksock_peer *peer = conn->ksnc_peer;
1592 struct ksock_sched *sched = conn->ksnc_scheduler;
97d10d0a 1593 int failed = 0;
d7e09d03
PT
1594
1595 LASSERT(conn->ksnc_closing);
1596
1597 /* wake up the scheduler to "send" all remaining packets to /dev/null */
1598 spin_lock_bh(&sched->kss_lock);
1599
1600 /* a closing conn is always ready to tx */
1601 conn->ksnc_tx_ready = 1;
1602
1603 if (!conn->ksnc_tx_scheduled &&
991cc8d6 1604 !list_empty(&conn->ksnc_tx_queue)) {
d4de2ab8 1605 list_add_tail(&conn->ksnc_tx_list,
c314c319 1606 &sched->kss_tx_conns);
d7e09d03
PT
1607 conn->ksnc_tx_scheduled = 1;
1608 /* extra ref for scheduler */
1609 ksocknal_conn_addref(conn);
1610
d4de2ab8 1611 wake_up(&sched->kss_waitq);
d7e09d03
PT
1612 }
1613
1614 spin_unlock_bh(&sched->kss_lock);
1615
1616 /* serialise with callbacks */
1617 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1618
1619 ksocknal_lib_reset_callback(conn->ksnc_sock, conn);
1620
4420cfd3
JS
1621 /*
1622 * OK, so this conn may not be completely disengaged from its
1623 * scheduler yet, but it _has_ committed to terminate...
1624 */
d7e09d03
PT
1625 conn->ksnc_scheduler->kss_nconns--;
1626
5fd88337 1627 if (peer->ksnp_error) {
d7e09d03 1628 /* peer's last conn closed in error */
d4de2ab8 1629 LASSERT(list_empty(&peer->ksnp_conns));
d7e09d03
PT
1630 failed = 1;
1631 peer->ksnp_error = 0; /* avoid multiple notifications */
1632 }
1633
1634 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1635
1636 if (failed)
1637 ksocknal_peer_failed(peer);
1638
4420cfd3
JS
1639 /*
1640 * The socket is closed on the final put; either here, or in
d7e09d03
PT
1641 * ksocknal_{send,recv}msg(). Since we set up the linger2 option
1642 * when the connection was established, this will close the socket
1643 * immediately, aborting anything buffered in it. Any hung
4420cfd3
JS
1644 * zero-copy transmits will therefore complete in finite time.
1645 */
d7e09d03
PT
1646 ksocknal_connsock_decref(conn);
1647}
1648
1649void
ff13fd40 1650ksocknal_queue_zombie_conn(struct ksock_conn *conn)
d7e09d03
PT
1651{
1652 /* Queue the conn for the reaper to destroy */
1653
5fd88337 1654 LASSERT(!atomic_read(&conn->ksnc_conn_refcount));
d7e09d03
PT
1655 spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1656
1657 list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
1658 wake_up(&ksocknal_data.ksnd_reaper_waitq);
1659
1660 spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1661}
1662
1663void
ff13fd40 1664ksocknal_destroy_conn(struct ksock_conn *conn)
d7e09d03 1665{
97d10d0a 1666 unsigned long last_rcv;
d7e09d03
PT
1667
1668 /* Final coup-de-grace of the reaper */
d4de2ab8 1669 CDEBUG(D_NET, "connection %p\n", conn);
d7e09d03 1670
5fd88337
JS
1671 LASSERT(!atomic_read(&conn->ksnc_conn_refcount));
1672 LASSERT(!atomic_read(&conn->ksnc_sock_refcount));
06ace26e
JS
1673 LASSERT(!conn->ksnc_sock);
1674 LASSERT(!conn->ksnc_route);
d4de2ab8
HE
1675 LASSERT(!conn->ksnc_tx_scheduled);
1676 LASSERT(!conn->ksnc_rx_scheduled);
1677 LASSERT(list_empty(&conn->ksnc_tx_queue));
d7e09d03
PT
1678
1679 /* complete current receive if any */
1680 switch (conn->ksnc_rx_state) {
1681 case SOCKNAL_RX_LNET_PAYLOAD:
1682 last_rcv = conn->ksnc_rx_deadline -
1683 cfs_time_seconds(*ksocknal_tunables.ksnd_timeout);
2d00bd17 1684 CERROR("Completing partial receive from %s[%d], ip %pI4h:%d, with error, wanted: %d, left: %d, last alive is %ld secs ago\n",
d7e09d03 1685 libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
5e8f6920 1686 &conn->ksnc_ipaddr, conn->ksnc_port,
d7e09d03
PT
1687 conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
1688 cfs_duration_sec(cfs_time_sub(cfs_time_current(),
2d00bd17 1689 last_rcv)));
d4de2ab8 1690 lnet_finalize(conn->ksnc_peer->ksnp_ni,
c314c319 1691 conn->ksnc_cookie, -EIO);
d7e09d03
PT
1692 break;
1693 case SOCKNAL_RX_LNET_HEADER:
1694 if (conn->ksnc_rx_started)
2d00bd17 1695 CERROR("Incomplete receive of lnet header from %s, ip %pI4h:%d, with error, protocol: %d.x.\n",
d7e09d03 1696 libcfs_id2str(conn->ksnc_peer->ksnp_id),
5e8f6920 1697 &conn->ksnc_ipaddr, conn->ksnc_port,
d7e09d03
PT
1698 conn->ksnc_proto->pro_version);
1699 break;
1700 case SOCKNAL_RX_KSM_HEADER:
1701 if (conn->ksnc_rx_started)
2d00bd17 1702 CERROR("Incomplete receive of ksock message from %s, ip %pI4h:%d, with error, protocol: %d.x.\n",
d7e09d03 1703 libcfs_id2str(conn->ksnc_peer->ksnp_id),
5e8f6920 1704 &conn->ksnc_ipaddr, conn->ksnc_port,
d7e09d03
PT
1705 conn->ksnc_proto->pro_version);
1706 break;
1707 case SOCKNAL_RX_SLOP:
1708 if (conn->ksnc_rx_started)
2d00bd17 1709 CERROR("Incomplete receive of slops from %s, ip %pI4h:%d, with error\n",
d7e09d03 1710 libcfs_id2str(conn->ksnc_peer->ksnp_id),
5e8f6920 1711 &conn->ksnc_ipaddr, conn->ksnc_port);
d7e09d03
PT
1712 break;
1713 default:
d4de2ab8 1714 LBUG();
d7e09d03
PT
1715 break;
1716 }
1717
1718 ksocknal_peer_decref(conn->ksnc_peer);
1719
d4de2ab8 1720 LIBCFS_FREE(conn, sizeof(*conn));
d7e09d03
PT
1721}
1722
1723int
ff13fd40 1724ksocknal_close_peer_conns_locked(struct ksock_peer *peer, __u32 ipaddr, int why)
d7e09d03 1725{
ff13fd40 1726 struct ksock_conn *conn;
97d10d0a
MS
1727 struct list_head *ctmp;
1728 struct list_head *cnxt;
1729 int count = 0;
d7e09d03 1730
d4de2ab8 1731 list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) {
ff13fd40 1732 conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
d7e09d03 1733
5fd88337 1734 if (!ipaddr || conn->ksnc_ipaddr == ipaddr) {
d7e09d03 1735 count++;
d4de2ab8 1736 ksocknal_close_conn_locked(conn, why);
d7e09d03
PT
1737 }
1738 }
1739
a1f659d4 1740 return count;
d7e09d03
PT
1741}
1742
1743int
ff13fd40 1744ksocknal_close_conn_and_siblings(struct ksock_conn *conn, int why)
d7e09d03 1745{
ff13fd40 1746 struct ksock_peer *peer = conn->ksnc_peer;
97d10d0a
MS
1747 __u32 ipaddr = conn->ksnc_ipaddr;
1748 int count;
d7e09d03
PT
1749
1750 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1751
d4de2ab8 1752 count = ksocknal_close_peer_conns_locked(peer, ipaddr, why);
d7e09d03
PT
1753
1754 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1755
a1f659d4 1756 return count;
d7e09d03
PT
1757}
1758
1759int
d4de2ab8 1760ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr)
d7e09d03 1761{
ff13fd40 1762 struct ksock_peer *peer;
97d10d0a
MS
1763 struct list_head *ptmp;
1764 struct list_head *pnxt;
1765 int lo;
1766 int hi;
1767 int i;
1768 int count = 0;
d7e09d03
PT
1769
1770 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1771
d3d3d37a
JS
1772 if (id.nid != LNET_NID_ANY) {
1773 lo = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
1774 hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
1775 } else {
d7e09d03
PT
1776 lo = 0;
1777 hi = ksocknal_data.ksnd_peer_hash_size - 1;
1778 }
1779
1780 for (i = lo; i <= hi; i++) {
d4de2ab8 1781 list_for_each_safe(ptmp, pnxt,
c314c319 1782 &ksocknal_data.ksnd_peers[i]) {
ff13fd40 1783 peer = list_entry(ptmp, struct ksock_peer, ksnp_list);
d7e09d03
PT
1784
1785 if (!((id.nid == LNET_NID_ANY || id.nid == peer->ksnp_id.nid) &&
1786 (id.pid == LNET_PID_ANY || id.pid == peer->ksnp_id.pid)))
1787 continue;
1788
d4de2ab8 1789 count += ksocknal_close_peer_conns_locked(peer, ipaddr, 0);
d7e09d03
PT
1790 }
1791 }
1792
1793 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1794
1795 /* wildcards always succeed */
5fd88337 1796 if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || !ipaddr)
a1f659d4 1797 return 0;
d7e09d03 1798
5fd88337 1799 if (!count)
6a123705
LN
1800 return -ENOENT;
1801 else
1802 return 0;
d7e09d03
PT
1803}
1804
1805void
d4de2ab8 1806ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive)
d7e09d03 1807{
4420cfd3
JS
1808 /*
1809 * The router is telling me she's been notified of a change in
1810 * gateway state....
1811 */
97d10d0a 1812 lnet_process_id_t id = {0};
d7e09d03
PT
1813
1814 id.nid = gw_nid;
1815 id.pid = LNET_PID_ANY;
1816
d4de2ab8 1817 CDEBUG(D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid),
c314c319 1818 alive ? "up" : "down");
d7e09d03
PT
1819
1820 if (!alive) {
1821 /* If the gateway crashed, close all open connections... */
d4de2ab8 1822 ksocknal_close_matching_conns(id, 0);
d7e09d03
PT
1823 return;
1824 }
1825
4420cfd3
JS
1826 /*
1827 * ...otherwise do nothing. We can only establish new connections
 1828 * if we have autoroutes, and these connect on demand.
1829 */
d7e09d03
PT
1830}
1831
1832void
d4de2ab8 1833ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
d7e09d03 1834{
97d10d0a
MS
1835 int connect = 1;
1836 unsigned long last_alive = 0;
1837 unsigned long now = cfs_time_current();
ff13fd40 1838 struct ksock_peer *peer = NULL;
97d10d0a 1839 rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
fe7cb65d
JH
1840 lnet_process_id_t id = {
1841 .nid = nid,
1842 .pid = LNET_PID_LUSTRE,
1843 };
d7e09d03
PT
1844
1845 read_lock(glock);
1846
1847 peer = ksocknal_find_peer_locked(ni, id);
06ace26e 1848 if (peer) {
97d10d0a 1849 struct list_head *tmp;
ff13fd40 1850 struct ksock_conn *conn;
97d10d0a 1851 int bufnob;
d7e09d03 1852
d4de2ab8 1853 list_for_each(tmp, &peer->ksnp_conns) {
ff13fd40 1854 conn = list_entry(tmp, struct ksock_conn, ksnc_list);
fb4a1539 1855 bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
d7e09d03
PT
1856
1857 if (bufnob < conn->ksnc_tx_bufnob) {
1858 /* something got ACKed */
1859 conn->ksnc_tx_deadline =
1860 cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
1861 peer->ksnp_last_alive = now;
1862 conn->ksnc_tx_bufnob = bufnob;
1863 }
1864 }
1865
1866 last_alive = peer->ksnp_last_alive;
06ace26e 1867 if (!ksocknal_find_connectable_route_locked(peer))
d7e09d03
PT
1868 connect = 0;
1869 }
1870
1871 read_unlock(glock);
1872
5fd88337 1873 if (last_alive)
d7e09d03
PT
1874 *when = last_alive;
1875
1876 CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago, connect %d\n",
1877 libcfs_nid2str(nid), peer,
1878 last_alive ? cfs_duration_sec(now - last_alive) : -1,
1879 connect);
1880
1881 if (!connect)
1882 return;
1883
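	/*
	 * Reaching here means the peer is either unknown or still has a
	 * connectable route, so make sure it exists (targeting the LNet
	 * acceptor port) and then launch connection attempts on all of
	 * its routes under the write lock.
	 */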
1884 ksocknal_add_peer(ni, id, LNET_NIDADDR(nid), lnet_acceptor_port());
1885
1886 write_lock_bh(glock);
1887
1888 peer = ksocknal_find_peer_locked(ni, id);
06ace26e 1889 if (peer)
d7e09d03
PT
1890 ksocknal_launch_all_connections_locked(peer);
1891
1892 write_unlock_bh(glock);
d7e09d03
PT
1893}
1894
0b913529 1895static void
ff13fd40 1896ksocknal_push_peer(struct ksock_peer *peer)
d7e09d03 1897{
97d10d0a
MS
1898 int index;
1899 int i;
1900 struct list_head *tmp;
ff13fd40 1901 struct ksock_conn *conn;
d7e09d03
PT
1902
1903 for (index = 0; ; index++) {
1904 read_lock(&ksocknal_data.ksnd_global_lock);
1905
1906 i = 0;
1907 conn = NULL;
1908
d4de2ab8 1909 list_for_each(tmp, &peer->ksnp_conns) {
d7e09d03 1910 if (i++ == index) {
ff13fd40 1911 conn = list_entry(tmp, struct ksock_conn,
c314c319 1912 ksnc_list);
d7e09d03
PT
1913 ksocknal_conn_addref(conn);
1914 break;
1915 }
1916 }
1917
1918 read_unlock(&ksocknal_data.ksnd_global_lock);
1919
06ace26e 1920 if (!conn)
d7e09d03
PT
1921 break;
1922
d4de2ab8 1923 ksocknal_lib_push_conn(conn);
d7e09d03
PT
1924 ksocknal_conn_decref(conn);
1925 }
1926}
1927
4dcb7109 1928static int ksocknal_push(lnet_ni_t *ni, lnet_process_id_t id)
d7e09d03 1929{
4dcb7109
LZ
1930 struct list_head *start;
1931 struct list_head *end;
97d10d0a 1932 struct list_head *tmp;
97d10d0a 1933 int rc = -ENOENT;
4dcb7109 1934 unsigned int hsize = ksocknal_data.ksnd_peer_hash_size;
d7e09d03 1935
4dcb7109
LZ
1936 if (id.nid == LNET_NID_ANY) {
1937 start = &ksocknal_data.ksnd_peers[0];
1938 end = &ksocknal_data.ksnd_peers[hsize - 1];
1939 } else {
d3d3d37a
JS
1940 start = ksocknal_nid2peerlist(id.nid);
1941 end = ksocknal_nid2peerlist(id.nid);
4dcb7109 1942 }
d7e09d03 1943
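	/*
	 * For each candidate hash bucket, repeatedly find the peer_off'th
	 * matching peer under the read lock, then push its connections
	 * outside the lock; i counts the matches seen, so a scan that ends
	 * with i <= peer_off means this bucket has no further matches.
	 */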
4dcb7109
LZ
1944 for (tmp = start; tmp <= end; tmp++) {
1945 int peer_off; /* searching offset in peer hash table */
d7e09d03 1946
4dcb7109 1947 for (peer_off = 0; ; peer_off++) {
ff13fd40 1948 struct ksock_peer *peer;
4dcb7109 1949 int i = 0;
d7e09d03 1950
4dcb7109
LZ
1951 read_lock(&ksocknal_data.ksnd_global_lock);
1952 list_for_each_entry(peer, tmp, ksnp_list) {
d7e09d03
PT
1953 if (!((id.nid == LNET_NID_ANY ||
1954 id.nid == peer->ksnp_id.nid) &&
1955 (id.pid == LNET_PID_ANY ||
4dcb7109 1956 id.pid == peer->ksnp_id.pid)))
d7e09d03 1957 continue;
d7e09d03 1958
4dcb7109 1959 if (i++ == peer_off) {
d7e09d03
PT
1960 ksocknal_peer_addref(peer);
1961 break;
1962 }
1963 }
d7e09d03
PT
1964 read_unlock(&ksocknal_data.ksnd_global_lock);
1965
5fd88337 1966 if (i <= peer_off) /* no match */
4dcb7109 1967 break;
d7e09d03 1968
4dcb7109
LZ
1969 rc = 0;
1970 ksocknal_push_peer(peer);
1971 ksocknal_peer_decref(peer);
1972 }
d7e09d03 1973 }
a1f659d4 1974 return rc;
d7e09d03
PT
1975}
1976
0b913529 1977static int
d7e09d03
PT
1978ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
1979{
ff13fd40
JS
1980 struct ksock_net *net = ni->ni_data;
1981 struct ksock_interface *iface;
97d10d0a
MS
1982 int rc;
1983 int i;
1984 int j;
1985 struct list_head *ptmp;
ff13fd40 1986 struct ksock_peer *peer;
97d10d0a 1987 struct list_head *rtmp;
ff13fd40 1988 struct ksock_route *route;
d7e09d03 1989
5fd88337 1990 if (!ipaddress || !netmask)
71397095 1991 return -EINVAL;
d7e09d03
PT
1992
1993 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1994
1995 iface = ksocknal_ip2iface(ni, ipaddress);
06ace26e 1996 if (iface) {
d7e09d03
PT
1997 /* silently ignore dups */
1998 rc = 0;
1999 } else if (net->ksnn_ninterfaces == LNET_MAX_INTERFACES) {
2000 rc = -ENOSPC;
2001 } else {
2002 iface = &net->ksnn_interfaces[net->ksnn_ninterfaces++];
2003
2004 iface->ksni_ipaddr = ipaddress;
2005 iface->ksni_netmask = netmask;
2006 iface->ksni_nroutes = 0;
2007 iface->ksni_npeers = 0;
2008
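		/*
		 * Seed the new interface's usage counters from peers and
		 * routes that already reference this local address.
		 */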
2009 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
2010 list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
ff13fd40 2011 peer = list_entry(ptmp, struct ksock_peer,
c314c319 2012 ksnp_list);
d7e09d03
PT
2013
2014 for (j = 0; j < peer->ksnp_n_passive_ips; j++)
2015 if (peer->ksnp_passive_ips[j] == ipaddress)
2016 iface->ksni_npeers++;
2017
2018 list_for_each(rtmp, &peer->ksnp_routes) {
ff13fd40 2019 route = list_entry(rtmp, struct ksock_route,
c314c319 2020 ksnr_list);
d7e09d03
PT
2021
2022 if (route->ksnr_myipaddr == ipaddress)
2023 iface->ksni_nroutes++;
2024 }
2025 }
2026 }
2027
2028 rc = 0;
2029 /* NB only new connections will pay attention to the new interface! */
2030 }
2031
2032 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
2033
a1f659d4 2034 return rc;
d7e09d03
PT
2035}
2036
0b913529 2037static void
ff13fd40 2038ksocknal_peer_del_interface_locked(struct ksock_peer *peer, __u32 ipaddr)
d7e09d03 2039{
97d10d0a
MS
2040 struct list_head *tmp;
2041 struct list_head *nxt;
ff13fd40
JS
2042 struct ksock_route *route;
2043 struct ksock_conn *conn;
97d10d0a
MS
2044 int i;
2045 int j;
d7e09d03
PT
2046
2047 for (i = 0; i < peer->ksnp_n_passive_ips; i++)
2048 if (peer->ksnp_passive_ips[i] == ipaddr) {
51078e25
JS
2049 for (j = i + 1; j < peer->ksnp_n_passive_ips; j++)
2050 peer->ksnp_passive_ips[j - 1] =
d7e09d03
PT
2051 peer->ksnp_passive_ips[j];
2052 peer->ksnp_n_passive_ips--;
2053 break;
2054 }
2055
2056 list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
ff13fd40 2057 route = list_entry(tmp, struct ksock_route, ksnr_list);
d7e09d03
PT
2058
2059 if (route->ksnr_myipaddr != ipaddr)
2060 continue;
2061
5fd88337 2062 if (route->ksnr_share_count) {
d7e09d03
PT
2063 /* Manually created; keep, but unbind */
2064 route->ksnr_myipaddr = 0;
2065 } else {
2066 ksocknal_del_route_locked(route);
2067 }
2068 }
2069
2070 list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
ff13fd40 2071 conn = list_entry(tmp, struct ksock_conn, ksnc_list);
d7e09d03
PT
2072
2073 if (conn->ksnc_myipaddr == ipaddr)
d4de2ab8 2074 ksocknal_close_conn_locked(conn, 0);
d7e09d03
PT
2075 }
2076}
2077
0b913529 2078static int
d7e09d03
PT
2079ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
2080{
ff13fd40 2081 struct ksock_net *net = ni->ni_data;
97d10d0a
MS
2082 int rc = -ENOENT;
2083 struct list_head *tmp;
2084 struct list_head *nxt;
ff13fd40 2085 struct ksock_peer *peer;
97d10d0a
MS
2086 __u32 this_ip;
2087 int i;
2088 int j;
d7e09d03
PT
2089
2090 write_lock_bh(&ksocknal_data.ksnd_global_lock);
2091
2092 for (i = 0; i < net->ksnn_ninterfaces; i++) {
2093 this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
2094
5fd88337 2095 if (ipaddress && ipaddress != this_ip)
d7e09d03
PT
2096 continue;
2097
2098 rc = 0;
2099
51078e25
JS
2100 for (j = i + 1; j < net->ksnn_ninterfaces; j++)
2101 net->ksnn_interfaces[j - 1] =
d7e09d03
PT
2102 net->ksnn_interfaces[j];
2103
2104 net->ksnn_ninterfaces--;
2105
2106 for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
2107 list_for_each_safe(tmp, nxt,
c314c319 2108 &ksocknal_data.ksnd_peers[j]) {
ff13fd40 2109 peer = list_entry(tmp, struct ksock_peer, ksnp_list);
d7e09d03
PT
2110
2111 if (peer->ksnp_ni != ni)
2112 continue;
2113
2114 ksocknal_peer_del_interface_locked(peer, this_ip);
2115 }
2116 }
2117 }
2118
2119 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
2120
a1f659d4 2121 return rc;
d7e09d03
PT
2122}
2123
2124int
2125ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
2126{
2127 lnet_process_id_t id = {0};
2128 struct libcfs_ioctl_data *data = arg;
2129 int rc;
2130
991cc8d6 2131 switch (cmd) {
d7e09d03 2132 case IOC_LIBCFS_GET_INTERFACE: {
ff13fd40
JS
2133 struct ksock_net *net = ni->ni_data;
2134 struct ksock_interface *iface;
d7e09d03
PT
2135
2136 read_lock(&ksocknal_data.ksnd_global_lock);
2137
2138 if (data->ioc_count >= (__u32)net->ksnn_ninterfaces) {
2139 rc = -ENOENT;
2140 } else {
2141 rc = 0;
2142 iface = &net->ksnn_interfaces[data->ioc_count];
2143
2144 data->ioc_u32[0] = iface->ksni_ipaddr;
2145 data->ioc_u32[1] = iface->ksni_netmask;
2146 data->ioc_u32[2] = iface->ksni_npeers;
2147 data->ioc_u32[3] = iface->ksni_nroutes;
2148 }
2149
2150 read_unlock(&ksocknal_data.ksnd_global_lock);
2151 return rc;
2152 }
2153
2154 case IOC_LIBCFS_ADD_INTERFACE:
2155 return ksocknal_add_interface(ni,
2156 data->ioc_u32[0], /* IP address */
2157 data->ioc_u32[1]); /* net mask */
2158
2159 case IOC_LIBCFS_DEL_INTERFACE:
2160 return ksocknal_del_interface(ni,
2161 data->ioc_u32[0]); /* IP address */
2162
2163 case IOC_LIBCFS_GET_PEER: {
97d10d0a
MS
2164 __u32 myip = 0;
2165 __u32 ip = 0;
2166 int port = 0;
2167 int conn_count = 0;
2168 int share_count = 0;
d7e09d03
PT
2169
2170 rc = ksocknal_get_peer_info(ni, data->ioc_count,
2171 &id, &myip, &ip, &port,
2172 &conn_count, &share_count);
5fd88337 2173 if (rc)
d7e09d03
PT
2174 return rc;
2175
2176 data->ioc_nid = id.nid;
2177 data->ioc_count = share_count;
2178 data->ioc_u32[0] = ip;
2179 data->ioc_u32[1] = port;
2180 data->ioc_u32[2] = myip;
2181 data->ioc_u32[3] = conn_count;
2182 data->ioc_u32[4] = id.pid;
2183 return 0;
2184 }
2185
2186 case IOC_LIBCFS_ADD_PEER:
2187 id.nid = data->ioc_nid;
fe7cb65d 2188 id.pid = LNET_PID_LUSTRE;
d4de2ab8 2189 return ksocknal_add_peer(ni, id,
d7e09d03
PT
2190 data->ioc_u32[0], /* IP */
2191 data->ioc_u32[1]); /* port */
2192
2193 case IOC_LIBCFS_DEL_PEER:
2194 id.nid = data->ioc_nid;
2195 id.pid = LNET_PID_ANY;
d4de2ab8 2196 return ksocknal_del_peer(ni, id,
d7e09d03
PT
2197 data->ioc_u32[0]); /* IP */
2198
2199 case IOC_LIBCFS_GET_CONN: {
97d10d0a
MS
2200 int txmem;
2201 int rxmem;
2202 int nagle;
ff13fd40 2203 struct ksock_conn *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
d7e09d03 2204
06ace26e 2205 if (!conn)
d7e09d03
PT
2206 return -ENOENT;
2207
2208 ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle);
2209
2210 data->ioc_count = txmem;
2211 data->ioc_nid = conn->ksnc_peer->ksnp_id.nid;
2212 data->ioc_flags = nagle;
2213 data->ioc_u32[0] = conn->ksnc_ipaddr;
2214 data->ioc_u32[1] = conn->ksnc_port;
2215 data->ioc_u32[2] = conn->ksnc_myipaddr;
2216 data->ioc_u32[3] = conn->ksnc_type;
2217 data->ioc_u32[4] = conn->ksnc_scheduler->kss_info->ksi_cpt;
2218 data->ioc_u32[5] = rxmem;
2219 data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
2220 ksocknal_conn_decref(conn);
2221 return 0;
2222 }
2223
2224 case IOC_LIBCFS_CLOSE_CONNECTION:
2225 id.nid = data->ioc_nid;
2226 id.pid = LNET_PID_ANY;
d4de2ab8 2227 return ksocknal_close_matching_conns(id,
d7e09d03
PT
2228 data->ioc_u32[0]);
2229
2230 case IOC_LIBCFS_REGISTER_MYNID:
2231 /* Ignore if this is a noop */
2232 if (data->ioc_nid == ni->ni_nid)
2233 return 0;
2234
2235 CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
2236 libcfs_nid2str(data->ioc_nid),
2237 libcfs_nid2str(ni->ni_nid));
2238 return -EINVAL;
2239
2240 case IOC_LIBCFS_PUSH_CONNECTION:
2241 id.nid = data->ioc_nid;
2242 id.pid = LNET_PID_ANY;
2243 return ksocknal_push(ni, id);
2244
2245 default:
2246 return -EINVAL;
2247 }
2248 /* not reached */
2249}
2250
0b913529 2251static void
d4de2ab8 2252ksocknal_free_buffers(void)
d7e09d03 2253{
5fd88337 2254 LASSERT(!atomic_read(&ksocknal_data.ksnd_nactive_txs));
d7e09d03 2255
06ace26e 2256 if (ksocknal_data.ksnd_sched_info) {
97d10d0a
MS
2257 struct ksock_sched_info *info;
2258 int i;
d7e09d03
PT
2259
2260 cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
06ace26e 2261 if (info->ksi_scheds) {
d7e09d03
PT
2262 LIBCFS_FREE(info->ksi_scheds,
2263 info->ksi_nthreads_max *
2264 sizeof(info->ksi_scheds[0]));
2265 }
2266 }
2267 cfs_percpt_free(ksocknal_data.ksnd_sched_info);
2268 }
2269
d4de2ab8 2270 LIBCFS_FREE(ksocknal_data.ksnd_peers,
c314c319
JS
2271 sizeof(struct list_head) *
2272 ksocknal_data.ksnd_peer_hash_size);
d7e09d03
PT
2273
2274 spin_lock(&ksocknal_data.ksnd_tx_lock);
2275
2276 if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
97d10d0a 2277 struct list_head zlist;
ff13fd40
JS
2278 struct ksock_tx *tx;
2279 struct ksock_tx *temp;
d7e09d03
PT
2280
2281 list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
2282 list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
2283 spin_unlock(&ksocknal_data.ksnd_tx_lock);
2284
2aff15d4 2285 list_for_each_entry_safe(tx, temp, &zlist, tx_list) {
d7e09d03
PT
2286 list_del(&tx->tx_list);
2287 LIBCFS_FREE(tx, tx->tx_desc_size);
2288 }
2289 } else {
2290 spin_unlock(&ksocknal_data.ksnd_tx_lock);
2291 }
2292}
2293
0b913529 2294static void
d7e09d03
PT
2295ksocknal_base_shutdown(void)
2296{
2297 struct ksock_sched_info *info;
ff13fd40 2298 struct ksock_sched *sched;
97d10d0a
MS
2299 int i;
2300 int j;
d7e09d03 2301
5fd88337 2302 LASSERT(!ksocknal_data.ksnd_nnets);
d7e09d03
PT
2303
2304 switch (ksocknal_data.ksnd_init) {
2305 default:
d4de2ab8 2306 LASSERT(0);
d7e09d03
PT
2307
2308 case SOCKNAL_INIT_ALL:
2309 case SOCKNAL_INIT_DATA:
06ace26e 2310 LASSERT(ksocknal_data.ksnd_peers);
df5ddcc9 2311 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
d4de2ab8 2312 LASSERT(list_empty(&ksocknal_data.ksnd_peers[i]));
d7e09d03
PT
2313
2314 LASSERT(list_empty(&ksocknal_data.ksnd_nets));
d4de2ab8
HE
2315 LASSERT(list_empty(&ksocknal_data.ksnd_enomem_conns));
2316 LASSERT(list_empty(&ksocknal_data.ksnd_zombie_conns));
2317 LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs));
2318 LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes));
d7e09d03 2319
06ace26e 2320 if (ksocknal_data.ksnd_sched_info) {
d7e09d03
PT
2321 cfs_percpt_for_each(info, i,
2322 ksocknal_data.ksnd_sched_info) {
06ace26e 2323 if (!info->ksi_scheds)
d7e09d03
PT
2324 continue;
2325
2326 for (j = 0; j < info->ksi_nthreads_max; j++) {
d7e09d03 2327 sched = &info->ksi_scheds[j];
4b18c358
MN
2328 LASSERT(list_empty(
2329 &sched->kss_tx_conns));
2330 LASSERT(list_empty(
2331 &sched->kss_rx_conns));
2332 LASSERT(list_empty(
2333 &sched->kss_zombie_noop_txs));
5fd88337 2334 LASSERT(!sched->kss_nconns);
d7e09d03
PT
2335 }
2336 }
2337 }
2338
2339 /* flag threads to terminate; wake and wait for them to die */
2340 ksocknal_data.ksnd_shuttingdown = 1;
2341 wake_up_all(&ksocknal_data.ksnd_connd_waitq);
2342 wake_up_all(&ksocknal_data.ksnd_reaper_waitq);
2343
06ace26e 2344 if (ksocknal_data.ksnd_sched_info) {
d7e09d03
PT
2345 cfs_percpt_for_each(info, i,
2346 ksocknal_data.ksnd_sched_info) {
06ace26e 2347 if (!info->ksi_scheds)
d7e09d03
PT
2348 continue;
2349
2350 for (j = 0; j < info->ksi_nthreads_max; j++) {
2351 sched = &info->ksi_scheds[j];
2352 wake_up_all(&sched->kss_waitq);
2353 }
2354 }
2355 }
2356
2357 i = 4;
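		/*
		 * (i & -i) == i only when i is a power of two, so the
		 * "waiting for threads" message below is promoted from
		 * debug to warning only at exponentially spaced intervals.
		 */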
2358 read_lock(&ksocknal_data.ksnd_global_lock);
5fd88337 2359 while (ksocknal_data.ksnd_nthreads) {
d7e09d03
PT
2360 i++;
2361 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
2362 "waiting for %d threads to terminate\n",
2363 ksocknal_data.ksnd_nthreads);
2364 read_unlock(&ksocknal_data.ksnd_global_lock);
d3caf4d5
PT
2365 set_current_state(TASK_UNINTERRUPTIBLE);
2366 schedule_timeout(cfs_time_seconds(1));
d7e09d03
PT
2367 read_lock(&ksocknal_data.ksnd_global_lock);
2368 }
2369 read_unlock(&ksocknal_data.ksnd_global_lock);
2370
2371 ksocknal_free_buffers();
2372
2373 ksocknal_data.ksnd_init = SOCKNAL_INIT_NOTHING;
2374 break;
2375 }
2376
d7e09d03
PT
2377 module_put(THIS_MODULE);
2378}
2379
0b913529 2380static __u64
d4de2ab8 2381ksocknal_new_incarnation(void)
d7e09d03 2382{
d7e09d03 2383 /* The incarnation number is the time this module loaded and it
11dd2a97
TR
2384 * identifies this particular instance of the socknal.
2385 */
2386 return ktime_get_ns();
d7e09d03
PT
2387}
2388
0b913529 2389static int
d7e09d03
PT
2390ksocknal_base_startup(void)
2391{
2392 struct ksock_sched_info *info;
97d10d0a
MS
2393 int rc;
2394 int i;
d7e09d03 2395
d4de2ab8 2396 LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
5fd88337 2397 LASSERT(!ksocknal_data.ksnd_nnets);
d7e09d03 2398
d4de2ab8 2399 memset(&ksocknal_data, 0, sizeof(ksocknal_data)); /* zero pointers */
d7e09d03
PT
2400
2401 ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE;
d4de2ab8 2402 LIBCFS_ALLOC(ksocknal_data.ksnd_peers,
c314c319
JS
2403 sizeof(struct list_head) *
2404 ksocknal_data.ksnd_peer_hash_size);
06ace26e 2405 if (!ksocknal_data.ksnd_peers)
d7e09d03
PT
2406 return -ENOMEM;
2407
2408 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
2409 INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);
2410
2411 rwlock_init(&ksocknal_data.ksnd_global_lock);
2412 INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
2413
2414 spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
d4de2ab8
HE
2415 INIT_LIST_HEAD(&ksocknal_data.ksnd_enomem_conns);
2416 INIT_LIST_HEAD(&ksocknal_data.ksnd_zombie_conns);
2417 INIT_LIST_HEAD(&ksocknal_data.ksnd_deathrow_conns);
d7e09d03
PT
2418 init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq);
2419
2420 spin_lock_init(&ksocknal_data.ksnd_connd_lock);
d4de2ab8
HE
2421 INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_connreqs);
2422 INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_routes);
d7e09d03
PT
2423 init_waitqueue_head(&ksocknal_data.ksnd_connd_waitq);
2424
2425 spin_lock_init(&ksocknal_data.ksnd_tx_lock);
d4de2ab8 2426 INIT_LIST_HEAD(&ksocknal_data.ksnd_idle_noop_txs);
d7e09d03
PT
2427
 2428 /* NB the memset above zeros the whole of ksocknal_data */
2429
2430 /* flag lists/ptrs/locks initialised */
2431 ksocknal_data.ksnd_init = SOCKNAL_INIT_DATA;
2432 try_module_get(THIS_MODULE);
2433
2434 ksocknal_data.ksnd_sched_info = cfs_percpt_alloc(lnet_cpt_table(),
2435 sizeof(*info));
06ace26e 2436 if (!ksocknal_data.ksnd_sched_info)
d7e09d03
PT
2437 goto failed;
2438
2439 cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
ff13fd40 2440 struct ksock_sched *sched;
97d10d0a 2441 int nthrs;
d7e09d03
PT
2442
2443 nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
2444 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2445 nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
2446 } else {
4420cfd3
JS
2447 /*
 2448 * cap at half of the CPUs; assume the other half should be
 2449 * reserved for upper-layer modules
2450 */
d7e09d03
PT
2451 nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2452 }
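		/*
		 * Worked example (assuming SOCKNAL_NSCHEDS == 3 as defined
		 * in socklnd.h): with 8 CPUs in this CPT and ksnd_nscheds
		 * unset, nthrs = min(max(3, 8 >> 1), 8) = 4 schedulers.
		 */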
2453
2454 info->ksi_nthreads_max = nthrs;
2455 info->ksi_cpt = i;
2456
2457 LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i,
2458 info->ksi_nthreads_max * sizeof(*sched));
06ace26e 2459 if (!info->ksi_scheds)
d7e09d03
PT
2460 goto failed;
2461
2462 for (; nthrs > 0; nthrs--) {
2463 sched = &info->ksi_scheds[nthrs - 1];
2464
2465 sched->kss_info = info;
2466 spin_lock_init(&sched->kss_lock);
2467 INIT_LIST_HEAD(&sched->kss_rx_conns);
2468 INIT_LIST_HEAD(&sched->kss_tx_conns);
2469 INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
2470 init_waitqueue_head(&sched->kss_waitq);
2471 }
2472 }
2473
97d10d0a
MS
2474 ksocknal_data.ksnd_connd_starting = 0;
2475 ksocknal_data.ksnd_connd_failed_stamp = 0;
74ad578f 2476 ksocknal_data.ksnd_connd_starting_stamp = ktime_get_real_seconds();
4420cfd3
JS
2477 /*
2478 * must have at least 2 connds to remain responsive to accepts while
2479 * connecting
2480 */
d7e09d03
PT
2481 if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
2482 *ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1;
2483
2484 if (*ksocknal_tunables.ksnd_nconnds_max <
2485 *ksocknal_tunables.ksnd_nconnds) {
2486 ksocknal_tunables.ksnd_nconnds_max =
2487 ksocknal_tunables.ksnd_nconnds;
2488 }
2489
2490 for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
2491 char name[16];
50ffcb7e 2492
d7e09d03
PT
2493 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2494 ksocknal_data.ksnd_connd_starting++;
2495 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2496
d7e09d03
PT
2497 snprintf(name, sizeof(name), "socknal_cd%02d", i);
2498 rc = ksocknal_thread_start(ksocknal_connd,
2499 (void *)((ulong_ptr_t)i), name);
5fd88337 2500 if (rc) {
d7e09d03
PT
2501 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2502 ksocknal_data.ksnd_connd_starting--;
2503 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2504 CERROR("Can't spawn socknal connd: %d\n", rc);
2505 goto failed;
2506 }
2507 }
2508
2509 rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper");
5fd88337 2510 if (rc) {
d4de2ab8 2511 CERROR("Can't spawn socknal reaper: %d\n", rc);
d7e09d03
PT
2512 goto failed;
2513 }
2514
2515 /* flag everything initialised */
2516 ksocknal_data.ksnd_init = SOCKNAL_INIT_ALL;
2517
2518 return 0;
2519
2520 failed:
2521 ksocknal_base_shutdown();
2522 return -ENETDOWN;
2523}
2524
0b913529 2525static void
d4de2ab8 2526ksocknal_debug_peerhash(lnet_ni_t *ni)
d7e09d03 2527{
ff13fd40 2528 struct ksock_peer *peer = NULL;
97d10d0a
MS
2529 struct list_head *tmp;
2530 int i;
d7e09d03
PT
2531
2532 read_lock(&ksocknal_data.ksnd_global_lock);
2533
2534 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
d4de2ab8 2535 list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) {
ff13fd40 2536 peer = list_entry(tmp, struct ksock_peer, ksnp_list);
d7e09d03 2537
a10f33e9
MN
2538 if (peer->ksnp_ni == ni)
2539 break;
d7e09d03
PT
2540
2541 peer = NULL;
2542 }
2543 }
2544
06ace26e 2545 if (peer) {
ff13fd40
JS
2546 struct ksock_route *route;
2547 struct ksock_conn *conn;
d7e09d03 2548
2d00bd17
JP
2549 CWARN("Active peer on shutdown: %s, ref %d, scnt %d, closing %d, accepting %d, err %d, zcookie %llu, txq %d, zc_req %d\n",
2550 libcfs_id2str(peer->ksnp_id),
2551 atomic_read(&peer->ksnp_refcount),
2552 peer->ksnp_sharecount, peer->ksnp_closing,
2553 peer->ksnp_accepting, peer->ksnp_error,
2554 peer->ksnp_zc_next_cookie,
2555 !list_empty(&peer->ksnp_tx_queue),
2556 !list_empty(&peer->ksnp_zc_req_list));
d7e09d03 2557
d4de2ab8 2558 list_for_each(tmp, &peer->ksnp_routes) {
ff13fd40 2559 route = list_entry(tmp, struct ksock_route, ksnr_list);
2d00bd17
JP
2560 CWARN("Route: ref %d, schd %d, conn %d, cnted %d, del %d\n",
2561 atomic_read(&route->ksnr_refcount),
2562 route->ksnr_scheduled, route->ksnr_connecting,
2563 route->ksnr_connected, route->ksnr_deleted);
d7e09d03
PT
2564 }
2565
d4de2ab8 2566 list_for_each(tmp, &peer->ksnp_conns) {
ff13fd40 2567 conn = list_entry(tmp, struct ksock_conn, ksnc_list);
d4de2ab8 2568 CWARN("Conn: ref %d, sref %d, t %d, c %d\n",
c314c319
JS
2569 atomic_read(&conn->ksnc_conn_refcount),
2570 atomic_read(&conn->ksnc_sock_refcount),
2571 conn->ksnc_type, conn->ksnc_closing);
d7e09d03
PT
2572 }
2573 }
2574
2575 read_unlock(&ksocknal_data.ksnd_global_lock);
d7e09d03
PT
2576}
2577
2578void
d4de2ab8 2579ksocknal_shutdown(lnet_ni_t *ni)
d7e09d03 2580{
ff13fd40 2581 struct ksock_net *net = ni->ni_data;
97d10d0a 2582 int i;
d7e09d03
PT
2583 lnet_process_id_t anyid = {0};
2584
97d10d0a
MS
2585 anyid.nid = LNET_NID_ANY;
2586 anyid.pid = LNET_PID_ANY;
d7e09d03
PT
2587
2588 LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
2589 LASSERT(ksocknal_data.ksnd_nnets > 0);
2590
2591 spin_lock_bh(&net->ksnn_lock);
2592 net->ksnn_shutdown = 1; /* prevent new peers */
2593 spin_unlock_bh(&net->ksnn_lock);
2594
2595 /* Delete all peers */
2596 ksocknal_del_peer(ni, anyid, 0);
2597
2598 /* Wait for all peer state to clean up */
2599 i = 2;
2600 spin_lock_bh(&net->ksnn_lock);
5fd88337 2601 while (net->ksnn_npeers) {
d7e09d03
PT
2602 spin_unlock_bh(&net->ksnn_lock);
2603
2604 i++;
2605 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
2606 "waiting for %d peers to disconnect\n",
2607 net->ksnn_npeers);
d3caf4d5
PT
2608 set_current_state(TASK_UNINTERRUPTIBLE);
2609 schedule_timeout(cfs_time_seconds(1));
d7e09d03
PT
2610
2611 ksocknal_debug_peerhash(ni);
2612
2613 spin_lock_bh(&net->ksnn_lock);
2614 }
2615 spin_unlock_bh(&net->ksnn_lock);
2616
2617 for (i = 0; i < net->ksnn_ninterfaces; i++) {
5fd88337
JS
2618 LASSERT(!net->ksnn_interfaces[i].ksni_npeers);
2619 LASSERT(!net->ksnn_interfaces[i].ksni_nroutes);
d7e09d03
PT
2620 }
2621
2622 list_del(&net->ksnn_list);
2623 LIBCFS_FREE(net, sizeof(*net));
2624
2625 ksocknal_data.ksnd_nnets--;
5fd88337 2626 if (!ksocknal_data.ksnd_nnets)
d7e09d03
PT
2627 ksocknal_base_shutdown();
2628}
2629
0b913529 2630static int
ff13fd40 2631ksocknal_enumerate_interfaces(struct ksock_net *net)
d7e09d03 2632{
97d10d0a
MS
2633 char **names;
2634 int i;
2635 int j;
2636 int rc;
2637 int n;
d7e09d03 2638
1ad6a73e 2639 n = lnet_ipif_enumerate(&names);
d7e09d03
PT
2640 if (n <= 0) {
2641 CERROR("Can't enumerate interfaces: %d\n", n);
2642 return n;
2643 }
2644
2645 for (i = j = 0; i < n; i++) {
97d10d0a
MS
2646 int up;
2647 __u32 ip;
2648 __u32 mask;
d7e09d03
PT
2649
2650 if (!strcmp(names[i], "lo")) /* skip the loopback IF */
2651 continue;
2652
1ad6a73e 2653 rc = lnet_ipif_query(names[i], &up, &ip, &mask);
5fd88337 2654 if (rc) {
d7e09d03
PT
2655 CWARN("Can't get interface %s info: %d\n",
2656 names[i], rc);
2657 continue;
2658 }
2659
2660 if (!up) {
2661 CWARN("Ignoring interface %s (down)\n",
2662 names[i]);
2663 continue;
2664 }
2665
2666 if (j == LNET_MAX_INTERFACES) {
2667 CWARN("Ignoring interface %s (too many interfaces)\n",
2668 names[i]);
2669 continue;
2670 }
2671
2672 net->ksnn_interfaces[j].ksni_ipaddr = ip;
2673 net->ksnn_interfaces[j].ksni_netmask = mask;
9563fe8a
DE
2674 strlcpy(net->ksnn_interfaces[j].ksni_name,
2675 names[i], sizeof(net->ksnn_interfaces[j].ksni_name));
d7e09d03
PT
2676 j++;
2677 }
2678
1ad6a73e 2679 lnet_ipif_free_enumeration(names, n);
d7e09d03 2680
5fd88337 2681 if (!j)
d7e09d03
PT
2682 CERROR("Can't find any usable interfaces\n");
2683
2684 return j;
2685}
2686
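/*
 * Return the number of interfaces configured on @net that no other
 * ksock_net in ksnd_nets is already using.  Alias devices (e.g. "eth0:1")
 * are compared by their base name, with the ':' suffix temporarily
 * stripped and then restored.
 */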
0b913529 2687static int
ff13fd40 2688ksocknal_search_new_ipif(struct ksock_net *net)
d7e09d03 2689{
97d10d0a
MS
2690 int new_ipif = 0;
2691 int i;
d7e09d03
PT
2692
2693 for (i = 0; i < net->ksnn_ninterfaces; i++) {
97d10d0a
MS
2694 char *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
2695 char *colon = strchr(ifnam, ':');
2696 int found = 0;
ff13fd40 2697 struct ksock_net *tmp;
97d10d0a 2698 int j;
d7e09d03 2699
06ace26e 2700 if (colon) /* ignore alias device */
d7e09d03
PT
2701 *colon = 0;
2702
c314c319 2703 list_for_each_entry(tmp, &ksocknal_data.ksnd_nets, ksnn_list) {
d7e09d03 2704 for (j = 0; !found && j < tmp->ksnn_ninterfaces; j++) {
4b18c358
MN
2705 char *ifnam2 =
2706 &tmp->ksnn_interfaces[j].ksni_name[0];
d7e09d03
PT
2707 char *colon2 = strchr(ifnam2, ':');
2708
06ace26e 2709 if (colon2)
d7e09d03
PT
2710 *colon2 = 0;
2711
5fd88337 2712 found = !strcmp(ifnam, ifnam2);
06ace26e 2713 if (colon2)
d7e09d03
PT
2714 *colon2 = ':';
2715 }
2716 if (found)
2717 break;
2718 }
2719
2720 new_ipif += !found;
06ace26e 2721 if (colon)
d7e09d03
PT
2722 *colon = ':';
2723 }
2724
2725 return new_ipif;
2726}
2727
0b913529 2728static int
d7e09d03
PT
2729ksocknal_start_schedulers(struct ksock_sched_info *info)
2730{
97d10d0a
MS
2731 int nthrs;
2732 int rc = 0;
2733 int i;
d7e09d03 2734
5fd88337 2735 if (!info->ksi_nthreads) {
d7e09d03
PT
2736 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2737 nthrs = info->ksi_nthreads_max;
2738 } else {
2739 nthrs = cfs_cpt_weight(lnet_cpt_table(),
2740 info->ksi_cpt);
2741 nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2742 nthrs = min(SOCKNAL_NSCHEDS_HIGH, nthrs);
2743 }
2744 nthrs = min(nthrs, info->ksi_nthreads_max);
2745 } else {
2746 LASSERT(info->ksi_nthreads <= info->ksi_nthreads_max);
 2747 /* add up to two more threads if there is a new interface */
2748 nthrs = min(2, info->ksi_nthreads_max - info->ksi_nthreads);
2749 }
2750
2751 for (i = 0; i < nthrs; i++) {
97d10d0a
MS
2752 long id;
2753 char name[20];
ff13fd40 2754 struct ksock_sched *sched;
50ffcb7e 2755
d7e09d03
PT
2756 id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i);
2757 sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
2758 snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
2759 info->ksi_cpt, (int)(sched - &info->ksi_scheds[0]));
2760
2761 rc = ksocknal_thread_start(ksocknal_scheduler,
2762 (void *)id, name);
5fd88337 2763 if (!rc)
d7e09d03
PT
2764 continue;
2765
2766 CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
2767 info->ksi_cpt, info->ksi_nthreads + i, rc);
2768 break;
2769 }
2770
2771 info->ksi_nthreads += i;
2772 return rc;
2773}
2774
0b913529 2775static int
ff13fd40 2776ksocknal_net_start_threads(struct ksock_net *net, __u32 *cpts, int ncpts)
d7e09d03 2777{
97d10d0a
MS
2778 int newif = ksocknal_search_new_ipif(net);
2779 int rc;
2780 int i;
d7e09d03
PT
2781
2782 LASSERT(ncpts > 0 && ncpts <= cfs_cpt_number(lnet_cpt_table()));
2783
2784 for (i = 0; i < ncpts; i++) {
97d10d0a 2785 struct ksock_sched_info *info;
06ace26e 2786 int cpt = !cpts ? i : cpts[i];
d7e09d03
PT
2787
2788 LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
2789 info = ksocknal_data.ksnd_sched_info[cpt];
2790
2791 if (!newif && info->ksi_nthreads > 0)
2792 continue;
2793
2794 rc = ksocknal_start_schedulers(info);
5fd88337 2795 if (rc)
d7e09d03
PT
2796 return rc;
2797 }
2798 return 0;
2799}
2800
2801int
d4de2ab8 2802ksocknal_startup(lnet_ni_t *ni)
d7e09d03 2803{
ff13fd40 2804 struct ksock_net *net;
97d10d0a
MS
2805 int rc;
2806 int i;
d7e09d03 2807
d4de2ab8 2808 LASSERT(ni->ni_lnd == &the_ksocklnd);
d7e09d03
PT
2809
2810 if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) {
2811 rc = ksocknal_base_startup();
5fd88337 2812 if (rc)
d7e09d03
PT
2813 return rc;
2814 }
2815
2816 LIBCFS_ALLOC(net, sizeof(*net));
06ace26e 2817 if (!net)
d7e09d03
PT
2818 goto fail_0;
2819
2820 spin_lock_init(&net->ksnn_lock);
2821 net->ksnn_incarnation = ksocknal_new_incarnation();
2822 ni->ni_data = net;
2823 ni->ni_peertimeout = *ksocknal_tunables.ksnd_peertimeout;
2824 ni->ni_maxtxcredits = *ksocknal_tunables.ksnd_credits;
2825 ni->ni_peertxcredits = *ksocknal_tunables.ksnd_peertxcredits;
2826 ni->ni_peerrtrcredits = *ksocknal_tunables.ksnd_peerrtrcredits;
2827
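	/*
	 * If no interfaces were named for this NI, auto-select from the
	 * system (only the first discovered interface is used); otherwise
	 * every named interface must be queryable and up, or startup fails.
	 */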
06ace26e 2828 if (!ni->ni_interfaces[0]) {
d7e09d03
PT
2829 rc = ksocknal_enumerate_interfaces(net);
2830 if (rc <= 0)
2831 goto fail_1;
2832
2833 net->ksnn_ninterfaces = 1;
2834 } else {
2835 for (i = 0; i < LNET_MAX_INTERFACES; i++) {
97d10d0a 2836 int up;
d7e09d03 2837
06ace26e 2838 if (!ni->ni_interfaces[i])
d7e09d03
PT
2839 break;
2840
1ad6a73e 2841 rc = lnet_ipif_query(ni->ni_interfaces[i], &up,
c314c319
JS
2842 &net->ksnn_interfaces[i].ksni_ipaddr,
2843 &net->ksnn_interfaces[i].ksni_netmask);
d7e09d03 2844
5fd88337 2845 if (rc) {
d7e09d03
PT
2846 CERROR("Can't get interface %s info: %d\n",
2847 ni->ni_interfaces[i], rc);
2848 goto fail_1;
2849 }
2850
2851 if (!up) {
2852 CERROR("Interface %s is down\n",
2853 ni->ni_interfaces[i]);
2854 goto fail_1;
2855 }
2856
9563fe8a
DE
2857 strlcpy(net->ksnn_interfaces[i].ksni_name,
2858 ni->ni_interfaces[i],
2859 sizeof(net->ksnn_interfaces[i].ksni_name));
d7e09d03
PT
2860 }
2861 net->ksnn_ninterfaces = i;
2862 }
2863
 2864 /* call it before adding it to ksocknal_data.ksnd_nets */
2865 rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts);
5fd88337 2866 if (rc)
d7e09d03
PT
2867 goto fail_1;
2868
2869 ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
2870 net->ksnn_interfaces[0].ksni_ipaddr);
2871 list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets);
2872
2873 ksocknal_data.ksnd_nnets++;
2874
2875 return 0;
2876
2877 fail_1:
2878 LIBCFS_FREE(net, sizeof(*net));
2879 fail_0:
5fd88337 2880 if (!ksocknal_data.ksnd_nnets)
d7e09d03
PT
2881 ksocknal_base_shutdown();
2882
2883 return -ENETDOWN;
2884}
2885
e0f94113 2886static void __exit ksocklnd_exit(void)
d7e09d03
PT
2887{
2888 lnet_unregister_lnd(&the_ksocklnd);
d7e09d03
PT
2889}
2890
e0f94113 2891static int __init ksocklnd_init(void)
d7e09d03 2892{
97d10d0a 2893 int rc;
d7e09d03
PT
2894
 2895 /* check that the ksnr_connected/connecting fields are large enough */
d4de2ab8
HE
2896 CLASSERT(SOCKLND_CONN_NTYPES <= 4);
2897 CLASSERT(SOCKLND_CONN_ACK == SOCKLND_CONN_BULK_IN);
d7e09d03
PT
2898
2899 /* initialize the_ksocklnd */
2900 the_ksocklnd.lnd_type = SOCKLND;
2901 the_ksocklnd.lnd_startup = ksocknal_startup;
2902 the_ksocklnd.lnd_shutdown = ksocknal_shutdown;
2903 the_ksocklnd.lnd_ctl = ksocknal_ctl;
2904 the_ksocklnd.lnd_send = ksocknal_send;
2905 the_ksocklnd.lnd_recv = ksocknal_recv;
2906 the_ksocklnd.lnd_notify = ksocknal_notify;
2907 the_ksocklnd.lnd_query = ksocknal_query;
2908 the_ksocklnd.lnd_accept = ksocknal_accept;
2909
2910 rc = ksocknal_tunables_init();
5fd88337 2911 if (rc)
d7e09d03
PT
2912 return rc;
2913
2914 lnet_register_lnd(&the_ksocklnd);
2915
2916 return 0;
2917}
2918
a0455471 2919MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
57878e17 2920MODULE_DESCRIPTION("TCP Socket LNet Network Driver");
5b0e50b9 2921MODULE_VERSION("2.7.0");
d7e09d03
PT
2922MODULE_LICENSE("GPL");
2923
e0f94113
AD
2924module_init(ksocklnd_init);
2925module_exit(ksocklnd_exit);