drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 *
 * Author: Zach Brown <zab@zabbo.net>
 * Author: Peter J. Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 * Author: Eric Barton <eric@bartonsoftware.com>
 *
 * This file is part of Lustre, http://www.lustre.org
 *
 * Portals is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Portals is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Portals; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#ifndef _SOCKLND_SOCKLND_H_
#define _SOCKLND_SOCKLND_H_

#define DEBUG_PORTAL_ALLOC
#define DEBUG_SUBSYSTEM S_LND

#include <asm/irq.h>
#include <linux/crc32.h>
#include <linux/errno.h>
#include <linux/if.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/syscalls.h>
#include <linux/sysctl.h>
#include <linux/uio.h>
#include <linux/unistd.h>
#include <net/sock.h>
#include <net/tcp.h>

#include "../../../include/linux/libcfs/libcfs.h"
#include "../../../include/linux/lnet/lnet.h"
#include "../../../include/linux/lnet/lib-lnet.h"
#include "../../../include/linux/lnet/socklnd.h"

/* assume one thread for each connection type */
#define SOCKNAL_NSCHEDS 3
#define SOCKNAL_NSCHEDS_HIGH (SOCKNAL_NSCHEDS << 1)

#define SOCKNAL_PEER_HASH_SIZE 101 /* # peer lists */
#define SOCKNAL_RESCHED 100 /* # scheduler loops before reschedule */
#define SOCKNAL_INSANITY_RECONN 5000 /* connd is trying on reconn infinitely */
#define SOCKNAL_ENOMEM_RETRY CFS_TICK /* jiffies between retries */

#define SOCKNAL_SINGLE_FRAG_TX 0 /* disable multi-fragment sends */
#define SOCKNAL_SINGLE_FRAG_RX 0 /* disable multi-fragment receives */

#define SOCKNAL_VERSION_DEBUG 0 /* enable protocol version debugging */

/* risk kmap deadlock on multi-frag I/O (backs off to single-frag if disabled).
 * no risk if we're not running on a CONFIG_HIGHMEM platform. */
#ifdef CONFIG_HIGHMEM
# define SOCKNAL_RISK_KMAP_DEADLOCK 0
#else
# define SOCKNAL_RISK_KMAP_DEADLOCK 1
#endif

struct ksock_sched_info;

typedef struct /* per scheduler state */
{
        spinlock_t kss_lock; /* serialise */
        struct list_head kss_rx_conns; /* conn waiting to be read */
        struct list_head kss_tx_conns; /* conn waiting to be written */
        struct list_head kss_zombie_noop_txs; /* zombie noop tx list */
        wait_queue_head_t kss_waitq; /* where scheduler sleeps */
        int kss_nconns; /* # connections assigned to this scheduler */
        struct ksock_sched_info *kss_info; /* owner of it */
        struct page *kss_rx_scratch_pgs[LNET_MAX_IOV];
        struct kvec kss_scratch_iov[LNET_MAX_IOV];
} ksock_sched_t;

struct ksock_sched_info {
        int ksi_nthreads_max; /* max allowed threads */
        int ksi_nthreads; /* number of threads */
        int ksi_cpt; /* CPT id */
        ksock_sched_t *ksi_scheds; /* array of schedulers */
};

#define KSOCK_CPT_SHIFT 16
#define KSOCK_THREAD_ID(cpt, sid) (((cpt) << KSOCK_CPT_SHIFT) | (sid))
#define KSOCK_THREAD_CPT(id) ((id) >> KSOCK_CPT_SHIFT)
#define KSOCK_THREAD_SID(id) ((id) & ((1UL << KSOCK_CPT_SHIFT) - 1))
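
/*
 * Worked example of the encoding above (editorial illustration, not part of
 * the original header): with KSOCK_CPT_SHIFT == 16,
 *
 *      KSOCK_THREAD_ID(2, 5)        == 0x00020005
 *      KSOCK_THREAD_CPT(0x00020005) == 2
 *      KSOCK_THREAD_SID(0x00020005) == 5
 *
 * i.e. the CPT lives in the high bits of a single thread id and the per-CPT
 * scheduler index in the low 16 bits.
 */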

typedef struct /* in-use interface */
{
        __u32 ksni_ipaddr; /* interface's IP address */
        __u32 ksni_netmask; /* interface's network mask */
        int ksni_nroutes; /* # routes using (active) */
        int ksni_npeers; /* # peers using (passive) */
        char ksni_name[IFNAMSIZ]; /* interface name */
} ksock_interface_t;

typedef struct {
        int *ksnd_timeout; /* "stuck" socket timeout (seconds) */
        int *ksnd_nscheds; /* # scheduler threads in each pool while starting */
        int *ksnd_nconnds; /* # connection daemons */
        int *ksnd_nconnds_max; /* max # connection daemons */
        int *ksnd_min_reconnectms; /* first connection retry after (ms)... */
        int *ksnd_max_reconnectms; /* ...exponentially increasing to this */
        int *ksnd_eager_ack; /* make TCP ack eagerly? */
        int *ksnd_typed_conns; /* drive sockets by type? */
        int *ksnd_min_bulk; /* smallest "large" message */
        int *ksnd_tx_buffer_size; /* socket tx buffer size */
        int *ksnd_rx_buffer_size; /* socket rx buffer size */
        int *ksnd_nagle; /* enable NAGLE? */
        int *ksnd_round_robin; /* round robin for multiple interfaces */
        int *ksnd_keepalive; /* # secs for sending keepalive NOOP */
        int *ksnd_keepalive_idle; /* # idle secs before 1st probe */
        int *ksnd_keepalive_count; /* # probes */
        int *ksnd_keepalive_intvl; /* time between probes */
        int *ksnd_credits; /* # concurrent sends */
        int *ksnd_peertxcredits; /* # concurrent sends to 1 peer */
        int *ksnd_peerrtrcredits; /* # per-peer router buffer credits */
        int *ksnd_peertimeout; /* seconds to consider peer dead */
        int *ksnd_enable_csum; /* enable checksum */
        int *ksnd_inject_csum_error; /* set non-zero to inject checksum error */
        int *ksnd_nonblk_zcack; /* always send zc-ack on non-blocking connection */
        unsigned int *ksnd_zc_min_payload; /* minimum zero copy payload size */
        int *ksnd_zc_recv; /* enable ZC receive (for Chelsio TOE) */
        int *ksnd_zc_recv_min_nfrags; /* minimum # of fragments to enable ZC receive */
} ksock_tunables_t;

typedef struct {
        __u64 ksnn_incarnation; /* my epoch */
        spinlock_t ksnn_lock; /* serialise */
        struct list_head ksnn_list; /* chain on global list */
        int ksnn_npeers; /* # peers */
        int ksnn_shutdown; /* shutting down? */
        int ksnn_ninterfaces; /* # IP interfaces */
        ksock_interface_t ksnn_interfaces[LNET_MAX_INTERFACES];
} ksock_net_t;

/** connd timeout */
#define SOCKNAL_CONND_TIMEOUT 120
/** reserved thread for accepting & creating new connd */
#define SOCKNAL_CONND_RESV 1

typedef struct {
        int ksnd_init; /* initialisation state */
        int ksnd_nnets; /* # networks set up */
        struct list_head ksnd_nets; /* list of nets */
        rwlock_t ksnd_global_lock; /* stabilize peer/conn ops */
        struct list_head *ksnd_peers; /* hash table of all my known peers */
        int ksnd_peer_hash_size; /* size of ksnd_peers */

        int ksnd_nthreads; /* # live threads */
        int ksnd_shuttingdown; /* tell threads to exit */
        struct ksock_sched_info **ksnd_sched_info; /* schedulers info */

        atomic_t ksnd_nactive_txs; /* # active txs */

        struct list_head ksnd_deathrow_conns; /* conns to close: reaper_lock */
        struct list_head ksnd_zombie_conns; /* conns to free: reaper_lock */
        struct list_head ksnd_enomem_conns; /* conns to retry: reaper_lock */
        wait_queue_head_t ksnd_reaper_waitq; /* reaper sleeps here */
        unsigned long ksnd_reaper_waketime; /* when reaper will wake */
        spinlock_t ksnd_reaper_lock; /* serialise */

        int ksnd_enomem_tx; /* test ENOMEM sender */
        int ksnd_stall_tx; /* test sluggish sender */
        int ksnd_stall_rx; /* test sluggish receiver */

        struct list_head ksnd_connd_connreqs; /* incoming connection requests */
        struct list_head ksnd_connd_routes; /* routes waiting to be connected */
        wait_queue_head_t ksnd_connd_waitq; /* connds sleep here */
        int ksnd_connd_connecting; /* # connds connecting */
        long ksnd_connd_failed_stamp; /* time stamp of the last failed connecting attempt */
        unsigned ksnd_connd_starting; /* # starting connd */
        long ksnd_connd_starting_stamp; /* time stamp of the last starting connd */
        unsigned ksnd_connd_running; /* # running connd */
        spinlock_t ksnd_connd_lock; /* serialise */

        struct list_head ksnd_idle_noop_txs; /* list head for freed noop tx */
        spinlock_t ksnd_tx_lock; /* serialise, g_lock unsafe */

} ksock_nal_data_t;

#define SOCKNAL_INIT_NOTHING 0
#define SOCKNAL_INIT_DATA 1
#define SOCKNAL_INIT_ALL 2

/* A packet just assembled for transmission is represented by 1 or more
 * struct iovec fragments (the first frag contains the portals header),
 * followed by 0 or more lnet_kiov_t fragments.
 *
 * On the receive side, initially 1 struct iovec fragment is posted for
 * receive (the header). Once the header has been received, the payload is
 * received into either struct iovec or lnet_kiov_t fragments, depending on
 * what the header matched or whether the message needs forwarding. */

struct ksock_conn; /* forward ref */
struct ksock_peer; /* forward ref */
struct ksock_route; /* forward ref */
struct ksock_proto; /* forward ref */

typedef struct /* transmit packet */
{
        struct list_head tx_list; /* queue on conn for transmission etc */
        struct list_head tx_zc_list; /* queue on peer for ZC request */
        atomic_t tx_refcount; /* tx reference count */
        int tx_nob; /* # packet bytes */
        int tx_resid; /* residual bytes */
        int tx_niov; /* # packet iovec frags */
        struct kvec *tx_iov; /* packet iovec frags */
        int tx_nkiov; /* # packet page frags */
        unsigned short tx_zc_aborted; /* aborted ZC request */
        unsigned short tx_zc_capable:1; /* payload is large enough for ZC */
        unsigned short tx_zc_checked:1; /* Have I checked if I should ZC? */
        unsigned short tx_nonblk:1; /* it's a non-blocking ACK */
        lnet_kiov_t *tx_kiov; /* packet page frags */
        struct ksock_conn *tx_conn; /* owning conn */
        lnet_msg_t *tx_lnetmsg; /* lnet message for lnet_finalize() */
        unsigned long tx_deadline; /* when (in jiffies) tx times out */
        ksock_msg_t tx_msg; /* socklnd message buffer */
        int tx_desc_size; /* size of this descriptor */
        union {
                struct {
                        struct kvec iov; /* virt hdr */
                        lnet_kiov_t kiov[0]; /* paged payload */
                } paged;
                struct {
                        struct kvec iov[1]; /* virt hdr + payload */
                } virt;
        } tx_frags;
} ksock_tx_t;

#define KSOCK_NOOP_TX_SIZE ((int)offsetof(ksock_tx_t, tx_frags.paged.kiov[0]))
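
/*
 * Editorial note (hedged, not from the original source): KSOCK_NOOP_TX_SIZE
 * is the size of a ksock_tx_t whose tx_frags.paged.kiov[] array is empty,
 * i.e. a descriptor that carries a header but no paged payload fragments,
 * which is presumably the allocation size used for NOOP/ZC-ACK txs.
 */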

/* network zero copy callback descriptor embedded in ksock_tx_t */

/* space for the rx frag descriptors; we either read a single contiguous
 * header, or up to LNET_MAX_IOV frags of payload of either type. */
typedef union {
        struct kvec iov[LNET_MAX_IOV];
        lnet_kiov_t kiov[LNET_MAX_IOV];
} ksock_rxiovspace_t;

#define SOCKNAL_RX_KSM_HEADER 1 /* reading ksock message header */
#define SOCKNAL_RX_LNET_HEADER 2 /* reading lnet message header */
#define SOCKNAL_RX_PARSE 3 /* Calling lnet_parse() */
#define SOCKNAL_RX_PARSE_WAIT 4 /* waiting to be told to read the body */
#define SOCKNAL_RX_LNET_PAYLOAD 5 /* reading lnet payload (to deliver here) */
#define SOCKNAL_RX_SLOP 6 /* skipping body */

typedef struct ksock_conn {
        struct ksock_peer *ksnc_peer; /* owning peer */
        struct ksock_route *ksnc_route; /* owning route */
        struct list_head ksnc_list; /* stash on peer's conn list */
        struct socket *ksnc_sock; /* actual socket */
        void *ksnc_saved_data_ready; /* socket's original data_ready() callback */
        void *ksnc_saved_write_space; /* socket's original write_space() callback */
        atomic_t ksnc_conn_refcount; /* conn refcount */
        atomic_t ksnc_sock_refcount; /* sock refcount */
        ksock_sched_t *ksnc_scheduler; /* who schedules this connection */
        __u32 ksnc_myipaddr; /* my IP */
        __u32 ksnc_ipaddr; /* peer's IP */
        int ksnc_port; /* peer's port */
        signed int ksnc_type:3; /* type of connection, should be signed value */
        unsigned int ksnc_closing:1; /* being shut down */
        unsigned int ksnc_flip:1; /* flip or not, only for V2.x */
        unsigned int ksnc_zc_capable:1; /* able to use ZC */
        struct ksock_proto *ksnc_proto; /* protocol for the connection */

        /* reader */
        struct list_head ksnc_rx_list; /* where I enq waiting input or a forwarding descriptor */
        unsigned long ksnc_rx_deadline; /* when (in jiffies) receive times out */
        __u8 ksnc_rx_started; /* started receiving a message */
        __u8 ksnc_rx_ready; /* data ready to read */
        __u8 ksnc_rx_scheduled; /* being progressed */
        __u8 ksnc_rx_state; /* what is being read */
        int ksnc_rx_nob_left; /* # bytes to next hdr/body */
        int ksnc_rx_nob_wanted; /* bytes actually wanted */
        int ksnc_rx_niov; /* # iovec frags */
        struct kvec *ksnc_rx_iov; /* the iovec frags */
        int ksnc_rx_nkiov; /* # page frags */
        lnet_kiov_t *ksnc_rx_kiov; /* the page frags */
        ksock_rxiovspace_t ksnc_rx_iov_space; /* space for frag descriptors */
        __u32 ksnc_rx_csum; /* partial checksum for incoming data */
        void *ksnc_cookie; /* rx lnet_finalize passthru arg */
        ksock_msg_t ksnc_msg; /* incoming message buffer:
                               * V2.x message takes the whole struct
                               * V1.x message is a bare lnet_hdr_t, it's
                               * stored in ksnc_msg.ksm_u.lnetmsg */

        /* WRITER */
        struct list_head ksnc_tx_list; /* where I enq waiting for output space */
        struct list_head ksnc_tx_queue; /* packets waiting to be sent */
        ksock_tx_t *ksnc_tx_carrier; /* next TX that can carry a LNet message or ZC-ACK */
        unsigned long ksnc_tx_deadline; /* when (in jiffies) tx times out */
        int ksnc_tx_bufnob; /* send buffer marker */
        atomic_t ksnc_tx_nob; /* # bytes queued */
        int ksnc_tx_ready; /* write space */
        int ksnc_tx_scheduled; /* being progressed */
        unsigned long ksnc_tx_last_post; /* time stamp of the last posted TX */
} ksock_conn_t;

typedef struct ksock_route {
        struct list_head ksnr_list; /* chain on peer route list */
        struct list_head ksnr_connd_list; /* chain on ksnr_connd_routes */
        struct ksock_peer *ksnr_peer; /* owning peer */
        atomic_t ksnr_refcount; /* # users */
        unsigned long ksnr_timeout; /* when (in jiffies) reconnection can happen next */
        long ksnr_retry_interval; /* how long between retries */
        __u32 ksnr_myipaddr; /* my IP */
        __u32 ksnr_ipaddr; /* IP address to connect to */
        int ksnr_port; /* port to connect to */
        unsigned int ksnr_scheduled:1; /* scheduled for attention */
        unsigned int ksnr_connecting:1; /* connection establishment in progress */
        unsigned int ksnr_connected:4; /* connections established by type */
        unsigned int ksnr_deleted:1; /* been removed from peer? */
        unsigned int ksnr_share_count; /* created explicitly? */
        int ksnr_conn_count; /* # conns established by this route */
} ksock_route_t;

#define SOCKNAL_KEEPALIVE_PING 1 /* cookie for keepalive ping */

typedef struct ksock_peer {
        struct list_head ksnp_list; /* stash on global peer list */
        unsigned long ksnp_last_alive; /* when (in jiffies) I was last alive */
        lnet_process_id_t ksnp_id; /* who's on the other end(s) */
        atomic_t ksnp_refcount; /* # users */
        int ksnp_sharecount; /* lconf usage counter */
        int ksnp_closing; /* being closed */
        int ksnp_accepting; /* # passive connections pending */
        int ksnp_error; /* errno on closing last conn */
        __u64 ksnp_zc_next_cookie; /* ZC completion cookie */
        __u64 ksnp_incarnation; /* latest known peer incarnation */
        struct ksock_proto *ksnp_proto; /* latest known peer protocol */
        struct list_head ksnp_conns; /* all active connections */
        struct list_head ksnp_routes; /* routes */
        struct list_head ksnp_tx_queue; /* waiting packets */
        spinlock_t ksnp_lock; /* serialize, g_lock unsafe */
        struct list_head ksnp_zc_req_list; /* zero copy requests wait for ACK */
        unsigned long ksnp_send_keepalive; /* time to send keepalive */
        lnet_ni_t *ksnp_ni; /* which network */
        int ksnp_n_passive_ips; /* # of... */

        /* preferred local interfaces */
        __u32 ksnp_passive_ips[LNET_MAX_INTERFACES];
} ksock_peer_t;

typedef struct ksock_connreq {
        struct list_head ksncr_list; /* stash on ksnd_connd_connreqs */
        lnet_ni_t *ksncr_ni; /* chosen NI */
        struct socket *ksncr_sock; /* accepted socket */
} ksock_connreq_t;

extern ksock_nal_data_t ksocknal_data;
extern ksock_tunables_t ksocknal_tunables;

#define SOCKNAL_MATCH_NO 0 /* TX can't match type of connection */
#define SOCKNAL_MATCH_YES 1 /* TX matches type of connection */
#define SOCKNAL_MATCH_MAY 2 /* TX can be sent on the connection, but not preferred */

typedef struct ksock_proto {
        /* version number of protocol */
        int pro_version;

        /* handshake function */
        int (*pro_send_hello)(ksock_conn_t *, ksock_hello_msg_t *);

        /* handshake function */
        int (*pro_recv_hello)(ksock_conn_t *, ksock_hello_msg_t *, int);

        /* message pack */
        void (*pro_pack)(ksock_tx_t *);

        /* message unpack */
        void (*pro_unpack)(ksock_msg_t *);

        /* queue tx on the connection */
        ksock_tx_t *(*pro_queue_tx_msg)(ksock_conn_t *, ksock_tx_t *);

        /* queue ZC ack on the connection */
        int (*pro_queue_tx_zcack)(ksock_conn_t *, ksock_tx_t *, __u64);

        /* handle ZC request */
        int (*pro_handle_zcreq)(ksock_conn_t *, __u64, int);

        /* handle ZC ACK */
        int (*pro_handle_zcack)(ksock_conn_t *, __u64, __u64);

        /* msg type matches the connection type:
         * return value:
         *   MATCH_NO  : no
         *   MATCH_YES : matching type
         *   MATCH_MAY : can be backup */
        int (*pro_match_tx)(ksock_conn_t *, ksock_tx_t *, int);
} ksock_proto_t;

extern ksock_proto_t ksocknal_protocol_v1x;
extern ksock_proto_t ksocknal_protocol_v2x;
extern ksock_proto_t ksocknal_protocol_v3x;

#define KSOCK_PROTO_V1_MAJOR LNET_PROTO_TCP_VERSION_MAJOR
#define KSOCK_PROTO_V1_MINOR LNET_PROTO_TCP_VERSION_MINOR
#define KSOCK_PROTO_V1 KSOCK_PROTO_V1_MAJOR

#ifndef CPU_MASK_NONE
#define CPU_MASK_NONE 0UL
#endif

static inline __u32 ksocknal_csum(__u32 crc, unsigned char const *p, size_t len)
{
#if 1
        return crc32_le(crc, p, len);
#else
        while (len-- > 0)
                crc = ((crc + 0x100) & ~0xff) | ((crc + *p++) & 0xff);
        return crc;
#endif
}

static inline int
ksocknal_route_mask(void)
{
        if (!*ksocknal_tunables.ksnd_typed_conns)
                return (1 << SOCKLND_CONN_ANY);

        return ((1 << SOCKLND_CONN_CONTROL) |
                (1 << SOCKLND_CONN_BULK_IN) |
                (1 << SOCKLND_CONN_BULK_OUT));
}

static inline struct list_head *
ksocknal_nid2peerlist(lnet_nid_t nid)
{
        unsigned int hash = ((unsigned int)nid) % ksocknal_data.ksnd_peer_hash_size;

        return &ksocknal_data.ksnd_peers[hash];
}
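
/*
 * Worked example (editorial illustration, not from the original source):
 * assuming ksnd_peer_hash_size is SOCKNAL_PEER_HASH_SIZE (101), a NID whose
 * low 32 bits are 1234 selects bucket 1234 % 101 == 22, so that peer is
 * chained on ksnd_peers[22].
 */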

static inline void
ksocknal_conn_addref(ksock_conn_t *conn)
{
        LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);
        atomic_inc(&conn->ksnc_conn_refcount);
}

extern void ksocknal_queue_zombie_conn(ksock_conn_t *conn);
extern void ksocknal_finalize_zcreq(ksock_conn_t *conn);

static inline void
ksocknal_conn_decref(ksock_conn_t *conn)
{
        LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);
        if (atomic_dec_and_test(&conn->ksnc_conn_refcount))
                ksocknal_queue_zombie_conn(conn);
}

static inline int
ksocknal_connsock_addref(ksock_conn_t *conn)
{
        int rc = -ESHUTDOWN;

        read_lock(&ksocknal_data.ksnd_global_lock);
        if (!conn->ksnc_closing) {
                LASSERT(atomic_read(&conn->ksnc_sock_refcount) > 0);
                atomic_inc(&conn->ksnc_sock_refcount);
                rc = 0;
        }
        read_unlock(&ksocknal_data.ksnd_global_lock);

        return rc;
}

static inline void
ksocknal_connsock_decref(ksock_conn_t *conn)
{
        LASSERT(atomic_read(&conn->ksnc_sock_refcount) > 0);
        if (atomic_dec_and_test(&conn->ksnc_sock_refcount)) {
                LASSERT(conn->ksnc_closing);
                sock_release(conn->ksnc_sock);
                conn->ksnc_sock = NULL;
                ksocknal_finalize_zcreq(conn);
        }
}
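
/*
 * Usage sketch (editorial addition, not from the original source): code that
 * needs to touch conn->ksnc_sock would typically bracket the access with the
 * pair above, e.g.
 *
 *      if (ksocknal_connsock_addref(conn) == 0) {
 *              ... use conn->ksnc_sock; it cannot be released under us ...
 *              ksocknal_connsock_decref(conn);
 *      }
 *
 * A -ESHUTDOWN return means the connection is already closing and the socket
 * must not be used.
 */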

static inline void
ksocknal_tx_addref(ksock_tx_t *tx)
{
        LASSERT(atomic_read(&tx->tx_refcount) > 0);
        atomic_inc(&tx->tx_refcount);
}

extern void ksocknal_tx_prep(ksock_conn_t *, ksock_tx_t *tx);
extern void ksocknal_tx_done(lnet_ni_t *ni, ksock_tx_t *tx);

static inline void
ksocknal_tx_decref(ksock_tx_t *tx)
{
        LASSERT(atomic_read(&tx->tx_refcount) > 0);
        if (atomic_dec_and_test(&tx->tx_refcount))
                ksocknal_tx_done(NULL, tx);
}

static inline void
ksocknal_route_addref(ksock_route_t *route)
{
        LASSERT(atomic_read(&route->ksnr_refcount) > 0);
        atomic_inc(&route->ksnr_refcount);
}

extern void ksocknal_destroy_route(ksock_route_t *route);

static inline void
ksocknal_route_decref(ksock_route_t *route)
{
        LASSERT(atomic_read(&route->ksnr_refcount) > 0);
        if (atomic_dec_and_test(&route->ksnr_refcount))
                ksocknal_destroy_route(route);
}

static inline void
ksocknal_peer_addref(ksock_peer_t *peer)
{
        LASSERT(atomic_read(&peer->ksnp_refcount) > 0);
        atomic_inc(&peer->ksnp_refcount);
}

extern void ksocknal_destroy_peer(ksock_peer_t *peer);

static inline void
ksocknal_peer_decref(ksock_peer_t *peer)
{
        LASSERT(atomic_read(&peer->ksnp_refcount) > 0);
        if (atomic_dec_and_test(&peer->ksnp_refcount))
                ksocknal_destroy_peer(peer);
}

int ksocknal_startup(lnet_ni_t *ni);
void ksocknal_shutdown(lnet_ni_t *ni);
int ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg);
int ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
int ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
                  int delayed, unsigned int niov,
                  struct kvec *iov, lnet_kiov_t *kiov,
                  unsigned int offset, unsigned int mlen, unsigned int rlen);
int ksocknal_accept(lnet_ni_t *ni, struct socket *sock);

extern int ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip, int port);
extern ksock_peer_t *ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id);
extern ksock_peer_t *ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id);
extern void ksocknal_peer_failed(ksock_peer_t *peer);
extern int ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
                                struct socket *sock, int type);
extern void ksocknal_close_conn_locked(ksock_conn_t *conn, int why);
extern void ksocknal_terminate_conn(ksock_conn_t *conn);
extern void ksocknal_destroy_conn(ksock_conn_t *conn);
extern int ksocknal_close_peer_conns_locked(ksock_peer_t *peer,
                                            __u32 ipaddr, int why);
extern int ksocknal_close_conn_and_siblings(ksock_conn_t *conn, int why);
extern int ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr);
extern ksock_conn_t *ksocknal_find_conn_locked(ksock_peer_t *peer,
                                               ksock_tx_t *tx, int nonblk);

extern int ksocknal_launch_packet(lnet_ni_t *ni, ksock_tx_t *tx,
                                  lnet_process_id_t id);
extern ksock_tx_t *ksocknal_alloc_tx(int type, int size);
extern void ksocknal_free_tx(ksock_tx_t *tx);
extern ksock_tx_t *ksocknal_alloc_tx_noop(__u64 cookie, int nonblk);
extern void ksocknal_next_tx_carrier(ksock_conn_t *conn);
extern void ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn);
extern void ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist,
                                 int error);
extern void ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive);
extern void ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when);
extern int ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name);
extern void ksocknal_thread_fini(void);
extern void ksocknal_launch_all_connections_locked(ksock_peer_t *peer);
extern ksock_route_t *ksocknal_find_connectable_route_locked(ksock_peer_t *peer);
extern ksock_route_t *ksocknal_find_connecting_route_locked(ksock_peer_t *peer);
extern int ksocknal_new_packet(ksock_conn_t *conn, int skip);
extern int ksocknal_scheduler(void *arg);
extern int ksocknal_connd(void *arg);
extern int ksocknal_reaper(void *arg);
extern int ksocknal_send_hello(lnet_ni_t *ni, ksock_conn_t *conn,
                               lnet_nid_t peer_nid, ksock_hello_msg_t *hello);
extern int ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn,
                               ksock_hello_msg_t *hello, lnet_process_id_t *id,
                               __u64 *incarnation);
extern void ksocknal_read_callback(ksock_conn_t *conn);
extern void ksocknal_write_callback(ksock_conn_t *conn);

extern int ksocknal_lib_zc_capable(ksock_conn_t *conn);
extern void ksocknal_lib_save_callback(struct socket *sock, ksock_conn_t *conn);
extern void ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn);
extern void ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn);
extern void ksocknal_lib_push_conn(ksock_conn_t *conn);
extern int ksocknal_lib_get_conn_addrs(ksock_conn_t *conn);
extern int ksocknal_lib_setup_sock(struct socket *so);
extern int ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx);
extern int ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx);
extern void ksocknal_lib_eager_ack(ksock_conn_t *conn);
extern int ksocknal_lib_recv_iov(ksock_conn_t *conn);
extern int ksocknal_lib_recv_kiov(ksock_conn_t *conn);
extern int ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem,
                                          int *rxmem, int *nagle);

extern int ksocknal_tunables_init(void);

extern void ksocknal_lib_csum_tx(ksock_tx_t *tx);

extern int ksocknal_lib_memory_pressure(ksock_conn_t *conn);
extern int ksocknal_lib_bind_thread_to_cpu(int id);

#endif /* _SOCKLND_SOCKLND_H_ */