/*
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright IBM Corp. 2006, 2009
 *
 *  Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 *		Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *  PM functions:
 *		Ursula Braun <ursula.braun@de.ibm.com>
 */
#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/af_iucv.h>
#define VERSION "1.2"

static char iucv_userid[80];

static const struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

static struct iucv_interface *pr_iucv;
/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

#define TRGCLS_SIZE	(sizeof(((struct iucv_message *)0)->class))

/* macros to set/get socket control buffer at correct offset */
#define CB_TAG(skb)	((skb)->cb)		/* iucv message tag */
#define CB_TAG_LEN	(sizeof(((struct iucv_message *) 0)->tag))
#define CB_TRGCLS(skb)	((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
#define CB_TRGCLS_LEN	(TRGCLS_SIZE)
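/*
 * Layout sketch (editorial note, assuming the u32 tag and class fields as
 * declared in struct iucv_message): skb->cb[0..3] holds the message tag and
 * skb->cb[4..7] the target class, so CB_TRGCLS(skb) == skb->cb + CB_TAG_LEN.
 */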
#define __iucv_sock_wait(sk, condition, timeo, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
	long __timeo = timeo;						\
	ret = 0;							\
	prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);	\
	while (!(condition)) {						\
		if (!__timeo) {						\
			ret = -EAGAIN;					\
			break;						\
		}							\
		if (signal_pending(current)) {				\
			ret = sock_intr_errno(__timeo);			\
			break;						\
		}							\
		release_sock(sk);					\
		__timeo = schedule_timeout(__timeo);			\
		lock_sock(sk);						\
		ret = sock_error(sk);					\
		if (ret)						\
			break;						\
	}								\
	finish_wait(sk_sleep(sk), &__wait);				\
} while (0)

#define iucv_sock_wait(sk, condition, timeo)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__iucv_sock_wait(sk, condition, timeo, __ret);		\
	__ret;								\
})
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);
static void iucv_sever_path(struct sock *, int);

static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
	struct packet_type *pt, struct net_device *orig_dev);
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
		   struct sk_buff *skb, u8 flags);
static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);
/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
				 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]);
static struct iucv_sock_list iucv_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
	.autobind_name = ATOMIC_INIT(0)
};
static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone,
	.path_quiesced	  = iucv_callback_shutdown,
};
static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}
static int afiucv_pm_prepare(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_prepare\n");
#endif
	return 0;
}

static void afiucv_pm_complete(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_complete\n");
#endif
}
/**
 * afiucv_pm_freeze() - Freeze PM callback
 * @dev:	AFIUCV dummy device
 *
 * Sever all established IUCV communication paths
 */
static int afiucv_pm_freeze(struct device *dev)
{
	struct iucv_sock *iucv;
	struct sock *sk;
	int err = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_freeze\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		iucv = iucv_sk(sk);
		switch (sk->sk_state) {
		case IUCV_DISCONN:
		case IUCV_CLOSING:
		case IUCV_CONNECTED:
			iucv_sever_path(sk, 0);
			break;
		default:
			break;
		}
		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
	}
	read_unlock(&iucv_sk_list.lock);
	return err;
}
/**
 * afiucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev:	AFIUCV dummy device
 *
 * socket clean up after freeze
 */
static int afiucv_pm_restore_thaw(struct device *dev)
{
	struct sock *sk;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		switch (sk->sk_state) {
		case IUCV_CONNECTED:
			sk->sk_err = EPIPE;
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
			break;
		default:
			break;
		}
	}
	read_unlock(&iucv_sk_list.lock);
	return 0;
}
static const struct dev_pm_ops afiucv_pm_ops = {
	.prepare = afiucv_pm_prepare,
	.complete = afiucv_pm_complete,
	.freeze = afiucv_pm_freeze,
	.thaw = afiucv_pm_restore_thaw,
	.restore = afiucv_pm_restore_thaw,
};
static struct device_driver af_iucv_driver = {
	.owner = THIS_MODULE,
	.name = "afiucv",
	.bus  = NULL,
	.pm   = &afiucv_pm_ops,
};

/* dummy device used as trigger for PM functions */
static struct device *af_iucv_dev;
/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg of data
 * stored in a buffer and of data stored in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *	PRMDATA[0..6]	socket data (max 7 bytes);
 *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xFF.
 * If the socket data len is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); and further,
 * if the socket data len is > 7, the function returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
	size_t datalen;

	if (msg->flags & IUCV_IPRMDATA) {
		datalen = 0xff - msg->rmmsg[7];
		return (datalen < 8) ? datalen : 8;
	}
	return msg->length;
}
/**
 * iucv_sock_in_state() - check for specific states
 * @sk:		sock structure
 * @state:	first iucv sk state
 * @state2:	second iucv sk state
 *
 * Returns true if the socket is in either the first or the second state.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
	return (sk->sk_state == state || sk->sk_state == state2);
}
/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk:		sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */
static inline int iucv_below_msglim(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (sk->sk_state != IUCV_CONNECTED)
		return 1;
	if (iucv->transport == AF_IUCV_TRANS_IUCV)
		return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
	else
		return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
			(atomic_read(&iucv->pendings) <= 0));
}
/**
 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 */
static void iucv_sock_wake_msglim(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}
/**
 * afiucv_hs_send() - send a message through HiperSockets transport
 */
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
		   struct sk_buff *skb, u8 flags)
{
	struct iucv_sock *iucv = iucv_sk(sock);
	struct af_iucv_trans_hdr *phs_hdr;
	struct sk_buff *nskb;
	int err, confirm_recv = 0;

	memset(skb->head, 0, ETH_HLEN);
	phs_hdr = (struct af_iucv_trans_hdr *)skb_push(skb,
					sizeof(struct af_iucv_trans_hdr));
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));

	phs_hdr->magic = ETH_P_AF_IUCV;
	phs_hdr->version = 1;
	phs_hdr->flags = flags;
	if (flags == AF_IUCV_FLAG_SYN)
		phs_hdr->window = iucv->msglimit;
	else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
		confirm_recv = atomic_read(&iucv->msg_recv);
		phs_hdr->window = confirm_recv;
		if (confirm_recv)
			phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
	}
	memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
	memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
	memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
	memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
	ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
	ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
	ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
	ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
	if (imsg)
		memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));

	skb->dev = iucv->hs_dev;
	if (!skb->dev)
		return -ENODEV;
	if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev))
		return -ENETDOWN;
	if (skb->len > skb->dev->mtu) {
		if (sock->sk_type == SOCK_SEQPACKET)
			return -EMSGSIZE;
		else
			skb_trim(skb, skb->dev->mtu);
	}
	skb->protocol = ETH_P_AF_IUCV;
	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return -ENOMEM;
	skb_queue_tail(&iucv->send_skb_q, nskb);
	err = dev_queue_xmit(skb);
	if (net_xmit_eval(err)) {
		skb_unlink(nskb, &iucv->send_skb_q);
		kfree_skb(nskb);
	} else {
		atomic_sub(confirm_recv, &iucv->msg_recv);
		WARN_ON(atomic_read(&iucv->msg_recv) < 0);
	}
	return net_xmit_eval(err);
}
static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;

	sk_for_each(sk, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}
static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_error_queue);

	sk_mem_reclaim(sk);

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive iucv socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(sk->sk_wmem_queued);
	WARN_ON(sk->sk_forward_alloc);
}
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
}
/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
/* Terminate an IUCV path */
static void iucv_sever_path(struct sock *sk, int with_user_data)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_path *path = iucv->path;

	if (iucv->path) {
		iucv->path = NULL;
		if (with_user_data) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			pr_iucv->path_sever(path, user_data);
		} else
			pr_iucv->path_sever(path, NULL);
		iucv_path_free(path);
	}
}
/* Send controlling flags (e.g. SYN, FIN) through an IUCV socket for
 * HIPER transport */
static int iucv_send_ctrl(struct sock *sk, u8 flags)
{
	int err = 0;
	int blen;
	struct sk_buff *skb;

	blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
	skb = sock_alloc_send_skb(sk, blen, 1, &err);
	if (skb) {
		skb_reserve(skb, blen);
		err = afiucv_hs_send(NULL, sk, skb, flags);
	}
	return err;
}
/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned long timeo;
	int err = 0;

	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
		}
	case IUCV_DISCONN:	/* fall through */
		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			iucv_sock_wait(sk,
					iucv_sock_in_state(sk, IUCV_CLOSED, 0),
					timeo);
		}

	case IUCV_CLOSING:	/* fall through */
		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);

	default:	/* fall through */
		iucv_sever_path(sk, 1);
	}

	if (iucv->hs_dev) {
		dev_put(iucv->hs_dev);
		iucv->hs_dev = NULL;
		sk->sk_bound_dev_if = 0;
	}

	/* mark socket for deletion by iucv_sock_kill() */
	sock_set_flag(sk, SOCK_ZAPPED);

	release_sock(sk);
}
static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent)
		sk->sk_type = parent->sk_type;
}
static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;
	struct iucv_sock *iucv;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
	if (!sk)
		return NULL;
	iucv = iucv_sk(sk);

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv->accept_q);
	spin_lock_init(&iucv->accept_q_lock);
	skb_queue_head_init(&iucv->send_skb_q);
	INIT_LIST_HEAD(&iucv->message_q.list);
	spin_lock_init(&iucv->message_q.lock);
	skb_queue_head_init(&iucv->backlog_skb_q);
	iucv->send_tag = 0;
	atomic_set(&iucv->pendings, 0);
	iucv->flags = 0;
	iucv->msglimit = 0;
	atomic_set(&iucv->msg_sent, 0);
	atomic_set(&iucv->msg_recv, 0);
	iucv->path = NULL;
	iucv->sk_txnotify = afiucv_hs_callback_txnotify;
	memset(&iucv->src_user_id, 0, 32);
	if (pr_iucv)
		iucv->transport = AF_IUCV_TRANS_IUCV;
	else
		iucv->transport = AF_IUCV_TRANS_HIPER;

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state	= IUCV_OPEN;

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}
/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
			    int kern)
{
	struct sock *sk;

	if (protocol && protocol != PF_IUCV)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &iucv_sock_ops;
		break;
	case SOCK_SEQPACKET:
		/* currently, proto ops can handle both sk types */
		sock->ops = &iucv_sock_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}
void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}
void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	sk_acceptq_added(parent);
}

void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	sk_acceptq_removed(iucv_sk(sk)->parent);
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}
struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_DISCONN ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}
/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err = 0;
	struct net_device *dev;
	char uid[9];

	/* Verify the input sockaddr */
	if (!addr || addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path)
		goto done_unlock;

	/* Bind the socket */
	if (pr_iucv)
		if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
			goto vm_bind; /* VM IUCV transport */

	/* try hiper transport */
	memcpy(uid, sa->siucv_user_id, sizeof(uid));
	ASCEBC(uid, 8);
	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if (!memcmp(dev->perm_addr, uid, 8)) {
			memcpy(iucv->src_name, sa->siucv_name, 8);
			memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
			sk->sk_bound_dev_if = dev->ifindex;
			iucv->hs_dev = dev;
			dev_hold(dev);
			sk->sk_state = IUCV_BOUND;
			iucv->transport = AF_IUCV_TRANS_HIPER;
			if (!iucv->msglimit)
				iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
			rcu_read_unlock();
			goto done_unlock;
		}
	}
	rcu_read_unlock();
vm_bind:
	if (pr_iucv) {
		/* use local userid for backward compat */
		memcpy(iucv->src_name, sa->siucv_name, 8);
		memcpy(iucv->src_user_id, iucv_userid, 8);
		sk->sk_state = IUCV_BOUND;
		iucv->transport = AF_IUCV_TRANS_IUCV;
		if (!iucv->msglimit)
			iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
		goto done_unlock;
	}
	/* found no dev to bind */
	err = -ENODEV;
done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}
/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	char name[12];
	int err = 0;

	if (unlikely(!pr_iucv))
		return -EPROTO;

	memcpy(iucv->src_user_id, iucv_userid, 8);

	write_lock_bh(&iucv_sk_list.lock);

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}

	write_unlock_bh(&iucv_sk_list.lock);

	memcpy(&iucv->src_name, name, 8);

	if (!iucv->msglimit)
		iucv->msglimit = IUCV_QUEUELEN_DEFAULT;

	return err;
}
static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned char user_data[16];
	int err;

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv->src_name);
	ASCEBC(user_data, sizeof(user_data));

	/* Create path. */
	iucv->path = iucv_path_alloc(iucv->msglimit,
				     IUCV_IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
				    sa->siucv_user_id, NULL, user_data,
				    sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		switch (err) {
		case 0x0b:	/* Target communicator is not logged on */
			err = -ENETUNREACH;
			break;
		case 0x0d:	/* Max connections for this guest exceeded */
		case 0x0e:	/* Max connections for target guest exceeded */
			err = -EAGAIN;
			break;
		case 0x0f:	/* Missing IUCV authorization */
			err = -EACCES;
			break;
		default:
			err = -ECONNREFUSED;
			break;
		}
	}
done:
	return err;
}
/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;

	if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_state == IUCV_OPEN &&
	    iucv->transport == AF_IUCV_TRANS_HIPER)
		return -EBADFD; /* explicit bind required */

	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
		return -EINVAL;

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv->dst_name, sa->siucv_name, 8);

	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
	else
		err = afiucv_path_connect(sock, addr);
	if (err)
		goto done;

	if (sk->sk_state != IUCV_CONNECTED)
		err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
							    IUCV_DISCONN),
				     sock_sndtimeo(sk, flags & O_NONBLOCK));

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
		err = -ECONNREFUSED;

	if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
		iucv_sever_path(sk, 0);

done:
	release_sock(sk);
	return err;
}
/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND)
		goto done;

	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}
/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}
static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int *len, int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);

	addr->sa_family = AF_IUCV;
	*len = sizeof(struct sockaddr_iucv);

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
		memcpy(siucv->siucv_name, iucv->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return 0;
}
/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:	IUCV path
 * @msg:	Pointer to a struct iucv_message
 * @skb:	The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list and the socket data len at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
			  struct sk_buff *skb)
{
	u8 prmdata[8];

	memcpy(prmdata, (void *) skb->data, skb->len);
	prmdata[7] = 0xff - (u8) skb->len;
	return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
				 (void *) prmdata, 8);
}
static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct iucv_message txmsg;
	struct cmsghdr *cmsg;
	int cmsg_done;
	long timeo;
	char user_id[9];
	char appl_id[9];
	int err;
	int noblock = msg->msg_flags & MSG_DONTWAIT;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* SOCK_SEQPACKET: we do not support segmented records */
	if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	/* Return if the socket is not in connected state */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ENOTCONN;
		goto out;
	}

	/* initialize defaults */
	cmsg_done   = 0;	/* check for duplicate headers */
	txmsg.class = 0;

	/* iterate over control messages */
	for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
	     cmsg = CMSG_NXTHDR(msg, cmsg)) {

		if (!CMSG_OK(msg, cmsg)) {
			err = -EINVAL;
			goto out;
		}

		if (cmsg->cmsg_level != SOL_IUCV)
			continue;

		if (cmsg->cmsg_type & cmsg_done) {
			err = -EINVAL;
			goto out;
		}
		cmsg_done |= cmsg->cmsg_type;

		switch (cmsg->cmsg_type) {
		case SCM_IUCV_TRGCLS:
			if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
				err = -EINVAL;
				goto out;
			}

			/* set iucv message target class */
			memcpy(&txmsg.class,
				(void *) CMSG_DATA(cmsg), TRGCLS_SIZE);

			break;

		default:
			err = -EINVAL;
			goto out;
		}
	}

	/* allocate one skb for each iucv message:
	 * this is fine for SOCK_SEQPACKET (unless we want to support
	 * segmented records using the MSG_EOR flag), but
	 * for SOCK_STREAM we might want to improve it in future */
	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		skb = sock_alloc_send_skb(sk,
			len + sizeof(struct af_iucv_trans_hdr) + ETH_HLEN,
			noblock, &err);
	else
		skb = sock_alloc_send_skb(sk, len, noblock, &err);
	if (!skb)
		goto out;
	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto fail;
	}

	/* wait if the number of outstanding messages for the iucv path
	 * has reached the message limit */
	timeo = sock_sndtimeo(sk, noblock);
	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
	if (err)
		goto fail;

	/* return -ECONNRESET if the socket is no longer connected */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ECONNRESET;
		goto fail;
	}

	/* increment and save iucv message tag for msg_completion cbk */
	txmsg.tag = iucv->send_tag++;
	memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);

	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
		atomic_inc(&iucv->msg_sent);
		err = afiucv_hs_send(&txmsg, sk, skb, 0);
		if (err) {
			atomic_dec(&iucv->msg_sent);
			goto fail;
		}
		goto release;
	}
	skb_queue_tail(&iucv->send_skb_q, skb);

	if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
	      && skb->len <= 7) {
		err = iucv_send_iprm(iucv->path, &txmsg, skb);

		/* on success: there is no message_complete callback
		 * for an IPRMDATA msg; remove skb from send queue */
		if (err == 0) {
			skb_unlink(skb, &iucv->send_skb_q);
			kfree_skb(skb);
		}

		/* this error should never happen since the
		 * IUCV_IPRMDATA path flag is set... sever path */
		if (err == 0x15) {
			pr_iucv->path_sever(iucv->path, NULL);
			skb_unlink(skb, &iucv->send_skb_q);
			err = -EPIPE;
			goto fail;
		}
	} else
		err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0,
					(void *) skb->data, skb->len);
	if (err) {
		if (err == 3) {
			user_id[8] = 0;
			memcpy(user_id, iucv->dst_user_id, 8);
			appl_id[8] = 0;
			memcpy(appl_id, iucv->dst_name, 8);
			pr_err("Application %s on z/VM guest %s"
				" exceeds message limit\n",
				appl_id, user_id);
			err = -EAGAIN;
		} else
			err = -EPIPE;
		skb_unlink(skb, &iucv->send_skb_q);
		goto fail;
	}

release:
	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}
/* iucv_fragment_skb() - Fragment a single IUCV message into multiple skb's
 *
 * Locking: must be called with message_q.lock held
 */
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
{
	int dataleft, size, copied = 0;
	struct sk_buff *nskb;

	dataleft = len;
	while (dataleft) {
		if (dataleft >= sk->sk_rcvbuf / 4)
			size = sk->sk_rcvbuf / 4;
		else
			size = dataleft;

		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
		if (!nskb)
			return -ENOMEM;

		/* copy target class to control buffer of new skb */
		memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);

		/* copy data fragment */
		memcpy(nskb->data, skb->data + copied, size);
		copied += size;
		dataleft -= size;

		skb_reset_transport_header(nskb);
		skb_reset_network_header(nskb);
		nskb->len = size;

		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
	}

	return 0;
}
/* iucv_process_message() - Receive a single outstanding IUCV message
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;
	unsigned int len;

	len = iucv_msg_length(msg);

	/* store msg target class in the second 4 bytes of skb ctrl buffer */
	/* Note: the first 4 bytes are reserved for msg tag */
	memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);

	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
		if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
			skb->data = NULL;
			skb->len = 0;
		}
	} else {
		rc = pr_iucv->message_receive(path, msg,
					      msg->flags & IUCV_IPRMDATA,
					      skb->data, len, NULL);
		if (rc) {
			kfree_skb(skb);
			return;
		}
		/* we need to fragment iucv messages for SOCK_STREAM only;
		 * for SOCK_SEQPACKET, it is only relevant if we support
		 * record segmentation using MSG_EOR (see also recvmsg()) */
		if (sk->sk_type == SOCK_STREAM &&
		    skb->truesize >= sk->sk_rcvbuf / 4) {
			rc = iucv_fragment_skb(sk, skb, len);
			kfree_skb(skb);
			skb = NULL;
			if (rc) {
				pr_iucv->path_sever(path, NULL);
				return;
			}
			skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
		} else {
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->len = len;
		}
	}

	if (sock_queue_rcv_skb(sk, skb))
		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}
/* iucv_process_message_q() - Process outstanding IUCV messages
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}
static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int copied, rlen;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;

	if ((sk->sk_state == IUCV_DISCONN) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    list_empty(&iucv->message_q.list))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	/* receive/dequeue next skb:
	 * the function understands MSG_PEEK and, thus, does not dequeue skb */
	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	rlen   = skb->len;		/* real length of skb */
	copied = min_t(unsigned int, rlen, len);
	if (!rlen)
		sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;

	cskb = skb;
	if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return -EFAULT;
	}

	/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (copied < rlen)
			msg->msg_flags |= MSG_TRUNC;
		/* each iucv message contains a complete record */
		msg->msg_flags |= MSG_EOR;
	}

	/* create control message to store iucv msg target class:
	 * get the trgcls from the control buffer of the skb due to
	 * fragmentation of original iucv message. */
	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
		       CB_TRGCLS_LEN, CB_TRGCLS(skb));
	if (err) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return err;
	}

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {

		/* SOCK_STREAM: re-queue skb if it contains unreceived data */
		if (sk->sk_type == SOCK_STREAM) {
			skb_pull(skb, copied);
			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				goto done;
			}
		}

		kfree_skb(skb);
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			atomic_inc(&iucv->msg_recv);
			if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
				WARN_ON(1);
				iucv_sock_close(sk);
				return -EFAULT;
			}
		}

		/* Queue backlog skbs */
		spin_lock_bh(&iucv->message_q.lock);
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			if (sock_queue_rcv_skb(sk, rskb)) {
				skb_queue_head(&iucv->backlog_skb_q,
						rskb);
				break;
			} else {
				rskb = skb_dequeue(&iucv->backlog_skb_q);
			}
		}
		if (skb_queue_empty(&iucv->backlog_skb_q)) {
			if (!list_empty(&iucv->message_q.list))
				iucv_process_message_q(sk);
			if (atomic_read(&iucv->msg_recv) >=
							iucv->msglimit / 2) {
				err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
				if (err) {
					sk->sk_state = IUCV_DISCONN;
					sk->sk_state_change(sk);
				}
			}
		}
		spin_unlock_bh(&iucv->message_q.lock);
	}

done:
	/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
	if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
		copied = rlen;

	return copied;
}
static inline unsigned int iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return POLLIN | POLLRDNORM;
	}

	return 0;
}
unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	sock_poll_wait(file, sk_sleep(sk), wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == IUCV_DISCONN)
		mask |= POLLIN;

	if (sock_writeable(sk) && iucv_below_msglim(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}
static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_LISTEN:
	case IUCV_DISCONN:
	case IUCV_CLOSING:
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;
	default:
		break;
	}

	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
		if (iucv->transport == AF_IUCV_TRANS_IUCV) {
			txmsg.class = 0;
			txmsg.tag = 0;
			err = pr_iucv->message_send(iucv->path, &txmsg,
				IUCV_IPRMDATA, 0, (void *) iprm_shutdown, 8);
			if (err) {
				switch (err) {
				case 1:
					err = -ENOTCONN;
					break;
				case 2:
					err = -ECONNRESET;
					break;
				default:
					err = -ENOTCONN;
					break;
				}
			}
		} else
			iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
	}

	sk->sk_shutdown |= how;
	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		if (iucv->transport == AF_IUCV_TRANS_IUCV) {
			err = pr_iucv->path_quiesce(iucv->path, NULL);
			if (err)
				err = -ENOTCONN;
/*			skb_queue_purge(&sk->sk_receive_queue); */
		}
		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}
static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}
/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
				char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val;
	int rc;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *) optval))
		return -EFAULT;

	rc = 0;

	lock_sock(sk);
	switch (optname) {
	case SO_IPRMDATA_MSG:
		if (val)
			iucv->flags |= IUCV_IPRMDATA;
		else
			iucv->flags &= ~IUCV_IPRMDATA;
		break;
	case SO_MSGLIMIT:
		switch (sk->sk_state) {
		case IUCV_OPEN:
		case IUCV_BOUND:
			if (val < 1 || val > (u16)(~0))
				rc = -EINVAL;
			else
				iucv->msglimit = val;
			break;
		default:
			rc = -EINVAL;
			break;
		}
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return rc;
}
static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
				char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int val;
	int len;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case SO_IPRMDATA_MSG:
		val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
		break;
	case SO_MSGLIMIT:
		lock_sock(sk);
		val = (iucv->path != NULL) ? iucv->path->msglim	/* connected */
					   : iucv->msglimit;	/* default */
		release_sock(sk);
		break;
	case SO_MSGSIZE:
		if (sk->sk_state == IUCV_OPEN)
			return -EBADFD;
		val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
				sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
				0x7fffffff;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
	if (!nsk) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	/* set message limit for path based on msglimit of accepting socket */
	niucv->msglimit = iucv->msglimit;
	path->msglim = iucv->msglimit;
	err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		iucv_sever_path(nsk, 1);
		iucv_sock_kill(nsk);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk, 1);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}
static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}
static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *save_msg;
	int len;

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		pr_iucv->message_reject(path, msg);
		return;
	}

	spin_lock(&iucv->message_q.lock);

	if (!list_empty(&iucv->message_q.list) ||
	    !skb_queue_empty(&iucv->backlog_skb_q))
		goto save_message;

	len = atomic_read(&sk->sk_rmem_alloc);
	len += SKB_TRUESIZE(iucv_msg_length(msg));
	if (len > sk->sk_rcvbuf)
		goto save_message;

	skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
	if (!skb)
		goto save_message;

	iucv_process_message(sk, skb, path, msg);
	goto out_unlock;

save_message:
	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
	if (!save_msg)
		goto out_unlock;
	save_msg->path = path;
	save_msg->msg = *msg;

	list_add_tail(&save_msg->list, &iucv->message_q.list);

out_unlock:
	spin_unlock(&iucv->message_q.lock);
}
static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this = NULL;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	bh_lock_sock(sk);
	if (!skb_queue_empty(list)) {
		spin_lock_irqsave(&list->lock, flags);

		while (list_skb != (struct sk_buff *)list) {
			if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
				this = list_skb;
				break;
			}
			list_skb = list_skb->next;
		}
		if (this)
			__skb_unlink(this, list);

		spin_unlock_irqrestore(&list->lock, flags);

		if (this) {
			kfree_skb(this);
			/* wake up any process waiting for sending */
			iucv_sock_wake_msglim(sk);
		}
	}

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
	bh_unlock_sock(sk);
}
static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (sk->sk_state == IUCV_CLOSED)
		return;

	bh_lock_sock(sk);
	iucv_sever_path(sk, 1);
	sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
}
/* called if the other communication side shuts down its RECV direction;
 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
 */
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	bh_lock_sock(sk);
	if (sk->sk_state != IUCV_CLOSED) {
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
}
/***************** HiperSockets transport callbacks ********************/
static void afiucv_swap_src_dest(struct sk_buff *skb)
{
	struct af_iucv_trans_hdr *trans_hdr =
				(struct af_iucv_trans_hdr *)skb->data;
	char tmpID[8];
	char tmpName[8];

	ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
	ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
	ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
	ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
	memcpy(tmpID, trans_hdr->srcUserID, 8);
	memcpy(tmpName, trans_hdr->srcAppName, 8);
	memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
	memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
	memcpy(trans_hdr->destUserID, tmpID, 8);
	memcpy(trans_hdr->destAppName, tmpName, 8);
	skb_push(skb, ETH_HLEN);
	memset(skb->data, 0, ETH_HLEN);
}
/**
 * afiucv_hs_callback_syn - react on received SYN
 **/
static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
{
	struct sock *nsk;
	struct iucv_sock *iucv, *niucv;
	struct af_iucv_trans_hdr *trans_hdr;
	int err;

	iucv = iucv_sk(sk);
	trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
	if (!iucv) {
		/* no sock - connection refused */
		afiucv_swap_src_dest(skb);
		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
		err = dev_queue_xmit(skb);
		goto out;
	}

	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
	bh_lock_sock(sk);
	if ((sk->sk_state != IUCV_LISTEN) ||
	    sk_acceptq_is_full(sk) ||
	    !nsk) {
		/* error on server socket - connection refused */
		if (nsk)
			sk_free(nsk);
		afiucv_swap_src_dest(skb);
		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
		err = dev_queue_xmit(skb);
		bh_unlock_sock(sk);
		goto out;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);
	niucv->transport = AF_IUCV_TRANS_HIPER;
	niucv->msglimit = iucv->msglimit;
	if (!trans_hdr->window)
		niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
	else
		niucv->msglimit_peer = trans_hdr->window;
	memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
	memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
	niucv->hs_dev = iucv->hs_dev;
	dev_hold(niucv->hs_dev);
	afiucv_swap_src_dest(skb);
	trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
	trans_hdr->window = niucv->msglimit;
	/* if receiver acks the xmit connection is established */
	err = dev_queue_xmit(skb);
	if (!err) {
		iucv_accept_enqueue(sk, nsk);
		nsk->sk_state = IUCV_CONNECTED;
		sk->sk_data_ready(sk, 1);
	} else
		iucv_sock_kill(nsk);
	bh_unlock_sock(sk);

out:
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_synack() - react on received SYN-ACK
 **/
static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct af_iucv_trans_hdr *trans_hdr =
					(struct af_iucv_trans_hdr *)skb->data;

	if (!iucv)
		goto out;
	if (sk->sk_state != IUCV_BOUND)
		goto out;
	bh_lock_sock(sk);
	iucv->msglimit_peer = trans_hdr->window;
	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
out:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_synfin() - react on received SYN_FIN
 **/
static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv)
		goto out;
	if (sk->sk_state != IUCV_BOUND)
		goto out;
	bh_lock_sock(sk);
	sk->sk_state = IUCV_DISCONN;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
out:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_fin() - react on received FIN
 **/
static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	/* other end of connection closed */
	if (!iucv)
		goto out;
	bh_lock_sock(sk);
	if (sk->sk_state == IUCV_CONNECTED) {
		sk->sk_state = IUCV_DISCONN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
out:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_win() - react on received WIN
 **/
static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct af_iucv_trans_hdr *trans_hdr =
					(struct af_iucv_trans_hdr *)skb->data;

	if (!iucv)
		return NET_RX_SUCCESS;

	if (sk->sk_state != IUCV_CONNECTED)
		return NET_RX_SUCCESS;

	atomic_sub(trans_hdr->window, &iucv->msg_sent);
	iucv_sock_wake_msglim(sk);
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_rx() - react on received data
 **/
static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	/* write stuff from iucv_msg to skb cb */
	if (skb->len < sizeof(struct af_iucv_trans_hdr)) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}
	skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	spin_lock(&iucv->message_q.lock);
	if (skb_queue_empty(&iucv->backlog_skb_q)) {
		if (sock_queue_rcv_skb(sk, skb)) {
			/* handle rcv queue full */
			skb_queue_tail(&iucv->backlog_skb_q, skb);
		}
	} else
		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
	spin_unlock(&iucv->message_q.lock);
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_rcv() - base function for arriving data through HiperSockets
 *                   transport
 *                   called from netif RX softirq
 **/
static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
	struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct iucv_sock *iucv;
	struct af_iucv_trans_hdr *trans_hdr;
	char nullstring[8];
	int err = 0;

	skb_pull(skb, ETH_HLEN);
	trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
	EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
	EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
	EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
	EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
	memset(nullstring, 0, sizeof(nullstring));
	iucv = NULL;
	sk = NULL;
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
			if ((!memcmp(&iucv_sk(sk)->src_name,
				     trans_hdr->destAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->src_user_id,
				     trans_hdr->destUserID, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_user_id,
				     nullstring, 8))) {
				iucv = iucv_sk(sk);
				break;
			}
		} else {
			if ((!memcmp(&iucv_sk(sk)->src_name,
				     trans_hdr->destAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->src_user_id,
				     trans_hdr->destUserID, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_name,
				     trans_hdr->srcAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_user_id,
				     trans_hdr->srcUserID, 8))) {
				iucv = iucv_sk(sk);
				break;
			}
		}
	}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		sk = NULL;

	/* no sock
	how should we send with no sock
	1) send without sock - no send rc checking?
	2) introduce a default sock to handle these cases

	 SYN -> send SYN|ACK in good case, send SYN|FIN in bad case
	 data -> send FIN
	 SYN|ACK, SYN|FIN, FIN -> no action? */

	switch (trans_hdr->flags) {
	case AF_IUCV_FLAG_SYN:
		/* connect request */
		err = afiucv_hs_callback_syn(sk, skb);
		break;
	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
		/* connect request confirmed */
		err = afiucv_hs_callback_synack(sk, skb);
		break;
	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
		/* connect request refused */
		err = afiucv_hs_callback_synfin(sk, skb);
		break;
	case (AF_IUCV_FLAG_FIN):
		/* close request */
		err = afiucv_hs_callback_fin(sk, skb);
		break;
	case (AF_IUCV_FLAG_WIN):
		err = afiucv_hs_callback_win(sk, skb);
		if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
			kfree_skb(skb);
			break;
		}
		/* fall through and receive non-zero length data */
	case (AF_IUCV_FLAG_SHT):
		/* shutdown request */
		/* fall through and receive zero length data */
	default:
		/* plain data frame */
		memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class,
		       CB_TRGCLS_LEN);
		err = afiucv_hs_callback_rx(sk, skb);
		break;
	}

	return err;
}
/**
 * afiucv_hs_callback_txnotify() - handle send notifications from
 *                                 HiperSockets transport
 **/
static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
					enum iucv_tx_notify n)
{
	struct sock *isk = skb->sk;
	struct sock *sk = NULL;
	struct iucv_sock *iucv = NULL;
	struct sk_buff_head *list;
	struct sk_buff *list_skb;
	struct sk_buff *nskb;
	unsigned long flags;

	read_lock_irqsave(&iucv_sk_list.lock, flags);
	sk_for_each(sk, &iucv_sk_list.head)
		if (sk == isk) {
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock_irqrestore(&iucv_sk_list.lock, flags);

	if (!iucv || sock_flag(sk, SOCK_ZAPPED))
		return;

	list = &iucv->send_skb_q;
	spin_lock_irqsave(&list->lock, flags);
	if (skb_queue_empty(list))
		goto out_unlock;
	list_skb = list->next;
	nskb = list_skb->next;
	while (list_skb != (struct sk_buff *)list) {
		if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
			switch (n) {
			case TX_NOTIFY_OK:
				__skb_unlink(list_skb, list);
				kfree_skb(list_skb);
				iucv_sock_wake_msglim(sk);
				break;
			case TX_NOTIFY_PENDING:
				atomic_inc(&iucv->pendings);
				break;
			case TX_NOTIFY_DELAYED_OK:
				__skb_unlink(list_skb, list);
				atomic_dec(&iucv->pendings);
				if (atomic_read(&iucv->pendings) <= 0)
					iucv_sock_wake_msglim(sk);
				kfree_skb(list_skb);
				break;
			case TX_NOTIFY_UNREACHABLE:
			case TX_NOTIFY_DELAYED_UNREACHABLE:
			case TX_NOTIFY_TPQFULL: /* not yet used */
			case TX_NOTIFY_GENERALERROR:
			case TX_NOTIFY_DELAYED_GENERALERROR:
				__skb_unlink(list_skb, list);
				kfree_skb(list_skb);
				if (sk->sk_state == IUCV_CONNECTED) {
					sk->sk_state = IUCV_DISCONN;
					sk->sk_state_change(sk);
				}
				break;
			}
			break;
		}
		list_skb = nskb;
		nskb = nskb->next;
	}
out_unlock:
	spin_unlock_irqrestore(&list->lock, flags);

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
}
/*
 * afiucv_netdev_event: handle netdev notifier chain events
 */
static int afiucv_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct net_device *event_dev = (struct net_device *)ptr;
	struct sock *sk;
	struct iucv_sock *iucv;

	switch (event) {
	case NETDEV_REBOOT:
	case NETDEV_GOING_DOWN:
		sk_for_each(sk, &iucv_sk_list.head) {
			iucv = iucv_sk(sk);
			if ((iucv->hs_dev == event_dev) &&
			    (sk->sk_state == IUCV_CONNECTED)) {
				if (event == NETDEV_GOING_DOWN)
					iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
				sk->sk_state = IUCV_DISCONN;
				sk->sk_state_change(sk);
			}
		}
		break;
	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
	default:
		break;
	}
	return NOTIFY_DONE;
}
static struct notifier_block afiucv_netdev_notifier = {
	.notifier_call = afiucv_netdev_event,
};
static const struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= iucv_sock_setsockopt,
	.getsockopt	= iucv_sock_getsockopt,
};
static const struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};
static struct packet_type iucv_packet_type = {
	.type = cpu_to_be16(ETH_P_AF_IUCV),
	.func = afiucv_hs_rcv,
};
static int afiucv_iucv_init(void)
{
	int err;

	err = pr_iucv->iucv_register(&af_iucv_handler, 0);
	if (err)
		goto out;
	/* establish dummy device */
	af_iucv_driver.bus = pr_iucv->bus;
	err = driver_register(&af_iucv_driver);
	if (err)
		goto out_iucv;
	af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!af_iucv_dev) {
		err = -ENOMEM;
		goto out_driver;
	}
	dev_set_name(af_iucv_dev, "af_iucv");
	af_iucv_dev->bus = pr_iucv->bus;
	af_iucv_dev->parent = pr_iucv->root;
	af_iucv_dev->release = (void (*)(struct device *))kfree;
	af_iucv_dev->driver = &af_iucv_driver;
	err = device_register(af_iucv_dev);
	if (err)
		goto out_driver;
	return 0;

out_driver:
	driver_unregister(&af_iucv_driver);
out_iucv:
	pr_iucv->iucv_unregister(&af_iucv_handler, 0);
out:
	return err;
}
static int __init afiucv_init(void)
{
	int err;

	if (MACHINE_IS_VM) {
		cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
		if (unlikely(err)) {
			WARN_ON(err);
			err = -EPROTONOSUPPORT;
			goto out;
		}

		pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
		if (!pr_iucv) {
			printk(KERN_WARNING "iucv_if lookup failed\n");
			memset(&iucv_userid, 0, sizeof(iucv_userid));
		}
	} else {
		memset(&iucv_userid, 0, sizeof(iucv_userid));
		pr_iucv = NULL;
	}

	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;

	if (pr_iucv) {
		err = afiucv_iucv_init();
		if (err)
			goto out_sock;
	}
	register_netdevice_notifier(&afiucv_netdev_notifier);
	dev_add_pack(&iucv_packet_type);
	return 0;

out_sock:
	sock_unregister(PF_IUCV);
out_proto:
	proto_unregister(&iucv_proto);
out:
	if (pr_iucv)
		symbol_put(iucv_if);
	return err;
}
static void __exit afiucv_exit(void)
{
	if (pr_iucv) {
		device_unregister(af_iucv_dev);
		driver_unregister(&af_iucv_driver);
		pr_iucv->iucv_unregister(&af_iucv_handler, 0);
		symbol_put(iucv_if);
	}
	unregister_netdevice_notifier(&afiucv_netdev_notifier);
	dev_remove_pack(&iucv_packet_type);
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
}

module_init(afiucv_init);
module_exit(afiucv_exit);
MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);