/*
 *  linux/net/iucv/af_iucv.c
 *
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright 2006 IBM Corporation
 *
 *  Author(s):  Jennifer Hunt <jenhunt@us.ibm.com>
 */

#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/iucv.h>
#include <net/iucv/af_iucv.h>

#define VERSION "1.1"

static char iucv_userid[80];

static struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
        .name           = "AF_IUCV",
        .owner          = THIS_MODULE,
        .obj_size       = sizeof(struct iucv_sock),
};

/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
        {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
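
/*
 * Note: with the AF_IUCV length convention (len = 0xff - PRMDATA[7], see
 * iucv_msg_length() below), this pattern decodes to 0xff - 0x01 = 0xfe,
 * which exceeds the 7-byte IPRM payload maximum and therefore marks the
 * message as a shutdown notification rather than socket data.
 */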

#define TRGCLS_SIZE     (sizeof(((struct iucv_message *)0)->class))

/* macros to set/get socket control buffer at correct offset */
#define CB_TAG(skb)     ((skb)->cb)             /* iucv message tag */
#define CB_TAG_LEN      (sizeof(((struct iucv_message *) 0)->tag))
#define CB_TRGCLS(skb)  ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
#define CB_TRGCLS_LEN   (TRGCLS_SIZE)
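
/*
 * Resulting layout of the first 8 bytes of skb->cb (tag and class are
 * both 32-bit fields of struct iucv_message):
 *
 *      byte 0..3   iucv message tag        (CB_TAG, CB_TAG_LEN = 4)
 *      byte 4..7   iucv msg target class   (CB_TRGCLS, CB_TRGCLS_LEN = 4)
 */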

static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
                                 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]);

static struct iucv_sock_list iucv_sk_list = {
        .lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
        .autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
        .path_pending     = iucv_callback_connreq,
        .path_complete    = iucv_callback_connack,
        .path_severed     = iucv_callback_connrej,
        .message_pending  = iucv_callback_rx,
        .message_complete = iucv_callback_txdone,
        .path_quiesced    = iucv_callback_shutdown,
};

static inline void high_nmcpy(unsigned char *dst, char *src)
{
        memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
        memcpy(&dst[8], src, 8);
}

/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:        Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg of data
 * stored in a buffer and of data stored in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *      PRMDATA[0..6]   socket data (max 7 bytes);
 *      PRMDATA[7]      socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xFF.
 * If the socket data length is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); in that case, the function
 * returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
        size_t datalen;

        if (msg->flags & IUCV_IPRMDATA) {
                datalen = 0xff - msg->rmmsg[7];
                return (datalen < 8) ? datalen : 8;
        }
        return msg->length;
}
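
/*
 * Worked example for the IPRM encoding: a 5-byte payload is stored in
 * PRMDATA[0..4] with PRMDATA[7] = 0xff - 5 = 0xfa, so the function above
 * returns 0xff - 0xfa = 5. For the iprm_shutdown pattern (PRMDATA[7] =
 * 0x01) the decoded length is 0xfe, hence the capped value 8 is returned.
 */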

/* Timers */
static void iucv_sock_timeout(unsigned long arg)
{
        struct sock *sk = (struct sock *)arg;

        bh_lock_sock(sk);
        sk->sk_err = ETIMEDOUT;
        sk->sk_state_change(sk);
        bh_unlock_sock(sk);

        iucv_sock_kill(sk);
        sock_put(sk);
}

static void iucv_sock_clear_timer(struct sock *sk)
{
        sk_stop_timer(sk, &sk->sk_timer);
}

static struct sock *__iucv_get_sock_by_name(char *nm)
{
        struct sock *sk;
        struct hlist_node *node;

        sk_for_each(sk, node, &iucv_sk_list.head)
                if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
                        return sk;

        return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
        skb_queue_purge(&sk->sk_receive_queue);
        skb_queue_purge(&sk->sk_write_queue);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
        struct sock *sk;

        /* Close non-accepted connections */
        while ((sk = iucv_accept_dequeue(parent, NULL))) {
                iucv_sock_close(sk);
                iucv_sock_kill(sk);
        }

        parent->sk_state = IUCV_CLOSED;
        sock_set_flag(parent, SOCK_ZAPPED);
}

/* Kill socket */
static void iucv_sock_kill(struct sock *sk)
{
        if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
                return;

        iucv_sock_unlink(&iucv_sk_list, sk);
        sock_set_flag(sk, SOCK_DEAD);
        sock_put(sk);
}

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
        unsigned char user_data[16];
        struct iucv_sock *iucv = iucv_sk(sk);
        int err;
        unsigned long timeo;

        iucv_sock_clear_timer(sk);
        lock_sock(sk);

        switch (sk->sk_state) {
        case IUCV_LISTEN:
                iucv_sock_cleanup_listen(sk);
                break;

        case IUCV_CONNECTED:
        case IUCV_DISCONN:
                err = 0;

                sk->sk_state = IUCV_CLOSING;
                sk->sk_state_change(sk);

                if (!skb_queue_empty(&iucv->send_skb_q)) {
                        if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
                                timeo = sk->sk_lingertime;
                        else
                                timeo = IUCV_DISCONN_TIMEOUT;
                        err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
                }

                sk->sk_state = IUCV_CLOSED;
                sk->sk_state_change(sk);

                if (iucv->path) {
                        low_nmcpy(user_data, iucv->src_name);
                        high_nmcpy(user_data, iucv->dst_name);
                        ASCEBC(user_data, sizeof(user_data));
                        err = iucv_path_sever(iucv->path, user_data);
                        iucv_path_free(iucv->path);
                        iucv->path = NULL;
                }

                sk->sk_err = ECONNRESET;
                sk->sk_state_change(sk);

                skb_queue_purge(&iucv->send_skb_q);
                skb_queue_purge(&iucv->backlog_skb_q);

                sock_set_flag(sk, SOCK_ZAPPED);
                break;

        default:
                sock_set_flag(sk, SOCK_ZAPPED);
                break;
        }

        release_sock(sk);
        iucv_sock_kill(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
        if (parent)
                sk->sk_type = parent->sk_type;
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
        struct sock *sk;

        sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
        if (!sk)
                return NULL;

        sock_init_data(sock, sk);
        INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
        spin_lock_init(&iucv_sk(sk)->accept_q_lock);
        skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
        INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list);
        spin_lock_init(&iucv_sk(sk)->message_q.lock);
        skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
        iucv_sk(sk)->send_tag = 0;
        iucv_sk(sk)->flags = 0;

        sk->sk_destruct = iucv_sock_destruct;
        sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
        sk->sk_allocation = GFP_DMA;

        sock_reset_flag(sk, SOCK_ZAPPED);

        sk->sk_protocol = proto;
        sk->sk_state    = IUCV_OPEN;

        setup_timer(&sk->sk_timer, iucv_sock_timeout, (unsigned long)sk);

        iucv_sock_link(&iucv_sk_list, sk);
        return sk;
}

/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol)
{
        struct sock *sk;

        if (sock->type != SOCK_STREAM)
                return -ESOCKTNOSUPPORT;

        sock->state = SS_UNCONNECTED;
        sock->ops = &iucv_sock_ops;

        sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
        if (!sk)
                return -ENOMEM;

        iucv_sock_init(sk, NULL);

        return 0;
}

void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
        write_lock_bh(&l->lock);
        sk_add_node(sk, &l->head);
        write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
        write_lock_bh(&l->lock);
        sk_del_node_init(sk);
        write_unlock_bh(&l->lock);
}

void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
        unsigned long flags;
        struct iucv_sock *par = iucv_sk(parent);

        sock_hold(sk);
        spin_lock_irqsave(&par->accept_q_lock, flags);
        list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
        spin_unlock_irqrestore(&par->accept_q_lock, flags);
        iucv_sk(sk)->parent = parent;
        parent->sk_ack_backlog++;
}

void iucv_accept_unlink(struct sock *sk)
{
        unsigned long flags;
        struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

        spin_lock_irqsave(&par->accept_q_lock, flags);
        list_del_init(&iucv_sk(sk)->accept_q);
        spin_unlock_irqrestore(&par->accept_q_lock, flags);
        iucv_sk(sk)->parent->sk_ack_backlog--;
        iucv_sk(sk)->parent = NULL;
        sock_put(sk);
}

struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
        struct iucv_sock *isk, *n;
        struct sock *sk;

        list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
                sk = (struct sock *) isk;
                lock_sock(sk);

                if (sk->sk_state == IUCV_CLOSED) {
                        iucv_accept_unlink(sk);
                        release_sock(sk);
                        continue;
                }

                if (sk->sk_state == IUCV_CONNECTED ||
                    sk->sk_state == IUCV_SEVERED ||
                    !newsock) {
                        iucv_accept_unlink(sk);
                        if (newsock)
                                sock_graft(sk, newsock);

                        if (sk->sk_state == IUCV_SEVERED)
                                sk->sk_state = IUCV_DISCONN;

                        release_sock(sk);
                        return sk;
                }

                release_sock(sk);
        }
        return NULL;
}

int iucv_sock_wait_state(struct sock *sk, int state, int state2,
                         unsigned long timeo)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        add_wait_queue(sk->sk_sleep, &wait);
        while (sk->sk_state != state && sk->sk_state != state2) {
                set_current_state(TASK_INTERRUPTIBLE);

                if (!timeo) {
                        err = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        err = sock_intr_errno(timeo);
                        break;
                }

                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock(sk);

                err = sock_error(sk);
                if (err)
                        break;
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(sk->sk_sleep, &wait);
        return err;
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
                          int addr_len)
{
        struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv;
        int err;

        /* Verify the input sockaddr */
        if (!addr || addr->sa_family != AF_IUCV)
                return -EINVAL;

        lock_sock(sk);
        if (sk->sk_state != IUCV_OPEN) {
                err = -EBADFD;
                goto done;
        }

        write_lock_bh(&iucv_sk_list.lock);

        iucv = iucv_sk(sk);
        if (__iucv_get_sock_by_name(sa->siucv_name)) {
                err = -EADDRINUSE;
                goto done_unlock;
        }
        if (iucv->path) {
                err = 0;
                goto done_unlock;
        }

        /* Bind the socket */
        memcpy(iucv->src_name, sa->siucv_name, 8);

        /* Copy the user id */
        memcpy(iucv->src_user_id, iucv_userid, 8);
        sk->sk_state = IUCV_BOUND;
        err = 0;

done_unlock:
        /* Release the socket list lock */
        write_unlock_bh(&iucv_sk_list.lock);
done:
        release_sock(sk);
        return err;
}

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
        struct iucv_sock *iucv = iucv_sk(sk);
        char query_buffer[80];
        char name[12];
        int err = 0;

        /* Set the userid and name */
        cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
        if (unlikely(err))
                return -EPROTO;

        memcpy(iucv->src_user_id, query_buffer, 8);

        write_lock_bh(&iucv_sk_list.lock);

        sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
        while (__iucv_get_sock_by_name(name)) {
                sprintf(name, "%08x",
                        atomic_inc_return(&iucv_sk_list.autobind_name));
        }

        write_unlock_bh(&iucv_sk_list.lock);

        memcpy(&iucv->src_name, name, 8);

        return err;
}

/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
                             int alen, int flags)
{
        struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv;
        unsigned char user_data[16];
        int err;

        if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
                return -EINVAL;

        if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
                return -EBADFD;

        if (sk->sk_type != SOCK_STREAM)
                return -EINVAL;

        iucv = iucv_sk(sk);

        if (sk->sk_state == IUCV_OPEN) {
                err = iucv_sock_autobind(sk);
                if (unlikely(err))
                        return err;
        }

        lock_sock(sk);

        /* Set the destination information */
        memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8);
        memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8);

        high_nmcpy(user_data, sa->siucv_name);
        low_nmcpy(user_data, iucv_sk(sk)->src_name);
        ASCEBC(user_data, sizeof(user_data));

        iucv = iucv_sk(sk);
        /* Create path. */
        iucv->path = iucv_path_alloc(IUCV_QUEUELEN_DEFAULT,
                                     IUCV_IPRMDATA, GFP_KERNEL);
        if (!iucv->path) {
                err = -ENOMEM;
                goto done;
        }
        err = iucv_path_connect(iucv->path, &af_iucv_handler,
                                sa->siucv_user_id, NULL, user_data, sk);
        if (err) {
                iucv_path_free(iucv->path);
                iucv->path = NULL;
                switch (err) {
                case 0x0b:      /* Target communicator is not logged on */
                        err = -ENETUNREACH;
                        break;
                case 0x0d:      /* Max connections for this guest exceeded */
                case 0x0e:      /* Max connections for target guest exceeded */
                        err = -EAGAIN;
                        break;
                case 0x0f:      /* Missing IUCV authorization */
                        err = -EACCES;
                        break;
                default:
                        err = -ECONNREFUSED;
                        break;
                }
                goto done;
        }

        if (sk->sk_state != IUCV_CONNECTED) {
                err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
                                sock_sndtimeo(sk, flags & O_NONBLOCK));
        }

        if (sk->sk_state == IUCV_DISCONN) {
                err = -ECONNREFUSED;
        }

        if (err) {
                iucv_path_sever(iucv->path, NULL);
                iucv_path_free(iucv->path);
                iucv->path = NULL;
        }

done:
        release_sock(sk);
        return err;
}

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
        struct sock *sk = sock->sk;
        int err;

        lock_sock(sk);

        err = -EINVAL;
        if (sk->sk_state != IUCV_BOUND || sock->type != SOCK_STREAM)
                goto done;

        sk->sk_max_ack_backlog = backlog;
        sk->sk_ack_backlog = 0;
        sk->sk_state = IUCV_LISTEN;
        err = 0;

done:
        release_sock(sk);
        return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
                            int flags)
{
        DECLARE_WAITQUEUE(wait, current);
        struct sock *sk = sock->sk, *nsk;
        long timeo;
        int err = 0;

        lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

        if (sk->sk_state != IUCV_LISTEN) {
                err = -EBADFD;
                goto done;
        }

        timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

        /* Wait for an incoming connection */
        add_wait_queue_exclusive(sk->sk_sleep, &wait);
        while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (!timeo) {
                        err = -EAGAIN;
                        break;
                }

                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

                if (sk->sk_state != IUCV_LISTEN) {
                        err = -EBADFD;
                        break;
                }

                if (signal_pending(current)) {
                        err = sock_intr_errno(timeo);
                        break;
                }
        }

        set_current_state(TASK_RUNNING);
        remove_wait_queue(sk->sk_sleep, &wait);

        if (err)
                goto done;

        newsock->state = SS_CONNECTED;

done:
        release_sock(sk);
        return err;
}

static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
                             int *len, int peer)
{
        struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;

        addr->sa_family = AF_IUCV;
        *len = sizeof(struct sockaddr_iucv);

        if (peer) {
                memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
                memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
        } else {
                memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
                memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
        }
        memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
        memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
        memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

        return 0;
}
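
/*
 * Userspace sketch (illustrative only, not compiled as part of this file):
 * filling in a struct sockaddr_iucv for bind() or connect(). The AF_IUCV
 * constant and the sockaddr_iucv layout are assumed to come from the
 * kernel headers (linux/socket.h, af_iucv.h); "PEERVM" and "APPSRV" are
 * hypothetical names. Both fields are fixed-width and conventionally
 * blank-padded, not NUL-terminated C strings.
 *
 *      struct sockaddr_iucv sa;
 *
 *      memset(&sa, 0, sizeof(sa));
 *      sa.siucv_family = AF_IUCV;
 *      memset(sa.siucv_user_id, ' ', sizeof(sa.siucv_user_id));
 *      memcpy(sa.siucv_user_id, "PEERVM", 6);
 *      memset(sa.siucv_name, ' ', sizeof(sa.siucv_name));
 *      memcpy(sa.siucv_name, "APPSRV", 6);
 *      if (connect(fd, (struct sockaddr *) &sa, sizeof(sa)) < 0)
 *              perror("connect");
 */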

/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:       IUCV path
 * @msg:        Pointer to a struct iucv_message
 * @skb:        The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list of the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the
 * parameter list and the socket data length at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
                          struct sk_buff *skb)
{
        u8 prmdata[8];

        memset(prmdata, 0, sizeof(prmdata));    /* don't leak stack bytes */
        memcpy(prmdata, (void *) skb->data, skb->len);
        prmdata[7] = 0xff - (u8) skb->len;
        return iucv_message_send(path, msg, IUCV_IPRMDATA, 0,
                                 (void *) prmdata, 8);
}
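
/*
 * For example, sending the 3-byte payload "abc" yields prmdata[0..2] =
 * "abc", prmdata[3..6] = 0x00 and prmdata[7] = 0xff - 3 = 0xfc; the
 * receiver recovers the length via iucv_msg_length().
 */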

static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                             struct msghdr *msg, size_t len)
{
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        struct sk_buff *skb;
        struct iucv_message txmsg;
        struct cmsghdr *cmsg;
        int cmsg_done;
        char user_id[9];
        char appl_id[9];
        int err;

        err = sock_error(sk);
        if (err)
                return err;

        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        lock_sock(sk);

        if (sk->sk_shutdown & SEND_SHUTDOWN) {
                err = -EPIPE;
                goto out;
        }

        if (sk->sk_state == IUCV_CONNECTED) {
                /* initialize defaults */
                cmsg_done   = 0;        /* check for duplicate headers */
                txmsg.class = 0;

                /* iterate over control messages */
                for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
                     cmsg = CMSG_NXTHDR(msg, cmsg)) {

                        if (!CMSG_OK(msg, cmsg)) {
                                err = -EINVAL;
                                goto out;
                        }

                        if (cmsg->cmsg_level != SOL_IUCV)
                                continue;

                        if (cmsg->cmsg_type & cmsg_done) {
                                err = -EINVAL;
                                goto out;
                        }
                        cmsg_done |= cmsg->cmsg_type;

                        switch (cmsg->cmsg_type) {
                        case SCM_IUCV_TRGCLS:
                                if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
                                        err = -EINVAL;
                                        goto out;
                                }

                                /* set iucv message target class */
                                memcpy(&txmsg.class,
                                        (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);

                                break;

                        default:
                                err = -EINVAL;
                                goto out;
                                break;
                        }
                }

                if (!(skb = sock_alloc_send_skb(sk, len,
                                                msg->msg_flags & MSG_DONTWAIT,
                                                &err)))
                        goto out;

                if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
                        err = -EFAULT;
                        goto fail;
                }

                /* increment and save iucv message tag for msg_completion cbk */
                txmsg.tag = iucv->send_tag++;
                memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
                skb_queue_tail(&iucv->send_skb_q, skb);

                if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
                    && skb->len <= 7) {
                        err = iucv_send_iprm(iucv->path, &txmsg, skb);

                        /* on success: there is no message_complete callback
                         * for an IPRMDATA msg; remove skb from send queue */
                        if (err == 0) {
                                skb_unlink(skb, &iucv->send_skb_q);
                                kfree_skb(skb);
                        }

                        /* this error should never happen since the
                         * IUCV_IPRMDATA path flag is set... sever path */
                        if (err == 0x15) {
                                iucv_path_sever(iucv->path, NULL);
                                skb_unlink(skb, &iucv->send_skb_q);
                                err = -EPIPE;
                                goto fail;
                        }
                } else
                        err = iucv_message_send(iucv->path, &txmsg, 0, 0,
                                                (void *) skb->data, skb->len);
                if (err) {
                        if (err == 3) {
                                user_id[8] = 0;
                                memcpy(user_id, iucv->dst_user_id, 8);
                                appl_id[8] = 0;
                                memcpy(appl_id, iucv->dst_name, 8);
                                pr_err("Application %s on z/VM guest %s"
                                       " exceeds message limit\n",
                                       user_id, appl_id);
                        }
                        skb_unlink(skb, &iucv->send_skb_q);
                        err = -EPIPE;
                        goto fail;
                }

        } else {
                err = -ENOTCONN;
                goto out;
        }

        release_sock(sk);
        return len;

fail:
        kfree_skb(skb);
out:
        release_sock(sk);
        return err;
}
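
/*
 * Userspace sketch (illustrative only, not compiled as part of this file):
 * passing an iucv message target class to sendmsg() via the SCM_IUCV_TRGCLS
 * ancillary message handled above. SOL_IUCV and SCM_IUCV_TRGCLS are assumed
 * to come from the kernel headers; the target class 0x00010000 is arbitrary.
 *
 *      char buf[] = "hello", cbuf[CMSG_SPACE(sizeof(uint32_t))];
 *      uint32_t trgcls = 0x00010000;
 *      struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *      struct msghdr mh = {
 *              .msg_iov = &iov, .msg_iovlen = 1,
 *              .msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *      };
 *      struct cmsghdr *cm = CMSG_FIRSTHDR(&mh);
 *
 *      cm->cmsg_level = SOL_IUCV;
 *      cm->cmsg_type = SCM_IUCV_TRGCLS;
 *      cm->cmsg_len = CMSG_LEN(sizeof(trgcls));
 *      memcpy(CMSG_DATA(cm), &trgcls, sizeof(trgcls));
 *      sendmsg(fd, &mh, 0);
 */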

static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
{
        int dataleft, size, copied = 0;
        struct sk_buff *nskb;

        dataleft = len;
        while (dataleft) {
                if (dataleft >= sk->sk_rcvbuf / 4)
                        size = sk->sk_rcvbuf / 4;
                else
                        size = dataleft;

                nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
                if (!nskb)
                        return -ENOMEM;

                /* copy target class to control buffer of new skb */
                memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);

                /* copy data fragment */
                memcpy(nskb->data, skb->data + copied, size);
                copied += size;
                dataleft -= size;

                skb_reset_transport_header(nskb);
                skb_reset_network_header(nskb);
                nskb->len = size;

                skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
        }

        return 0;
}

875                                  struct iucv_path *path,
876                                  struct iucv_message *msg)
877 {
878         int rc;
879         unsigned int len;
880
881         len = iucv_msg_length(msg);
882
883         /* store msg target class in the second 4 bytes of skb ctrl buffer */
884         /* Note: the first 4 bytes are reserved for msg tag */
885         memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);
886
887         /* check for special IPRM messages (e.g. iucv_sock_shutdown) */
888         if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
889                 if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
890                         skb->data = NULL;
891                         skb->len = 0;
892                 }
893         } else {
894                 rc = iucv_message_receive(path, msg, msg->flags & IUCV_IPRMDATA,
895                                           skb->data, len, NULL);
896                 if (rc) {
897                         kfree_skb(skb);
898                         return;
899                 }
900                 if (skb->truesize >= sk->sk_rcvbuf / 4) {
901                         rc = iucv_fragment_skb(sk, skb, len);
902                         kfree_skb(skb);
903                         skb = NULL;
904                         if (rc) {
905                                 iucv_path_sever(path, NULL);
906                                 return;
907                         }
908                         skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
909                 } else {
910                         skb_reset_transport_header(skb);
911                         skb_reset_network_header(skb);
912                         skb->len = len;
913                 }
914         }
915
916         if (sock_queue_rcv_skb(sk, skb))
917                 skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
918 }
919
920 static void iucv_process_message_q(struct sock *sk)
921 {
922         struct iucv_sock *iucv = iucv_sk(sk);
923         struct sk_buff *skb;
924         struct sock_msg_q *p, *n;
925
926         list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
927                 skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
928                 if (!skb)
929                         break;
930                 iucv_process_message(sk, skb, p->path, &p->msg);
931                 list_del(&p->list);
932                 kfree(p);
933                 if (!skb_queue_empty(&iucv->backlog_skb_q))
934                         break;
935         }
936 }
937
938 static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
939                              struct msghdr *msg, size_t len, int flags)
940 {
941         int noblock = flags & MSG_DONTWAIT;
942         struct sock *sk = sock->sk;
943         struct iucv_sock *iucv = iucv_sk(sk);
944         int target, copied = 0;
945         struct sk_buff *skb, *rskb, *cskb;
946         int err = 0;
947
948         if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
949             skb_queue_empty(&iucv->backlog_skb_q) &&
950             skb_queue_empty(&sk->sk_receive_queue) &&
951             list_empty(&iucv->message_q.list))
952                 return 0;
953
954         if (flags & (MSG_OOB))
955                 return -EOPNOTSUPP;
956
957         target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
958
959         skb = skb_recv_datagram(sk, flags, noblock, &err);
960         if (!skb) {
961                 if (sk->sk_shutdown & RCV_SHUTDOWN)
962                         return 0;
963                 return err;
964         }
965
966         copied = min_t(unsigned int, skb->len, len);
967
968         cskb = skb;
969         if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
970                 skb_queue_head(&sk->sk_receive_queue, skb);
971                 if (copied == 0)
972                         return -EFAULT;
973                 goto done;
974         }
975
976         len -= copied;
977
        /* create control message to store iucv msg target class:
         * get the trgcls from the control buffer of the skb, because the
         * original iucv message may have been fragmented. */
        err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
                        CB_TRGCLS_LEN, CB_TRGCLS(skb));
        if (err) {
                if (!(flags & MSG_PEEK))
                        skb_queue_head(&sk->sk_receive_queue, skb);
                return err;
        }

        /* Mark read part of skb as used */
        if (!(flags & MSG_PEEK)) {
                skb_pull(skb, copied);

                if (skb->len) {
                        skb_queue_head(&sk->sk_receive_queue, skb);
                        goto done;
                }

                kfree_skb(skb);

                /* Queue backlog skbs */
                rskb = skb_dequeue(&iucv->backlog_skb_q);
                while (rskb) {
                        if (sock_queue_rcv_skb(sk, rskb)) {
                                skb_queue_head(&iucv->backlog_skb_q,
                                                rskb);
                                break;
                        } else {
                                rskb = skb_dequeue(&iucv->backlog_skb_q);
                        }
                }
                if (skb_queue_empty(&iucv->backlog_skb_q)) {
                        spin_lock_bh(&iucv->message_q.lock);
                        if (!list_empty(&iucv->message_q.list))
                                iucv_process_message_q(sk);
                        spin_unlock_bh(&iucv->message_q.lock);
                }

        } else
                skb_queue_head(&sk->sk_receive_queue, skb);

done:
        return err ? : copied;
}

static inline unsigned int iucv_accept_poll(struct sock *parent)
{
        struct iucv_sock *isk, *n;
        struct sock *sk;

        list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
                sk = (struct sock *) isk;

                if (sk->sk_state == IUCV_CONNECTED)
                        return POLLIN | POLLRDNORM;
        }

        return 0;
}

unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
                            poll_table *wait)
{
        struct sock *sk = sock->sk;
        unsigned int mask = 0;

        poll_wait(file, sk->sk_sleep, wait);

        if (sk->sk_state == IUCV_LISTEN)
                return iucv_accept_poll(sk);

        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
                mask |= POLLERR;

        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLRDHUP;

        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= POLLHUP;

        if (!skb_queue_empty(&sk->sk_receive_queue) ||
            (sk->sk_shutdown & RCV_SHUTDOWN))
                mask |= POLLIN | POLLRDNORM;

        if (sk->sk_state == IUCV_CLOSED)
                mask |= POLLHUP;

        if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
                mask |= POLLIN;

        if (sock_writeable(sk))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
        else
                set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

        return mask;
}

static int iucv_sock_shutdown(struct socket *sock, int how)
{
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        struct iucv_message txmsg;
        int err = 0;

        how++;

        if ((how & ~SHUTDOWN_MASK) || !how)
                return -EINVAL;

        lock_sock(sk);
        switch (sk->sk_state) {
        case IUCV_CLOSED:
                err = -ENOTCONN;
                goto fail;

        default:
                sk->sk_shutdown |= how;
                break;
        }

        if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
                txmsg.class = 0;
                txmsg.tag = 0;
                err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
                                        (void *) iprm_shutdown, 8);
                if (err) {
                        switch (err) {
                        case 1:
                                err = -ENOTCONN;
                                break;
                        case 2:
                                err = -ECONNRESET;
                                break;
                        default:
                                err = -ENOTCONN;
                                break;
                        }
                }
        }

        if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
                err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
                if (err)
                        err = -ENOTCONN;

                skb_queue_purge(&sk->sk_receive_queue);
        }

        /* Wake up anyone sleeping in poll */
        sk->sk_state_change(sk);

fail:
        release_sock(sk);
        return err;
}

static int iucv_sock_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        int err = 0;

        if (!sk)
                return 0;

        iucv_sock_close(sk);

        /* Unregister with IUCV base support */
        if (iucv_sk(sk)->path) {
                iucv_path_sever(iucv_sk(sk)->path, NULL);
                iucv_path_free(iucv_sk(sk)->path);
                iucv_sk(sk)->path = NULL;
        }

        sock_orphan(sk);
        iucv_sock_kill(sk);
        return err;
}

/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
                                char __user *optval, int optlen)
{
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        int val;
        int rc;

        if (level != SOL_IUCV)
                return -ENOPROTOOPT;

        if (optlen < sizeof(int))
                return -EINVAL;

        if (get_user(val, (int __user *) optval))
                return -EFAULT;

        rc = 0;

        lock_sock(sk);
        switch (optname) {
        case SO_IPRMDATA_MSG:
                if (val)
                        iucv->flags |= IUCV_IPRMDATA;
                else
                        iucv->flags &= ~IUCV_IPRMDATA;
                break;
        default:
                rc = -ENOPROTOOPT;
                break;
        }
        release_sock(sk);

        return rc;
}

static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
                                char __user *optval, int __user *optlen)
{
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        int val, len;

        if (level != SOL_IUCV)
                return -ENOPROTOOPT;

        if (get_user(len, optlen))
                return -EFAULT;

        if (len < 0)
                return -EINVAL;

        len = min_t(unsigned int, len, sizeof(int));

        switch (optname) {
        case SO_IPRMDATA_MSG:
                val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
                break;
        default:
                return -ENOPROTOOPT;
        }

        if (put_user(len, optlen))
                return -EFAULT;
        if (copy_to_user(optval, &val, len))
                return -EFAULT;

        return 0;
}
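
/*
 * Userspace sketch (illustrative only, not compiled as part of this file):
 * enabling IPRM data messages so that payloads of up to 7 bytes travel in
 * the message parameter list (see iucv_send_iprm() above). SOL_IUCV and
 * SO_IPRMDATA_MSG are assumed to come from the kernel headers.
 *
 *      int one = 1;
 *
 *      if (setsockopt(fd, SOL_IUCV, SO_IPRMDATA_MSG, &one, sizeof(one)))
 *              perror("setsockopt");
 */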

/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
                                 u8 ipvmid[8], u8 ipuser[16])
{
        unsigned char user_data[16];
        unsigned char nuser_data[16];
        unsigned char src_name[8];
        struct hlist_node *node;
        struct sock *sk, *nsk;
        struct iucv_sock *iucv, *niucv;
        int err;

        memcpy(src_name, ipuser, 8);
        EBCASC(src_name, 8);
        /* Find out if this path belongs to af_iucv. */
        read_lock(&iucv_sk_list.lock);
        iucv = NULL;
        sk = NULL;
        sk_for_each(sk, node, &iucv_sk_list.head)
                if (sk->sk_state == IUCV_LISTEN &&
                    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
                        /*
                         * Found a listening socket with
                         * src_name == ipuser[0-7].
                         */
                        iucv = iucv_sk(sk);
                        break;
                }
        read_unlock(&iucv_sk_list.lock);
        if (!iucv)
                /* No socket found, not one of our paths. */
                return -EINVAL;

        bh_lock_sock(sk);

        /* Check if parent socket is listening */
        low_nmcpy(user_data, iucv->src_name);
        high_nmcpy(user_data, iucv->dst_name);
        ASCEBC(user_data, sizeof(user_data));
        if (sk->sk_state != IUCV_LISTEN) {
                err = iucv_path_sever(path, user_data);
                iucv_path_free(path);
                goto fail;
        }

        /* Check for backlog size */
        if (sk_acceptq_is_full(sk)) {
                err = iucv_path_sever(path, user_data);
                iucv_path_free(path);
                goto fail;
        }

        /* Create the new socket */
        nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC);
        if (!nsk) {
                err = iucv_path_sever(path, user_data);
                iucv_path_free(path);
                goto fail;
        }

        niucv = iucv_sk(nsk);
        iucv_sock_init(nsk, sk);

        /* Set the new iucv_sock */
        memcpy(niucv->dst_name, ipuser + 8, 8);
        EBCASC(niucv->dst_name, 8);
        memcpy(niucv->dst_user_id, ipvmid, 8);
        memcpy(niucv->src_name, iucv->src_name, 8);
        memcpy(niucv->src_user_id, iucv->src_user_id, 8);
        niucv->path = path;

        /* Call iucv_accept */
        high_nmcpy(nuser_data, ipuser + 8);
        memcpy(nuser_data + 8, niucv->src_name, 8);
        ASCEBC(nuser_data + 8, 8);

        path->msglim = IUCV_QUEUELEN_DEFAULT;
        err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
        if (err) {
                err = iucv_path_sever(path, user_data);
                iucv_path_free(path);
                iucv_sock_kill(nsk);
                goto fail;
        }

        iucv_accept_enqueue(sk, nsk);

        /* Wake up accept */
        nsk->sk_state = IUCV_CONNECTED;
        sk->sk_data_ready(sk, 1);
        err = 0;
fail:
        bh_unlock_sock(sk);
        return 0;
}

static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
        struct sock *sk = path->private;

        sk->sk_state = IUCV_CONNECTED;
        sk->sk_state_change(sk);
}

static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
        struct sock *sk = path->private;
        struct iucv_sock *iucv = iucv_sk(sk);
        struct sk_buff *skb;
        struct sock_msg_q *save_msg;
        int len;

        if (sk->sk_shutdown & RCV_SHUTDOWN)
                return;

        if (!list_empty(&iucv->message_q.list) ||
            !skb_queue_empty(&iucv->backlog_skb_q))
                goto save_message;

        len = atomic_read(&sk->sk_rmem_alloc);
        len += iucv_msg_length(msg) + sizeof(struct sk_buff);
        if (len > sk->sk_rcvbuf)
                goto save_message;

        skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
        if (!skb)
                goto save_message;

        spin_lock(&iucv->message_q.lock);
        iucv_process_message(sk, skb, path, msg);
        spin_unlock(&iucv->message_q.lock);

        return;

save_message:
        save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
        if (!save_msg)
                return;
        save_msg->path = path;
        save_msg->msg = *msg;

        spin_lock(&iucv->message_q.lock);
        list_add_tail(&save_msg->list, &iucv->message_q.list);
        spin_unlock(&iucv->message_q.lock);
}

static void iucv_callback_txdone(struct iucv_path *path,
                                 struct iucv_message *msg)
{
        struct sock *sk = path->private;
        struct sk_buff *this = NULL;
        struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
        struct sk_buff *list_skb = list->next;
        unsigned long flags;

        if (!skb_queue_empty(list)) {
                spin_lock_irqsave(&list->lock, flags);

                while (list_skb != (struct sk_buff *)list) {
                        if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
                                this = list_skb;
                                break;
                        }
                        list_skb = list_skb->next;
                }
                if (this)
                        __skb_unlink(this, list);

                spin_unlock_irqrestore(&list->lock, flags);

                kfree_skb(this);
        }
        BUG_ON(!this);

        if (sk->sk_state == IUCV_CLOSING) {
                if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
                        sk->sk_state = IUCV_CLOSED;
                        sk->sk_state_change(sk);
                }
        }
}

static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
        struct sock *sk = path->private;

        if (!list_empty(&iucv_sk(sk)->accept_q))
                sk->sk_state = IUCV_SEVERED;
        else
                sk->sk_state = IUCV_DISCONN;

        sk->sk_state_change(sk);
}

/* called if the other communication side shuts down its RECV direction;
 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
 */
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
        struct sock *sk = path->private;

        bh_lock_sock(sk);
        if (sk->sk_state != IUCV_CLOSED) {
                sk->sk_shutdown |= SEND_SHUTDOWN;
                sk->sk_state_change(sk);
        }
        bh_unlock_sock(sk);
}

static struct proto_ops iucv_sock_ops = {
        .family         = PF_IUCV,
        .owner          = THIS_MODULE,
        .release        = iucv_sock_release,
        .bind           = iucv_sock_bind,
        .connect        = iucv_sock_connect,
        .listen         = iucv_sock_listen,
        .accept         = iucv_sock_accept,
        .getname        = iucv_sock_getname,
        .sendmsg        = iucv_sock_sendmsg,
        .recvmsg        = iucv_sock_recvmsg,
        .poll           = iucv_sock_poll,
        .ioctl          = sock_no_ioctl,
        .mmap           = sock_no_mmap,
        .socketpair     = sock_no_socketpair,
        .shutdown       = iucv_sock_shutdown,
        .setsockopt     = iucv_sock_setsockopt,
        .getsockopt     = iucv_sock_getsockopt,
};

static struct net_proto_family iucv_sock_family_ops = {
        .family = AF_IUCV,
        .owner  = THIS_MODULE,
        .create = iucv_sock_create,
};

static int __init afiucv_init(void)
{
        int err;

        if (!MACHINE_IS_VM) {
                pr_err("The af_iucv module cannot be loaded"
                       " without z/VM\n");
                err = -EPROTONOSUPPORT;
                goto out;
        }
        cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
        if (unlikely(err)) {
                WARN_ON(err);
                err = -EPROTONOSUPPORT;
                goto out;
        }

        err = iucv_register(&af_iucv_handler, 0);
        if (err)
                goto out;
        err = proto_register(&iucv_proto, 0);
        if (err)
                goto out_iucv;
        err = sock_register(&iucv_sock_family_ops);
        if (err)
                goto out_proto;
        return 0;

out_proto:
        proto_unregister(&iucv_proto);
out_iucv:
        iucv_unregister(&af_iucv_handler, 0);
out:
        return err;
}

static void __exit afiucv_exit(void)
{
        sock_unregister(PF_IUCV);
        proto_unregister(&iucv_proto);
        iucv_unregister(&af_iucv_handler, 0);
}

module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);