drivers/s390/net/netiucv.c
1 /*
2  * IUCV network driver
3  *
4  * Copyright IBM Corp. 2001, 2009
5  *
6  * Author(s):
7  *      Original netiucv driver:
8  *              Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
9  *      Sysfs integration and all bugs therein:
10  *              Cornelia Huck (cornelia.huck@de.ibm.com)
11  *      PM functions:
12  *              Ursula Braun (ursula.braun@de.ibm.com)
13  *
14  * Documentation used:
15  *  the source of the original IUCV driver by:
16  *    Stefan Hegewald <hegewald@de.ibm.com>
17  *    Hartmut Penner <hpenner@de.ibm.com>
18  *    Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
19  *    Martin Schwidefsky (schwidefsky@de.ibm.com)
20  *    Alan Altmark (Alan_Altmark@us.ibm.com)  Sept. 2000
21  *
22  * This program is free software; you can redistribute it and/or modify
23  * it under the terms of the GNU General Public License as published by
24  * the Free Software Foundation; either version 2, or (at your option)
25  * any later version.
26  *
27  * This program is distributed in the hope that it will be useful,
28  * but WITHOUT ANY WARRANTY; without even the implied warranty of
29  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
30  * GNU General Public License for more details.
31  *
32  * You should have received a copy of the GNU General Public License
33  * along with this program; if not, write to the Free Software
34  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
35  *
36  */
37
38 #define KMSG_COMPONENT "netiucv"
39 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
40
41 #undef DEBUG
42
43 #include <linux/module.h>
44 #include <linux/init.h>
45 #include <linux/kernel.h>
46 #include <linux/slab.h>
47 #include <linux/errno.h>
48 #include <linux/types.h>
49 #include <linux/interrupt.h>
50 #include <linux/timer.h>
51 #include <linux/bitops.h>
52
53 #include <linux/signal.h>
54 #include <linux/string.h>
55 #include <linux/device.h>
56
57 #include <linux/ip.h>
58 #include <linux/if_arp.h>
59 #include <linux/tcp.h>
60 #include <linux/skbuff.h>
61 #include <linux/ctype.h>
62 #include <net/dst.h>
63
64 #include <asm/io.h>
65 #include <asm/uaccess.h>
66
67 #include <net/iucv/iucv.h>
68 #include "fsm.h"
69
70 MODULE_AUTHOR
71     ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
72 MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
73
74 /**
75  * Debug Facility stuff
76  */
77 #define IUCV_DBF_SETUP_NAME "iucv_setup"
78 #define IUCV_DBF_SETUP_LEN 32
79 #define IUCV_DBF_SETUP_PAGES 2
80 #define IUCV_DBF_SETUP_NR_AREAS 1
81 #define IUCV_DBF_SETUP_LEVEL 3
82
83 #define IUCV_DBF_DATA_NAME "iucv_data"
84 #define IUCV_DBF_DATA_LEN 128
85 #define IUCV_DBF_DATA_PAGES 2
86 #define IUCV_DBF_DATA_NR_AREAS 1
87 #define IUCV_DBF_DATA_LEVEL 2
88
89 #define IUCV_DBF_TRACE_NAME "iucv_trace"
90 #define IUCV_DBF_TRACE_LEN 16
91 #define IUCV_DBF_TRACE_PAGES 4
92 #define IUCV_DBF_TRACE_NR_AREAS 1
93 #define IUCV_DBF_TRACE_LEVEL 3
94
95 #define IUCV_DBF_TEXT(name,level,text) \
96         do { \
97                 debug_text_event(iucv_dbf_##name,level,text); \
98         } while (0)
99
100 #define IUCV_DBF_HEX(name,level,addr,len) \
101         do { \
102                 debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
103         } while (0)
104
105 DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
106
107 /* Filter out low debug levels early to avoid wasted sprintf calls */
108 static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
109 {
110         return (level <= dbf_grp->level);
111 }
112
113 #define IUCV_DBF_TEXT_(name, level, text...) \
114         do { \
115                 if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
116                         char* iucv_dbf_txt_buf = \
117                                         get_cpu_var(iucv_dbf_txt_buf); \
118                         sprintf(iucv_dbf_txt_buf, text); \
119                         debug_text_event(iucv_dbf_##name, level, \
120                                                 iucv_dbf_txt_buf); \
121                         put_cpu_var(iucv_dbf_txt_buf); \
122                 } \
123         } while (0)
124
125 #define IUCV_DBF_SPRINTF(name,level,text...) \
126         do { \
127                 debug_sprintf_event(iucv_dbf_trace, level, text); \
129         } while (0)
130
131 /**
132  * some more debug stuff
133  */
134 #define IUCV_HEXDUMP16(importance,header,ptr) \
135 PRINT_##importance(header "%02x %02x %02x %02x  %02x %02x %02x %02x  " \
136                    "%02x %02x %02x %02x  %02x %02x %02x %02x\n", \
137                    *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
138                    *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
139                    *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
140                    *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
141                    *(((char*)ptr)+12),*(((char*)ptr)+13), \
142                    *(((char*)ptr)+14),*(((char*)ptr)+15)); \
143 PRINT_##importance(header "%02x %02x %02x %02x  %02x %02x %02x %02x  " \
144                    "%02x %02x %02x %02x  %02x %02x %02x %02x\n", \
145                    *(((char*)ptr)+16),*(((char*)ptr)+17), \
146                    *(((char*)ptr)+18),*(((char*)ptr)+19), \
147                    *(((char*)ptr)+20),*(((char*)ptr)+21), \
148                    *(((char*)ptr)+22),*(((char*)ptr)+23), \
149                    *(((char*)ptr)+24),*(((char*)ptr)+25), \
150                    *(((char*)ptr)+26),*(((char*)ptr)+27), \
151                    *(((char*)ptr)+28),*(((char*)ptr)+29), \
152                    *(((char*)ptr)+30),*(((char*)ptr)+31));
153
154 #define PRINTK_HEADER " iucv: "       /* for debugging */
155
156 /* dummy device to make sure netiucv_pm functions are called */
157 static struct device *netiucv_dev;
158
159 static int netiucv_pm_prepare(struct device *);
160 static void netiucv_pm_complete(struct device *);
161 static int netiucv_pm_freeze(struct device *);
162 static int netiucv_pm_restore_thaw(struct device *);
163
164 static struct dev_pm_ops netiucv_pm_ops = {
165         .prepare = netiucv_pm_prepare,
166         .complete = netiucv_pm_complete,
167         .freeze = netiucv_pm_freeze,
168         .thaw = netiucv_pm_restore_thaw,
169         .restore = netiucv_pm_restore_thaw,
170 };
171
172 static struct device_driver netiucv_driver = {
173         .owner = THIS_MODULE,
174         .name = "netiucv",
175         .bus  = &iucv_bus,
176         .pm = &netiucv_pm_ops,
177 };
178
179 static int netiucv_callback_connreq(struct iucv_path *,
180                                     u8 ipvmid[8], u8 ipuser[16]);
181 static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
182 static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
183 static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]);
184 static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]);
185 static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
186 static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);
187
188 static struct iucv_handler netiucv_handler = {
189         .path_pending     = netiucv_callback_connreq,
190         .path_complete    = netiucv_callback_connack,
191         .path_severed     = netiucv_callback_connrej,
192         .path_quiesced    = netiucv_callback_connsusp,
193         .path_resumed     = netiucv_callback_connres,
194         .message_pending  = netiucv_callback_rx,
195         .message_complete = netiucv_callback_txdone
196 };
197
198 /**
199  * Per connection profiling data
200  */
201 struct connection_profile {
202         unsigned long maxmulti;
203         unsigned long maxcqueue;
204         unsigned long doios_single;
205         unsigned long doios_multi;
206         unsigned long txlen;
207         unsigned long tx_time;
208         struct timespec send_stamp;
209         unsigned long tx_pending;
210         unsigned long tx_max_pending;
211 };
212
213 /**
214  * Representation of one iucv connection
215  */
216 struct iucv_connection {
217         struct list_head          list;
218         struct iucv_path          *path;
219         struct sk_buff            *rx_buff;
220         struct sk_buff            *tx_buff;
221         struct sk_buff_head       collect_queue;
222         struct sk_buff_head       commit_queue;
223         spinlock_t                collect_lock;
224         int                       collect_len;
225         int                       max_buffsize;
226         fsm_timer                 timer;
227         fsm_instance              *fsm;
228         struct net_device         *netdev;
229         struct connection_profile prof;
230         char                      userid[9];
231 };
232
233 /**
234  * Linked list of all connection structs.
235  */
236 static LIST_HEAD(iucv_connection_list);
237 static DEFINE_RWLOCK(iucv_connection_rwlock);
238
239 /**
240  * Representation of event-data for the
241  * connection state machine.
242  */
243 struct iucv_event {
244         struct iucv_connection *conn;
245         void                   *data;
246 };
247
248 /**
249  * Private part of the network device structure
250  */
251 struct netiucv_priv {
252         struct net_device_stats stats;
253         unsigned long           tbusy;
254         fsm_instance            *fsm;
255         struct iucv_connection  *conn;
256         struct device           *dev;
257         int                      pm_state;
258 };
259
260 /**
261  * Link level header for a packet.
262  */
263 struct ll_header {
264         u16 next;
265 };
266
267 #define NETIUCV_HDRLEN           (sizeof(struct ll_header))
268 #define NETIUCV_BUFSIZE_MAX      32768
269 #define NETIUCV_BUFSIZE_DEFAULT  NETIUCV_BUFSIZE_MAX
270 #define NETIUCV_MTU_MAX          (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
271 #define NETIUCV_MTU_DEFAULT      9216
272 #define NETIUCV_QUEUELEN_DEFAULT 50
273 #define NETIUCV_TIMEOUT_5SEC     5000
274
275 /**
276  * Compatibility macros for busy handling
277  * of network devices.
278  */
279 static inline void netiucv_clear_busy(struct net_device *dev)
280 {
281         struct netiucv_priv *priv = netdev_priv(dev);
282         clear_bit(0, &priv->tbusy);
283         netif_wake_queue(dev);
284 }
285
286 static inline int netiucv_test_and_set_busy(struct net_device *dev)
287 {
288         struct netiucv_priv *priv = netdev_priv(dev);
289         netif_stop_queue(dev);
290         return test_and_set_bit(0, &priv->tbusy);
291 }
292
293 static u8 iucvMagic[16] = {
294         0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
295         0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
296 };
297
298 /**
299  * Convert an iucv userId to its printable
300  * form (strip whitespace at end).
301  *
302  * @param name An iucv userId
303  *
304  * @returns The printable string (static data!!)
305  */
306 static char *netiucv_printname(char *name)
307 {
308         static char tmp[9];
309         char *p = tmp;
310         memcpy(tmp, name, 8);
311         tmp[8] = '\0';
312         while (*p && (!isspace(*p)))
313                 p++;
314         *p = '\0';
315         return tmp;
316 }
317
318 /**
319  * States of the interface statemachine.
320  */
321 enum dev_states {
322         DEV_STATE_STOPPED,
323         DEV_STATE_STARTWAIT,
324         DEV_STATE_STOPWAIT,
325         DEV_STATE_RUNNING,
326         /**
327          * MUST always be the last element!!
328          */
329         NR_DEV_STATES
330 };
331
332 static const char *dev_state_names[] = {
333         "Stopped",
334         "StartWait",
335         "StopWait",
336         "Running",
337 };
338
339 /**
340  * Events of the interface statemachine.
341  */
342 enum dev_events {
343         DEV_EVENT_START,
344         DEV_EVENT_STOP,
345         DEV_EVENT_CONUP,
346         DEV_EVENT_CONDOWN,
347         /**
348          * MUST always be the last element!!
349          */
350         NR_DEV_EVENTS
351 };
352
353 static const char *dev_event_names[] = {
354         "Start",
355         "Stop",
356         "Connection up",
357         "Connection down",
358 };
359
360 /**
361  * Events of the connection statemachine
362  */
363 enum conn_events {
364         /**
365          * Events, representing callbacks from
366          * lowlevel iucv layer
367          */
368         CONN_EVENT_CONN_REQ,
369         CONN_EVENT_CONN_ACK,
370         CONN_EVENT_CONN_REJ,
371         CONN_EVENT_CONN_SUS,
372         CONN_EVENT_CONN_RES,
373         CONN_EVENT_RX,
374         CONN_EVENT_TXDONE,
375
376         /**
377          * Events, representing error return codes from
378          * calls to lowlevel iucv layer
379          */
380
381         /**
382          * Event, representing timer expiry.
383          */
384         CONN_EVENT_TIMER,
385
386         /**
387          * Events, representing commands from upper levels.
388          */
389         CONN_EVENT_START,
390         CONN_EVENT_STOP,
391
392         /**
393          * MUST always be the last element!!
394          */
395         NR_CONN_EVENTS,
396 };
397
398 static const char *conn_event_names[] = {
399         "Remote connection request",
400         "Remote connection acknowledge",
401         "Remote connection reject",
402         "Connection suspended",
403         "Connection resumed",
404         "Data received",
405         "Data sent",
406
407         "Timer",
408
409         "Start",
410         "Stop",
411 };
412
413 /**
414  * States of the connection statemachine.
415  */
416 enum conn_states {
417         /**
418          * Connection not assigned to any device,
419          * initial state, invalid
420          */
421         CONN_STATE_INVALID,
422
423         /**
424          * Userid assigned but not operating
425          */
426         CONN_STATE_STOPPED,
427
428         /**
429          * Connection registered,
430          * no connection request sent yet,
431          * no connection request received
432          */
433         CONN_STATE_STARTWAIT,
434
435         /**
436          * Connection registered and connection request sent,
437          * no acknowledge and no connection request received yet.
438          */
439         CONN_STATE_SETUPWAIT,
440
441         /**
442          * Connection up and running idle
443          */
444         CONN_STATE_IDLE,
445
446         /**
447          * Data sent, awaiting CONN_EVENT_TXDONE
448          */
449         CONN_STATE_TX,
450
451         /**
452          * Error during registration.
453          */
454         CONN_STATE_REGERR,
455
456         /**
457          * Error during connection setup.
458          */
459         CONN_STATE_CONNERR,
460
461         /**
462          * MUST always be the last element!!
463          */
464         NR_CONN_STATES,
465 };
466
467 static const char *conn_state_names[] = {
468         "Invalid",
469         "Stopped",
470         "StartWait",
471         "SetupWait",
472         "Idle",
473         "TX",
475         "Registration error",
476         "Connect error",
477 };
478
479
480 /**
481  * Debug Facility Stuff
482  */
483 static debug_info_t *iucv_dbf_setup = NULL;
484 static debug_info_t *iucv_dbf_data = NULL;
485 static debug_info_t *iucv_dbf_trace = NULL;
486
487 DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
488
489 static void iucv_unregister_dbf_views(void)
490 {
491         if (iucv_dbf_setup)
492                 debug_unregister(iucv_dbf_setup);
493         if (iucv_dbf_data)
494                 debug_unregister(iucv_dbf_data);
495         if (iucv_dbf_trace)
496                 debug_unregister(iucv_dbf_trace);
497 }
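/*
 * Register the three s390 debug feature areas (setup, data, trace), add a
 * hex/ascii view to each and set the default debug level; if any of the
 * registrations fails, all areas are unregistered again.
 */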
498 static int iucv_register_dbf_views(void)
499 {
500         iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
501                                         IUCV_DBF_SETUP_PAGES,
502                                         IUCV_DBF_SETUP_NR_AREAS,
503                                         IUCV_DBF_SETUP_LEN);
504         iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
505                                        IUCV_DBF_DATA_PAGES,
506                                        IUCV_DBF_DATA_NR_AREAS,
507                                        IUCV_DBF_DATA_LEN);
508         iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
509                                         IUCV_DBF_TRACE_PAGES,
510                                         IUCV_DBF_TRACE_NR_AREAS,
511                                         IUCV_DBF_TRACE_LEN);
512
513         if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
514             (iucv_dbf_trace == NULL)) {
515                 iucv_unregister_dbf_views();
516                 return -ENOMEM;
517         }
518         debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
519         debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
520
521         debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
522         debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
523
524         debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
525         debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
526
527         return 0;
528 }
529
530 /*
531  * Callback-wrappers, called from lowlevel iucv layer.
532  */
533
534 static void netiucv_callback_rx(struct iucv_path *path,
535                                 struct iucv_message *msg)
536 {
537         struct iucv_connection *conn = path->private;
538         struct iucv_event ev;
539
540         ev.conn = conn;
541         ev.data = msg;
542         fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
543 }
544
545 static void netiucv_callback_txdone(struct iucv_path *path,
546                                     struct iucv_message *msg)
547 {
548         struct iucv_connection *conn = path->private;
549         struct iucv_event ev;
550
551         ev.conn = conn;
552         ev.data = msg;
553         fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
554 }
555
556 static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
557 {
558         struct iucv_connection *conn = path->private;
559
560         fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
561 }
562
563 static int netiucv_callback_connreq(struct iucv_path *path,
564                                     u8 ipvmid[8], u8 ipuser[16])
565 {
566         struct iucv_connection *conn = path->private;
567         struct iucv_event ev;
568         int rc;
569
570         if (memcmp(iucvMagic, ipuser, sizeof(iucvMagic)))
571                 /* ipuser must match iucvMagic. */
572                 return -EINVAL;
573         rc = -EINVAL;
574         read_lock_bh(&iucv_connection_rwlock);
575         list_for_each_entry(conn, &iucv_connection_list, list) {
576                 if (strncmp(ipvmid, conn->userid, 8))
577                         continue;
578                 /* Found a matching connection for this path. */
579                 conn->path = path;
580                 ev.conn = conn;
581                 ev.data = path;
582                 fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
583                 rc = 0;
584         }
585         read_unlock_bh(&iucv_connection_rwlock);
586         return rc;
587 }
588
589 static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
590 {
591         struct iucv_connection *conn = path->private;
592
593         fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
594 }
595
596 static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16])
597 {
598         struct iucv_connection *conn = path->private;
599
600         fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
601 }
602
603 static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16])
604 {
605         struct iucv_connection *conn = path->private;
606
607         fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
608 }
609
610 /**
611  * NOP action for statemachines
612  */
613 static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
614 {
615 }
616
617 /*
618  * Actions of the connection statemachine
619  */
620
621 /**
622  * netiucv_unpack_skb
623  * @conn: The connection where this skb has been received.
624  * @pskb: The received skb.
625  *
626  * Unpack a just received skb and hand it over to upper layers.
627  * Helper function for conn_action_rx.
628  */
629 static void netiucv_unpack_skb(struct iucv_connection *conn,
630                                struct sk_buff *pskb)
631 {
632         struct net_device     *dev = conn->netdev;
633         struct netiucv_priv   *privptr = netdev_priv(dev);
634         u16 offset = 0;
635
636         skb_put(pskb, NETIUCV_HDRLEN);
637         pskb->dev = dev;
638         pskb->ip_summed = CHECKSUM_NONE;
639         pskb->protocol = ntohs(ETH_P_IP);
640
641         while (1) {
642                 struct sk_buff *skb;
643                 struct ll_header *header = (struct ll_header *) pskb->data;
644
645                 if (!header->next)
646                         break;
647
648                 skb_pull(pskb, NETIUCV_HDRLEN);
649                 header->next -= offset;
650                 offset += header->next;
651                 header->next -= NETIUCV_HDRLEN;
652                 if (skb_tailroom(pskb) < header->next) {
653                         IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
654                                 header->next, skb_tailroom(pskb));
655                         return;
656                 }
657                 skb_put(pskb, header->next);
658                 skb_reset_mac_header(pskb);
659                 skb = dev_alloc_skb(pskb->len);
660                 if (!skb) {
661                         IUCV_DBF_TEXT(data, 2,
662                                 "Out of memory in netiucv_unpack_skb\n");
663                         privptr->stats.rx_dropped++;
664                         return;
665                 }
666                 skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
667                                           pskb->len);
668                 skb_reset_mac_header(skb);
669                 skb->dev = pskb->dev;
670                 skb->protocol = pskb->protocol;
671                 pskb->ip_summed = CHECKSUM_UNNECESSARY;
672                 privptr->stats.rx_packets++;
673                 privptr->stats.rx_bytes += skb->len;
674                 /*
675                  * Since receiving is always initiated from a tasklet (in iucv.c),
676                  * we must use netif_rx_ni() instead of netif_rx()
677                  */
678                 netif_rx_ni(skb);
679                 dev->last_rx = jiffies;
680                 skb_pull(pskb, header->next);
681                 skb_put(pskb, NETIUCV_HDRLEN);
682         }
683 }
684
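/**
 * conn_action_rx() - handle CONN_EVENT_RX: receive one IUCV message.
 *
 * Rejects the message if the connection has no network device or the
 * message exceeds the maximum buffer size; otherwise receives it into
 * conn->rx_buff and hands it to netiucv_unpack_skb().
 */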
685 static void conn_action_rx(fsm_instance *fi, int event, void *arg)
686 {
687         struct iucv_event *ev = arg;
688         struct iucv_connection *conn = ev->conn;
689         struct iucv_message *msg = ev->data;
690         struct netiucv_priv *privptr = netdev_priv(conn->netdev);
691         int rc;
692
693         IUCV_DBF_TEXT(trace, 4, __func__);
694
695         if (!conn->netdev) {
696                 iucv_message_reject(conn->path, msg);
697                 IUCV_DBF_TEXT(data, 2,
698                               "Received data for unlinked connection\n");
699                 return;
700         }
701         if (msg->length > conn->max_buffsize) {
702                 iucv_message_reject(conn->path, msg);
703                 privptr->stats.rx_dropped++;
704                 IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
705                                msg->length, conn->max_buffsize);
706                 return;
707         }
708         conn->rx_buff->data = conn->rx_buff->head;
709         skb_reset_tail_pointer(conn->rx_buff);
710         conn->rx_buff->len = 0;
711         rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
712                                   msg->length, NULL);
713         if (rc || msg->length < 5) {
714                 privptr->stats.rx_errors++;
715                 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
716                 return;
717         }
718         netiucv_unpack_skb(conn, conn->rx_buff);
719 }
720
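/**
 * conn_action_txdone() - handle CONN_EVENT_TXDONE for a completed send.
 *
 * Frees the skb taken from the commit queue, then gathers everything
 * queued on the collect queue into the transmit buffer and sends it as
 * one multi-packet IUCV message; returns to CONN_STATE_IDLE if there is
 * nothing left to send.
 */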
721 static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
722 {
723         struct iucv_event *ev = arg;
724         struct iucv_connection *conn = ev->conn;
725         struct iucv_message *msg = ev->data;
726         struct iucv_message txmsg;
727         struct netiucv_priv *privptr = NULL;
728         u32 single_flag = msg->tag;
729         u32 txbytes = 0;
730         u32 txpackets = 0;
731         u32 stat_maxcq = 0;
732         struct sk_buff *skb;
733         unsigned long saveflags;
734         struct ll_header header;
735         int rc;
736
737         IUCV_DBF_TEXT(trace, 4, __func__);
738
739         if (conn && conn->netdev)
740                 privptr = netdev_priv(conn->netdev);
741         conn->prof.tx_pending--;
742         if (single_flag) {
743                 if ((skb = skb_dequeue(&conn->commit_queue))) {
744                         if (privptr) {
745                                 privptr->stats.tx_packets++;
746                                 privptr->stats.tx_bytes +=
747                                         (skb->len - NETIUCV_HDRLEN
748                                                   - NETIUCV_HDRLEN);
749                         }
750                         atomic_dec(&skb->users);
751                         dev_kfree_skb_any(skb);
752                 }
753         }
754         conn->tx_buff->data = conn->tx_buff->head;
755         skb_reset_tail_pointer(conn->tx_buff);
756         conn->tx_buff->len = 0;
757         spin_lock_irqsave(&conn->collect_lock, saveflags);
758         while ((skb = skb_dequeue(&conn->collect_queue))) {
759                 header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
760                 memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
761                        NETIUCV_HDRLEN);
762                 skb_copy_from_linear_data(skb,
763                                           skb_put(conn->tx_buff, skb->len),
764                                           skb->len);
765                 txbytes += skb->len;
766                 txpackets++;
767                 stat_maxcq++;
768                 atomic_dec(&skb->users);
769                 dev_kfree_skb_any(skb);
770         }
771         if (conn->collect_len > conn->prof.maxmulti)
772                 conn->prof.maxmulti = conn->collect_len;
773         conn->collect_len = 0;
774         spin_unlock_irqrestore(&conn->collect_lock, saveflags);
775         if (conn->tx_buff->len == 0) {
776                 fsm_newstate(fi, CONN_STATE_IDLE);
777                 return;
778         }
779
780         header.next = 0;
781         memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
782         conn->prof.send_stamp = current_kernel_time();
783         txmsg.class = 0;
784         txmsg.tag = 0;
785         rc = iucv_message_send(conn->path, &txmsg, 0, 0,
786                                conn->tx_buff->data, conn->tx_buff->len);
787         conn->prof.doios_multi++;
788         conn->prof.txlen += conn->tx_buff->len;
789         conn->prof.tx_pending++;
790         if (conn->prof.tx_pending > conn->prof.tx_max_pending)
791                 conn->prof.tx_max_pending = conn->prof.tx_pending;
792         if (rc) {
793                 conn->prof.tx_pending--;
794                 fsm_newstate(fi, CONN_STATE_IDLE);
795                 if (privptr)
796                         privptr->stats.tx_errors += txpackets;
797                 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
798         } else {
799                 if (privptr) {
800                         privptr->stats.tx_packets += txpackets;
801                         privptr->stats.tx_bytes += txbytes;
802                 }
803                 if (stat_maxcq > conn->prof.maxcqueue)
804                         conn->prof.maxcqueue = stat_maxcq;
805         }
806 }
807
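/**
 * conn_action_connaccept() - accept an incoming IUCV path for this
 * connection and signal DEV_EVENT_CONUP to the interface statemachine.
 */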
808 static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
809 {
810         struct iucv_event *ev = arg;
811         struct iucv_connection *conn = ev->conn;
812         struct iucv_path *path = ev->data;
813         struct net_device *netdev = conn->netdev;
814         struct netiucv_priv *privptr = netdev_priv(netdev);
815         int rc;
816
817         IUCV_DBF_TEXT(trace, 3, __func__);
818
819         conn->path = path;
820         path->msglim = NETIUCV_QUEUELEN_DEFAULT;
821         path->flags = 0;
822         rc = iucv_path_accept(path, &netiucv_handler, NULL, conn);
823         if (rc) {
824                 IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
825                 return;
826         }
827         fsm_newstate(fi, CONN_STATE_IDLE);
828         netdev->tx_queue_len = conn->path->msglim;
829         fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
830 }
831
832 static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
833 {
834         struct iucv_event *ev = arg;
835         struct iucv_path *path = ev->data;
836
837         IUCV_DBF_TEXT(trace, 3, __func__);
838         iucv_path_sever(path, NULL);
839 }
840
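/**
 * conn_action_connack() - the peer has accepted our connection request;
 * stop the setup timer and mark the connection idle.
 */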
841 static void conn_action_connack(fsm_instance *fi, int event, void *arg)
842 {
843         struct iucv_connection *conn = arg;
844         struct net_device *netdev = conn->netdev;
845         struct netiucv_priv *privptr = netdev_priv(netdev);
846
847         IUCV_DBF_TEXT(trace, 3, __func__);
848         fsm_deltimer(&conn->timer);
849         fsm_newstate(fi, CONN_STATE_IDLE);
850         netdev->tx_queue_len = conn->path->msglim;
851         fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
852 }
853
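/**
 * conn_action_conntimsev() - the connection setup timer expired; sever
 * the pending path and fall back to CONN_STATE_STARTWAIT.
 */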
854 static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
855 {
856         struct iucv_connection *conn = arg;
857
858         IUCV_DBF_TEXT(trace, 3, __func__);
859         fsm_deltimer(&conn->timer);
860         iucv_path_sever(conn->path, NULL);
861         fsm_newstate(fi, CONN_STATE_STARTWAIT);
862 }
863
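/**
 * conn_action_connsever() - the peer severed the path; sever our end as
 * well and report the connection as down.
 */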
864 static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
865 {
866         struct iucv_connection *conn = arg;
867         struct net_device *netdev = conn->netdev;
868         struct netiucv_priv *privptr = netdev_priv(netdev);
869
870         IUCV_DBF_TEXT(trace, 3, __func__);
871
872         fsm_deltimer(&conn->timer);
873         iucv_path_sever(conn->path, NULL);
874         dev_info(privptr->dev, "The peer interface of the IUCV device"
875                 " has closed the connection\n");
876         IUCV_DBF_TEXT(data, 2,
877                       "conn_action_connsever: Remote dropped connection\n");
878         fsm_newstate(fi, CONN_STATE_STARTWAIT);
879         fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
880 }
881
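/**
 * conn_action_start() - allocate an IUCV path and try to connect to the
 * configured z/VM guest; connect error codes are mapped to console
 * messages and to CONN_STATE_STARTWAIT or CONN_STATE_CONNERR.
 */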
882 static void conn_action_start(fsm_instance *fi, int event, void *arg)
883 {
884         struct iucv_connection *conn = arg;
885         struct net_device *netdev = conn->netdev;
886         struct netiucv_priv *privptr = netdev_priv(netdev);
887         int rc;
888
889         IUCV_DBF_TEXT(trace, 3, __func__);
890
891         fsm_newstate(fi, CONN_STATE_STARTWAIT);
892         IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n",
893                 netdev->name, conn->userid);
894
895         /*
896          * We must set the state before calling iucv_connect because the
897          * callback handler could be called at any point after the connection
898          * request is sent
899          */
900
901         fsm_newstate(fi, CONN_STATE_SETUPWAIT);
902         conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
            if (!conn->path) {
                    IUCV_DBF_TEXT(setup, 2, "cannot allocate iucv path\n");
                    fsm_newstate(fi, CONN_STATE_CONNERR);
                    return;
            }
903         rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
904                                NULL, iucvMagic, conn);
905         switch (rc) {
906         case 0:
907                 netdev->tx_queue_len = conn->path->msglim;
908                 fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
909                              CONN_EVENT_TIMER, conn);
910                 return;
911         case 11:
912                 dev_warn(privptr->dev,
913                         "The IUCV device failed to connect to z/VM guest %s\n",
914                         netiucv_printname(conn->userid));
915                 fsm_newstate(fi, CONN_STATE_STARTWAIT);
916                 break;
917         case 12:
918                 dev_warn(privptr->dev,
919                         "The IUCV device failed to connect to the peer on z/VM"
920                         " guest %s\n", netiucv_printname(conn->userid));
921                 fsm_newstate(fi, CONN_STATE_STARTWAIT);
922                 break;
923         case 13:
924                 dev_err(privptr->dev,
925                         "Connecting the IUCV device would exceed the maximum"
926                         " number of IUCV connections\n");
927                 fsm_newstate(fi, CONN_STATE_CONNERR);
928                 break;
929         case 14:
930                 dev_err(privptr->dev,
931                         "z/VM guest %s has too many IUCV connections"
932                         " to connect with the IUCV device\n",
933                         netiucv_printname(conn->userid));
934                 fsm_newstate(fi, CONN_STATE_CONNERR);
935                 break;
936         case 15:
937                 dev_err(privptr->dev,
938                         "The IUCV device cannot connect to a z/VM guest with no"
939                         " IUCV authorization\n");
940                 fsm_newstate(fi, CONN_STATE_CONNERR);
941                 break;
942         default:
943                 dev_err(privptr->dev,
944                         "Connecting the IUCV device failed with error %d\n",
945                         rc);
946                 fsm_newstate(fi, CONN_STATE_CONNERR);
947                 break;
948         }
949         IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
950         kfree(conn->path);
951         conn->path = NULL;
952 }
953
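/**
 * netiucv_purge_skb_queue() - drop all skbs queued on @q and release the
 * extra reference that was taken when they were queued.
 */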
954 static void netiucv_purge_skb_queue(struct sk_buff_head *q)
955 {
956         struct sk_buff *skb;
957
958         while ((skb = skb_dequeue(q))) {
959                 atomic_dec(&skb->users);
960                 dev_kfree_skb_any(skb);
961         }
962 }
963
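/**
 * conn_action_stop() - stop the connection: purge the collect and commit
 * queues, sever and free the IUCV path and signal DEV_EVENT_CONDOWN.
 */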
964 static void conn_action_stop(fsm_instance *fi, int event, void *arg)
965 {
966         struct iucv_event *ev = arg;
967         struct iucv_connection *conn = ev->conn;
968         struct net_device *netdev = conn->netdev;
969         struct netiucv_priv *privptr = netdev_priv(netdev);
970
971         IUCV_DBF_TEXT(trace, 3, __func__);
972
973         fsm_deltimer(&conn->timer);
974         fsm_newstate(fi, CONN_STATE_STOPPED);
975         netiucv_purge_skb_queue(&conn->collect_queue);
976         if (conn->path) {
977                 IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
978                 iucv_path_sever(conn->path, iucvMagic);
979                 kfree(conn->path);
980                 conn->path = NULL;
981         }
982         netiucv_purge_skb_queue(&conn->commit_queue);
983         fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
984 }
985
986 static void conn_action_inval(fsm_instance *fi, int event, void *arg)
987 {
988         struct iucv_connection *conn = arg;
989         struct net_device *netdev = conn->netdev;
990
991         IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
992                 netdev->name, conn->userid);
993 }
994
995 static const fsm_node conn_fsm[] = {
996         { CONN_STATE_INVALID,   CONN_EVENT_START,    conn_action_inval      },
997         { CONN_STATE_STOPPED,   CONN_EVENT_START,    conn_action_start      },
998
999         { CONN_STATE_STOPPED,   CONN_EVENT_STOP,     conn_action_stop       },
1000         { CONN_STATE_STARTWAIT, CONN_EVENT_STOP,     conn_action_stop       },
1001         { CONN_STATE_SETUPWAIT, CONN_EVENT_STOP,     conn_action_stop       },
1002         { CONN_STATE_IDLE,      CONN_EVENT_STOP,     conn_action_stop       },
1003         { CONN_STATE_TX,        CONN_EVENT_STOP,     conn_action_stop       },
1004         { CONN_STATE_REGERR,    CONN_EVENT_STOP,     conn_action_stop       },
1005         { CONN_STATE_CONNERR,   CONN_EVENT_STOP,     conn_action_stop       },
1006
1007         { CONN_STATE_STOPPED,   CONN_EVENT_CONN_REQ, conn_action_connreject },
1008         { CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
1009         { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
1010         { CONN_STATE_IDLE,      CONN_EVENT_CONN_REQ, conn_action_connreject },
1011         { CONN_STATE_TX,        CONN_EVENT_CONN_REQ, conn_action_connreject },
1012
1013         { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack    },
1014         { CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER,    conn_action_conntimsev },
1015
1016         { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever  },
1017         { CONN_STATE_IDLE,      CONN_EVENT_CONN_REJ, conn_action_connsever  },
1018         { CONN_STATE_TX,        CONN_EVENT_CONN_REJ, conn_action_connsever  },
1019
1020         { CONN_STATE_IDLE,      CONN_EVENT_RX,       conn_action_rx         },
1021         { CONN_STATE_TX,        CONN_EVENT_RX,       conn_action_rx         },
1022
1023         { CONN_STATE_TX,        CONN_EVENT_TXDONE,   conn_action_txdone     },
1024         { CONN_STATE_IDLE,      CONN_EVENT_TXDONE,   conn_action_txdone     },
1025 };
1026
1027 static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
1028
1029
1030 /*
1031  * Actions for interface - statemachine.
1032  */
1033
1034 /**
1035  * dev_action_start
1036  * @fi: An instance of an interface statemachine.
1037  * @event: The event that just happened.
1038  * @arg: Generic pointer, cast from struct net_device * upon call.
1039  *
1040  * Start the connection by sending CONN_EVENT_START to it.
1041  */
1042 static void dev_action_start(fsm_instance *fi, int event, void *arg)
1043 {
1044         struct net_device   *dev = arg;
1045         struct netiucv_priv *privptr = netdev_priv(dev);
1046
1047         IUCV_DBF_TEXT(trace, 3, __func__);
1048
1049         fsm_newstate(fi, DEV_STATE_STARTWAIT);
1050         fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
1051 }
1052
1053 /**
1054  * Shut down the connection by sending CONN_EVENT_STOP to it.
1055  *
1056  * @param fi    An instance of an interface statemachine.
1057  * @param event The event that just happened.
1058  * @param arg   Generic pointer, cast from struct net_device * upon call.
1059  */
1060 static void
1061 dev_action_stop(fsm_instance *fi, int event, void *arg)
1062 {
1063         struct net_device   *dev = arg;
1064         struct netiucv_priv *privptr = netdev_priv(dev);
1065         struct iucv_event   ev;
1066
1067         IUCV_DBF_TEXT(trace, 3, __func__);
1068
1069         ev.conn = privptr->conn;
1070
1071         fsm_newstate(fi, DEV_STATE_STOPWAIT);
1072         fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
1073 }
1074
1075 /**
1076  * Called from connection statemachine
1077  * when a connection is up and running.
1078  *
1079  * @param fi    An instance of an interface statemachine.
1080  * @param event The event that just happened.
1081  * @param arg   Generic pointer, cast from struct net_device * upon call.
1082  */
1083 static void
1084 dev_action_connup(fsm_instance *fi, int event, void *arg)
1085 {
1086         struct net_device   *dev = arg;
1087         struct netiucv_priv *privptr = netdev_priv(dev);
1088
1089         IUCV_DBF_TEXT(trace, 3, __func__);
1090
1091         switch (fsm_getstate(fi)) {
1092                 case DEV_STATE_STARTWAIT:
1093                         fsm_newstate(fi, DEV_STATE_RUNNING);
1094                         dev_info(privptr->dev,
1095                                 "The IUCV device has been connected"
1096                                 " successfully to %s\n", privptr->conn->userid);
1097                         IUCV_DBF_TEXT(setup, 3,
1098                                 "connection is up and running\n");
1099                         break;
1100                 case DEV_STATE_STOPWAIT:
1101                         IUCV_DBF_TEXT(data, 2,
1102                                 "dev_action_connup: in DEV_STATE_STOPWAIT\n");
1103                         break;
1104         }
1105 }
1106
1107 /**
1108  * Called from connection statemachine
1109  * when a connection has been shutdown.
1110  *
1111  * @param fi    An instance of an interface statemachine.
1112  * @param event The event that just happened.
1113  * @param arg   Generic pointer, cast from struct net_device * upon call.
1114  */
1115 static void
1116 dev_action_conndown(fsm_instance *fi, int event, void *arg)
1117 {
1118         IUCV_DBF_TEXT(trace, 3, __func__);
1119
1120         switch (fsm_getstate(fi)) {
1121                 case DEV_STATE_RUNNING:
1122                         fsm_newstate(fi, DEV_STATE_STARTWAIT);
1123                         break;
1124                 case DEV_STATE_STOPWAIT:
1125                         fsm_newstate(fi, DEV_STATE_STOPPED);
1126                         IUCV_DBF_TEXT(setup, 3, "connection is down\n");
1127                         break;
1128         }
1129 }
1130
1131 static const fsm_node dev_fsm[] = {
1132         { DEV_STATE_STOPPED,    DEV_EVENT_START,   dev_action_start    },
1133
1134         { DEV_STATE_STOPWAIT,   DEV_EVENT_START,   dev_action_start    },
1135         { DEV_STATE_STOPWAIT,   DEV_EVENT_CONDOWN, dev_action_conndown },
1136
1137         { DEV_STATE_STARTWAIT,  DEV_EVENT_STOP,    dev_action_stop     },
1138         { DEV_STATE_STARTWAIT,  DEV_EVENT_CONUP,   dev_action_connup   },
1139
1140         { DEV_STATE_RUNNING,    DEV_EVENT_STOP,    dev_action_stop     },
1141         { DEV_STATE_RUNNING,    DEV_EVENT_CONDOWN, dev_action_conndown },
1142         { DEV_STATE_RUNNING,    DEV_EVENT_CONUP,   netiucv_action_nop  },
1143 };
1144
1145 static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
1146
1147 /**
1148  * Transmit a packet.
1149  * This is a helper function for netiucv_tx().
1150  *
1151  * @param conn Connection to be used for sending.
1152  * @param skb Pointer to struct sk_buff of packet to send.
1153  *            netiucv_tx() has already checked that there is
1154  *            enough headroom for the linklevel header.
1155  *
1156  * @return 0 on success, -ERRNO on failure.
1157  */
1158 static int netiucv_transmit_skb(struct iucv_connection *conn,
1159                                 struct sk_buff *skb)
1160 {
1161         struct iucv_message msg;
1162         unsigned long saveflags;
1163         struct ll_header header;
1164         int rc;
1165
1166         if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
1167                 int l = skb->len + NETIUCV_HDRLEN;
1168
1169                 spin_lock_irqsave(&conn->collect_lock, saveflags);
1170                 if (conn->collect_len + l >
1171                     (conn->max_buffsize - NETIUCV_HDRLEN)) {
1172                         rc = -EBUSY;
1173                         IUCV_DBF_TEXT(data, 2,
1174                                       "EBUSY from netiucv_transmit_skb\n");
1175                 } else {
1176                         atomic_inc(&skb->users);
1177                         skb_queue_tail(&conn->collect_queue, skb);
1178                         conn->collect_len += l;
1179                         rc = 0;
1180                 }
1181                 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
1182         } else {
1183                 struct sk_buff *nskb = skb;
1184                 /**
1185                  * Copy the skb to a newly allocated skb in lowmem only if the
1186                  * data is located above 2G in memory or tailroom is < 2.
1187                  */
1188                 unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
1189                                     NETIUCV_HDRLEN)) >> 31;
1190                 int copied = 0;
1191                 if (hi || (skb_tailroom(skb) < 2)) {
1192                         nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
1193                                          NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
1194                         if (!nskb) {
1195                                 IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
1196                                 rc = -ENOMEM;
1197                                 return rc;
1198                         } else {
1199                                 skb_reserve(nskb, NETIUCV_HDRLEN);
1200                                 memcpy(skb_put(nskb, skb->len),
1201                                        skb->data, skb->len);
1202                         }
1203                         copied = 1;
1204                 }
1205                 /**
1206                  * skb now is below 2G and has enough room. Add headers.
1207                  */
1208                 header.next = nskb->len + NETIUCV_HDRLEN;
1209                 memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1210                 header.next = 0;
1211                 memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header,  NETIUCV_HDRLEN);
1212
1213                 fsm_newstate(conn->fsm, CONN_STATE_TX);
1214                 conn->prof.send_stamp = current_kernel_time();
1215
1216                 msg.tag = 1;
1217                 msg.class = 0;
1218                 rc = iucv_message_send(conn->path, &msg, 0, 0,
1219                                        nskb->data, nskb->len);
1220                 conn->prof.doios_single++;
1221                 conn->prof.txlen += skb->len;
1222                 conn->prof.tx_pending++;
1223                 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
1224                         conn->prof.tx_max_pending = conn->prof.tx_pending;
1225                 if (rc) {
1226                         struct netiucv_priv *privptr;
1227                         fsm_newstate(conn->fsm, CONN_STATE_IDLE);
1228                         conn->prof.tx_pending--;
1229                         privptr = netdev_priv(conn->netdev);
1230                         if (privptr)
1231                                 privptr->stats.tx_errors++;
1232                         if (copied)
1233                                 dev_kfree_skb(nskb);
1234                         else {
1235                                 /**
1236                                  * Remove our headers. They get added
1237                                  * again on retransmit.
1238                                  */
1239                                 skb_pull(skb, NETIUCV_HDRLEN);
1240                                 skb_trim(skb, skb->len - NETIUCV_HDRLEN);
1241                         }
1242                         IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
1243                 } else {
1244                         if (copied)
1245                                 dev_kfree_skb(skb);
1246                         atomic_inc(&nskb->users);
1247                         skb_queue_tail(&conn->commit_queue, nskb);
1248                 }
1249         }
1250
1251         return rc;
1252 }
1253
1254 /*
1255  * Interface API for upper network layers
1256  */
1257
1258 /**
1259  * Open an interface.
1260  * Called from generic network layer when ifconfig up is run.
1261  *
1262  * @param dev Pointer to interface struct.
1263  *
1264  * @return 0 (this function never fails).
1265  */
1266 static int netiucv_open(struct net_device *dev)
1267 {
1268         struct netiucv_priv *priv = netdev_priv(dev);
1269
1270         fsm_event(priv->fsm, DEV_EVENT_START, dev);
1271         return 0;
1272 }
1273
1274 /**
1275  * Close an interface.
1276  * Called from generic network layer when ifconfig down is run.
1277  *
1278  * @param dev Pointer to interface struct.
1279  *
1280  * @return 0 (this function never fails).
1281  */
1282 static int netiucv_close(struct net_device *dev)
1283 {
1284         struct netiucv_priv *priv = netdev_priv(dev);
1285
1286         fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
1287         return 0;
1288 }
1289
1290 static int netiucv_pm_prepare(struct device *dev)
1291 {
1292         IUCV_DBF_TEXT(trace, 3, __func__);
1293         return 0;
1294 }
1295
1296 static void netiucv_pm_complete(struct device *dev)
1297 {
1298         IUCV_DBF_TEXT(trace, 3, __func__);
1299         return;
1300 }
1301
1302 /**
1303  * netiucv_pm_freeze() - Freeze PM callback
1304  * @dev:        netiucv device
1305  *
1306  * close open netiucv interfaces
1307  */
1308 static int netiucv_pm_freeze(struct device *dev)
1309 {
1310         struct netiucv_priv *priv = dev_get_drvdata(dev);
1311         struct net_device *ndev = NULL;
1312         int rc = 0;
1313
1314         IUCV_DBF_TEXT(trace, 3, __func__);
1315         if (priv && priv->conn)
1316                 ndev = priv->conn->netdev;
1317         if (!ndev)
1318                 goto out;
1319         netif_device_detach(ndev);
1320         priv->pm_state = fsm_getstate(priv->fsm);
1321         rc = netiucv_close(ndev);
1322 out:
1323         return rc;
1324 }
1325
1326 /**
1327  * netiucv_pm_restore_thaw() - Thaw and restore PM callback
1328  * @dev:        netiucv device
1329  *
1330  * re-open netiucv interfaces closed during freeze
1331  */
1332 static int netiucv_pm_restore_thaw(struct device *dev)
1333 {
1334         struct netiucv_priv *priv = dev_get_drvdata(dev);
1335         struct net_device *ndev = NULL;
1336         int rc = 0;
1337
1338         IUCV_DBF_TEXT(trace, 3, __func__);
1339         if (priv && priv->conn)
1340                 ndev = priv->conn->netdev;
1341         if (!ndev)
1342                 goto out;
1343         switch (priv->pm_state) {
1344         case DEV_STATE_RUNNING:
1345         case DEV_STATE_STARTWAIT:
1346                 rc = netiucv_open(ndev);
1347                 break;
1348         default:
1349                 break;
1350         }
1351         netif_device_attach(ndev);
1352 out:
1353         return rc;
1354 }
1355
1356 /**
1357  * Start transmission of a packet.
1358  * Called from generic network device layer.
1359  *
1360  * @param skb Pointer to buffer containing the packet.
1361  * @param dev Pointer to interface struct.
1362  *
1363  * @return NETDEV_TX_OK if the packet was consumed,
1364  *         NETDEV_TX_BUSY if it was rejected; a rejected packet is
1365  *         retried later by the generic network layer.
1366  */
1367 static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1368 {
1369         struct netiucv_priv *privptr = netdev_priv(dev);
1370         int rc;
1371
1372         IUCV_DBF_TEXT(trace, 4, __func__);
1373         /**
1374          * Some sanity checks ...
1375          */
1376         if (skb == NULL) {
1377                 IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
1378                 privptr->stats.tx_dropped++;
1379                 return 0;
1380         }
1381         if (skb_headroom(skb) < NETIUCV_HDRLEN) {
1382                 IUCV_DBF_TEXT(data, 2,
1383                         "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
1384                 dev_kfree_skb(skb);
1385                 privptr->stats.tx_dropped++;
1386                 return 0;
1387         }
1388
1389         /**
1390          * If connection is not running, try to restart it
1391          * and throw away packet.
1392          */
1393         if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
1394                 dev_kfree_skb(skb);
1395                 privptr->stats.tx_dropped++;
1396                 privptr->stats.tx_errors++;
1397                 privptr->stats.tx_carrier_errors++;
1398                 return 0;
1399         }
1400
1401         if (netiucv_test_and_set_busy(dev)) {
1402                 IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
1403                 return NETDEV_TX_BUSY;
1404         }
1405         dev->trans_start = jiffies;
1406         rc = netiucv_transmit_skb(privptr->conn, skb);
1407         netiucv_clear_busy(dev);
1408         return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
1409 }
1410
1411 /**
1412  * netiucv_stats
1413  * @dev: Pointer to interface struct.
1414  *
1415  * Returns interface statistics of a device.
1416  *
1417  * Returns pointer to stats struct of this interface.
1418  */
1419 static struct net_device_stats *netiucv_stats (struct net_device * dev)
1420 {
1421         struct netiucv_priv *priv = netdev_priv(dev);
1422
1423         IUCV_DBF_TEXT(trace, 5, __func__);
1424         return &priv->stats;
1425 }
1426
1427 /**
1428  * netiucv_change_mtu
1429  * @dev: Pointer to interface struct.
1430  * @new_mtu: The new MTU to use for this interface.
1431  *
1432  * Sets MTU of an interface.
1433  *
1434  * Returns 0 on success, -EINVAL if MTU is out of valid range.
1435  *         (valid range is 576 .. NETIUCV_MTU_MAX).
1436  */
1437 static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
1438 {
1439         IUCV_DBF_TEXT(trace, 3, __func__);
1440         if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
1441                 IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
1442                 return -EINVAL;
1443         }
1444         dev->mtu = new_mtu;
1445         return 0;
1446 }
1447
1448 /*
1449  * attributes in sysfs
1450  */
1451
1452 static ssize_t user_show(struct device *dev, struct device_attribute *attr,
1453                          char *buf)
1454 {
1455         struct netiucv_priv *priv = dev_get_drvdata(dev);
1456
1457         IUCV_DBF_TEXT(trace, 5, __func__);
1458         return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
1459 }
1460
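/*
 * Set the peer z/VM user ID via sysfs. The input is uppercased and padded
 * with blanks to 8 characters; only alphanumerics and '$' are accepted.
 * Changing the ID is refused while the interface is up or if another
 * netiucv connection already uses that ID.
 */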
1461 static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1462                           const char *buf, size_t count)
1463 {
1464         struct netiucv_priv *priv = dev_get_drvdata(dev);
1465         struct net_device *ndev = priv->conn->netdev;
1466         char    *p;
1467         char    *tmp;
1468         char    username[9];
1469         int     i;
1470         struct iucv_connection *cp;
1471
1472         IUCV_DBF_TEXT(trace, 3, __func__);
1473         if (count > 9) {
1474                 IUCV_DBF_TEXT_(setup, 2,
1475                                "%d is length of username\n", (int) count);
1476                 return -EINVAL;
1477         }
1478
1479         tmp = strsep((char **) &buf, "\n");
1480         for (i = 0, p = tmp; i < 8 && *p; i++, p++) {
1481                 if (isalnum(*p) || (*p == '$')) {
1482                         username[i]= toupper(*p);
1483                         continue;
1484                 }
1485                 if (*p == '\n') {
1486                         /* trailing lf, grr */
1487                         break;
1488                 }
1489                 IUCV_DBF_TEXT_(setup, 2,
1490                                "username: invalid character %c\n", *p);
1491                 return -EINVAL;
1492         }
1493         while (i < 8)
1494                 username[i++] = ' ';
1495         username[8] = '\0';
1496
1497         if (memcmp(username, priv->conn->userid, 9) &&
1498             (ndev->flags & (IFF_UP | IFF_RUNNING))) {
1499                 /* username changed while the interface is active. */
1500                 IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
1501                 return -EPERM;
1502         }
1503         read_lock_bh(&iucv_connection_rwlock);
1504         list_for_each_entry(cp, &iucv_connection_list, list) {
1505                 if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) {
1506                         read_unlock_bh(&iucv_connection_rwlock);
1507                         IUCV_DBF_TEXT_(setup, 2, "user_write: Connection "
1508                                 "to %s already exists\n", username);
1509                         return -EEXIST;
1510                 }
1511         }
1512         read_unlock_bh(&iucv_connection_rwlock);
1513         memcpy(priv->conn->userid, username, 9);
1514         return count;
1515 }
1516
1517 static DEVICE_ATTR(user, 0644, user_show, user_write);
1518
1519 static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
1520                             char *buf)
1521 {
1522         struct netiucv_priv *priv = dev_get_drvdata(dev);
1523
1524         IUCV_DBF_TEXT(trace, 5, __func__);
1525         return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1526 }
1527
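/*
 * Set the maximum IUCV buffer size via sysfs. The value must not exceed
 * NETIUCV_BUFSIZE_MAX, must leave room for the link level headers and,
 * while the interface is running, must still cover the current MTU.
 * When the interface is down, the MTU is adjusted to the new buffer size.
 */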
1528 static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1529                              const char *buf, size_t count)
1530 {
1531         struct netiucv_priv *priv = dev_get_drvdata(dev);
1532         struct net_device *ndev = priv->conn->netdev;
1533         char         *e;
1534         int          bs1;
1535
1536         IUCV_DBF_TEXT(trace, 3, __func__);
1537         if (count >= 39)
1538                 return -EINVAL;
1539
1540         bs1 = simple_strtoul(buf, &e, 0);
1541
1542         if (e && (!isspace(*e))) {
1543                 IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
1544                 return -EINVAL;
1545         }
1546         if (bs1 > NETIUCV_BUFSIZE_MAX) {
1547                 IUCV_DBF_TEXT_(setup, 2,
1548                         "buffer_write: buffer size %d too large\n",
1549                         bs1);
1550                 return -EINVAL;
1551         }
1552         if ((ndev->flags & IFF_RUNNING) &&
1553             (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
1554                 IUCV_DBF_TEXT_(setup, 2,
1555                         "buffer_write: buffer size %d too small\n",
1556                         bs1);
1557                 return -EINVAL;
1558         }
1559         if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
1560                 IUCV_DBF_TEXT_(setup, 2,
1561                         "buffer_write: buffer size %d too small\n",
1562                         bs1);
1563                 return -EINVAL;
1564         }
1565
1566         priv->conn->max_buffsize = bs1;
1567         if (!(ndev->flags & IFF_RUNNING))
1568                 ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
1569
1570         return count;
1571
1572 }
1573
1574 static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
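
/*
 * Note on the checks in buffer_write() above: the buffer must hold an
 * MTU-sized packet plus header overhead.  While the interface is down,
 * writing a new buffer size also rederives the MTU as
 *
 *   mtu = buffer_size - 2 * NETIUCV_HDRLEN
 *
 * so the buffer size and MTU stay consistent with what
 * netiucv_change_mtu() would accept (576 .. NETIUCV_MTU_MAX).
 */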
1575
1576 static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
1577                              char *buf)
1578 {
1579         struct netiucv_priv *priv = dev_get_drvdata(dev);
1580
1581         IUCV_DBF_TEXT(trace, 5, __func__);
1582         return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
1583 }
1584
1585 static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
1586
1587 static ssize_t conn_fsm_show (struct device *dev,
1588                               struct device_attribute *attr, char *buf)
1589 {
1590         struct netiucv_priv *priv = dev_get_drvdata(dev);
1591
1592         IUCV_DBF_TEXT(trace, 5, __func__);
1593         return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
1594 }
1595
1596 static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
1597
1598 static ssize_t maxmulti_show (struct device *dev,
1599                               struct device_attribute *attr, char *buf)
1600 {
1601         struct netiucv_priv *priv = dev_get_drvdata(dev);
1602
1603         IUCV_DBF_TEXT(trace, 5, __func__);
1604         return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1605 }
1606
1607 static ssize_t maxmulti_write (struct device *dev,
1608                                struct device_attribute *attr,
1609                                const char *buf, size_t count)
1610 {
1611         struct netiucv_priv *priv = dev_get_drvdata(dev);
1612
1613         IUCV_DBF_TEXT(trace, 4, __func__);
1614         priv->conn->prof.maxmulti = 0;
1615         return count;
1616 }
1617
1618 static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
1619
1620 static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
1621                            char *buf)
1622 {
1623         struct netiucv_priv *priv = dev_get_drvdata(dev);
1624
1625         IUCV_DBF_TEXT(trace, 5, __func__);
1626         return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1627 }
1628
1629 static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
1630                             const char *buf, size_t count)
1631 {
1632         struct netiucv_priv *priv = dev_get_drvdata(dev);
1633
1634         IUCV_DBF_TEXT(trace, 4, __func__);
1635         priv->conn->prof.maxcqueue = 0;
1636         return count;
1637 }
1638
1639 static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
1640
1641 static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
1642                            char *buf)
1643 {
1644         struct netiucv_priv *priv = dev_get_drvdata(dev);
1645
1646         IUCV_DBF_TEXT(trace, 5, __func__);
1647         return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1648 }
1649
1650 static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
1651                             const char *buf, size_t count)
1652 {
1653         struct netiucv_priv *priv = dev_get_drvdata(dev);
1654
1655         IUCV_DBF_TEXT(trace, 4, __func__);
1656         priv->conn->prof.doios_single = 0;
1657         return count;
1658 }
1659
1660 static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
1661
1662 static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
1663                            char *buf)
1664 {
1665         struct netiucv_priv *priv = dev_get_drvdata(dev);
1666
1667         IUCV_DBF_TEXT(trace, 5, __func__);
1668         return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1669 }
1670
1671 static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
1672                             const char *buf, size_t count)
1673 {
1674         struct netiucv_priv *priv = dev_get_drvdata(dev);
1675
1676         IUCV_DBF_TEXT(trace, 5, __func__);
1677         priv->conn->prof.doios_multi = 0;
1678         return count;
1679 }
1680
1681 static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
1682
1683 static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
1684                            char *buf)
1685 {
1686         struct netiucv_priv *priv = dev_get_drvdata(dev);
1687
1688         IUCV_DBF_TEXT(trace, 5, __func__);
1689         return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
1690 }
1691
1692 static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
1693                             const char *buf, size_t count)
1694 {
1695         struct netiucv_priv *priv = dev_get_drvdata(dev);
1696
1697         IUCV_DBF_TEXT(trace, 4, __func__);
1698         priv->conn->prof.txlen = 0;
1699         return count;
1700 }
1701
1702 static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
1703
1704 static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
1705                             char *buf)
1706 {
1707         struct netiucv_priv *priv = dev_get_drvdata(dev);
1708
1709         IUCV_DBF_TEXT(trace, 5, __func__);
1710         return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
1711 }
1712
1713 static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
1714                              const char *buf, size_t count)
1715 {
1716         struct netiucv_priv *priv = dev_get_drvdata(dev);
1717
1718         IUCV_DBF_TEXT(trace, 4, __func__);
1719         priv->conn->prof.tx_time = 0;
1720         return count;
1721 }
1722
1723 static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
1724
1725 static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
1726                             char *buf)
1727 {
1728         struct netiucv_priv *priv = dev_get_drvdata(dev);
1729
1730         IUCV_DBF_TEXT(trace, 5, __func__);
1731         return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
1732 }
1733
1734 static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
1735                              const char *buf, size_t count)
1736 {
1737         struct netiucv_priv *priv = dev_get_drvdata(dev);
1738
1739         IUCV_DBF_TEXT(trace, 4, __func__);
1740         priv->conn->prof.tx_pending = 0;
1741         return count;
1742 }
1743
1744 static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
1745
1746 static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
1747                             char *buf)
1748 {
1749         struct netiucv_priv *priv = dev_get_drvdata(dev);
1750
1751         IUCV_DBF_TEXT(trace, 5, __func__);
1752         return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
1753 }
1754
1755 static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
1756                              const char *buf, size_t count)
1757 {
1758         struct netiucv_priv *priv = dev_get_drvdata(dev);
1759
1760         IUCV_DBF_TEXT(trace, 4, __func__);
1761         priv->conn->prof.tx_max_pending = 0;
1762         return count;
1763 }
1764
1765 static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
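
/*
 * All of the statistics attributes above behave the same way on write:
 * the written value is ignored and the corresponding counter in
 * conn->prof is simply reset to zero, e.g.
 *
 *   echo 0 > /sys/bus/iucv/devices/netiucv0/stats/tx_pending
 *
 * (path shown for illustration only; the device name varies).
 */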
1766
1767 static struct attribute *netiucv_attrs[] = {
1768         &dev_attr_buffer.attr,
1769         &dev_attr_user.attr,
1770         NULL,
1771 };
1772
1773 static struct attribute_group netiucv_attr_group = {
1774         .attrs = netiucv_attrs,
1775 };
1776
1777 static struct attribute *netiucv_stat_attrs[] = {
1778         &dev_attr_device_fsm_state.attr,
1779         &dev_attr_connection_fsm_state.attr,
1780         &dev_attr_max_tx_buffer_used.attr,
1781         &dev_attr_max_chained_skbs.attr,
1782         &dev_attr_tx_single_write_ops.attr,
1783         &dev_attr_tx_multi_write_ops.attr,
1784         &dev_attr_netto_bytes.attr,
1785         &dev_attr_max_tx_io_time.attr,
1786         &dev_attr_tx_pending.attr,
1787         &dev_attr_tx_max_pending.attr,
1788         NULL,
1789 };
1790
1791 static struct attribute_group netiucv_stat_attr_group = {
1792         .name  = "stats",
1793         .attrs = netiucv_stat_attrs,
1794 };
1795
1796 static int netiucv_add_files(struct device *dev)
1797 {
1798         int ret;
1799
1800         IUCV_DBF_TEXT(trace, 3, __func__);
1801         ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
1802         if (ret)
1803                 return ret;
1804         ret = sysfs_create_group(&dev->kobj, &netiucv_stat_attr_group);
1805         if (ret)
1806                 sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
1807         return ret;
1808 }
1809
1810 static void netiucv_remove_files(struct device *dev)
1811 {
1812         IUCV_DBF_TEXT(trace, 3, __func__);
1813         sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
1814         sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
1815 }
1816
1817 static int netiucv_register_device(struct net_device *ndev)
1818 {
1819         struct netiucv_priv *priv = netdev_priv(ndev);
1820         struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1821         int ret;
1822
1823         IUCV_DBF_TEXT(trace, 3, __func__);
1824
1825         if (dev) {
1826                 dev_set_name(dev, "net%s", ndev->name);
1827                 dev->bus = &iucv_bus;
1828                 dev->parent = iucv_root;
1829                 /*
1830                  * The release function could be called after the
1831                  * module has been unloaded. Its _only_ task is to
1832                  * free the struct. Therefore, we specify kfree()
1833                  * directly here. (Probably a little obfuscated,
1834                  * but legitimate.)
1835                  */
1836                 dev->release = (void (*)(struct device *))kfree;
1837                 dev->driver = &netiucv_driver;
1838         } else
1839                 return -ENOMEM;
1840
1841         ret = device_register(dev);
1842         if (ret) {
1843                 put_device(dev);
1844                 return ret;
1845         }
1846         ret = netiucv_add_files(dev);
1847         if (ret)
1848                 goto out_unreg;
1849         priv->dev = dev;
1850         dev_set_drvdata(dev, priv);
1851         return 0;
1852
1853 out_unreg:
1854         device_unregister(dev);
1855         return ret;
1856 }
1857
1858 static void netiucv_unregister_device(struct device *dev)
1859 {
1860         IUCV_DBF_TEXT(trace, 3, __func__);
1861         netiucv_remove_files(dev);
1862         device_unregister(dev);
1863 }
1864
1865 /**
1866  * Allocate and initialize a new connection structure.
1867  * Add it to the list of netiucv connections.
1868  */
1869 static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
1870                                                       char *username)
1871 {
1872         struct iucv_connection *conn;
1873
1874         conn = kzalloc(sizeof(*conn), GFP_KERNEL);
1875         if (!conn)
1876                 goto out;
1877         skb_queue_head_init(&conn->collect_queue);
1878         skb_queue_head_init(&conn->commit_queue);
1879         spin_lock_init(&conn->collect_lock);
1880         conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
1881         conn->netdev = dev;
1882
1883         conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1884         if (!conn->rx_buff)
1885                 goto out_conn;
1886         conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1887         if (!conn->tx_buff)
1888                 goto out_rx;
1889         conn->fsm = init_fsm("netiucvconn", conn_state_names,
1890                              conn_event_names, NR_CONN_STATES,
1891                              NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
1892                              GFP_KERNEL);
1893         if (!conn->fsm)
1894                 goto out_tx;
1895
1896         fsm_settimer(conn->fsm, &conn->timer);
1897         fsm_newstate(conn->fsm, CONN_STATE_INVALID);
1898
1899         if (username) {
1900                 memcpy(conn->userid, username, 9);
1901                 fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
1902         }
1903
1904         write_lock_bh(&iucv_connection_rwlock);
1905         list_add_tail(&conn->list, &iucv_connection_list);
1906         write_unlock_bh(&iucv_connection_rwlock);
1907         return conn;
1908
1909 out_tx:
1910         kfree_skb(conn->tx_buff);
1911 out_rx:
1912         kfree_skb(conn->rx_buff);
1913 out_conn:
1914         kfree(conn);
1915 out:
1916         return NULL;
1917 }
1918
1919 /**
1920  * Release a connection structure and remove it from the
1921  * list of netiucv connections.
1922  */
1923 static void netiucv_remove_connection(struct iucv_connection *conn)
1924 {
1925         IUCV_DBF_TEXT(trace, 3, __func__);
1926         write_lock_bh(&iucv_connection_rwlock);
1927         list_del_init(&conn->list);
1928         write_unlock_bh(&iucv_connection_rwlock);
1929         fsm_deltimer(&conn->timer);
1930         netiucv_purge_skb_queue(&conn->collect_queue);
1931         if (conn->path) {
1932                 iucv_path_sever(conn->path, iucvMagic);
1933                 kfree(conn->path);
1934                 conn->path = NULL;
1935         }
1936         netiucv_purge_skb_queue(&conn->commit_queue);
1937         kfree_fsm(conn->fsm);
1938         kfree_skb(conn->rx_buff);
1939         kfree_skb(conn->tx_buff);
1940 }
1941
1942 /**
1943  * Release everything of a net device.
1944  */
1945 static void netiucv_free_netdevice(struct net_device *dev)
1946 {
1947         struct netiucv_priv *privptr;
1948
1949         IUCV_DBF_TEXT(trace, 3, __func__);
1950
1951         if (!dev)
1952                 return;
1953         privptr = netdev_priv(dev);
1954         if (privptr) {
1955                 if (privptr->conn)
1956                         netiucv_remove_connection(privptr->conn);
1957                 if (privptr->fsm)
1958                         kfree_fsm(privptr->fsm);
1959                 privptr->conn = NULL; privptr->fsm = NULL;
1960                 /* privptr gets freed by free_netdev() */
1961         }
1962         free_netdev(dev);
1963 }
1964
1965 /**
1966  * Initialize a net device. (Called from kernel in alloc_netdev())
1967  */
1968 static const struct net_device_ops netiucv_netdev_ops = {
1969         .ndo_open               = netiucv_open,
1970         .ndo_stop               = netiucv_close,
1971         .ndo_get_stats          = netiucv_stats,
1972         .ndo_start_xmit         = netiucv_tx,
1973         .ndo_change_mtu         = netiucv_change_mtu,
1974 };
1975
1976 static void netiucv_setup_netdevice(struct net_device *dev)
1977 {
1978         dev->mtu                 = NETIUCV_MTU_DEFAULT;
1979         dev->destructor          = netiucv_free_netdevice;
1980         dev->hard_header_len     = NETIUCV_HDRLEN;
1981         dev->addr_len            = 0;
1982         dev->type                = ARPHRD_SLIP;
1983         dev->tx_queue_len        = NETIUCV_QUEUELEN_DEFAULT;
1984         dev->flags               = IFF_POINTOPOINT | IFF_NOARP;
1985         dev->netdev_ops          = &netiucv_netdev_ops;
1986 }
1987
1988 /**
1989  * Allocate and initialize everything of a net device.
1990  */
1991 static struct net_device *netiucv_init_netdevice(char *username)
1992 {
1993         struct netiucv_priv *privptr;
1994         struct net_device *dev;
1995
1996         dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
1997                            netiucv_setup_netdevice);
1998         if (!dev)
1999                 return NULL;
2000         if (dev_alloc_name(dev, dev->name) < 0)
2001                 goto out_netdev;
2002
2003         privptr = netdev_priv(dev);
2004         privptr->fsm = init_fsm("netiucvdev", dev_state_names,
2005                                 dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
2006                                 dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
2007         if (!privptr->fsm)
2008                 goto out_netdev;
2009
2010         privptr->conn = netiucv_new_connection(dev, username);
2011         if (!privptr->conn) {
2012                 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
2013                 goto out_fsm;
2014         }
2015         fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
2016         return dev;
2017
2018 out_fsm:
2019         kfree_fsm(privptr->fsm);
2020 out_netdev:
2021         free_netdev(dev);
2022         return NULL;
2023 }
2024
2025 static ssize_t conn_write(struct device_driver *drv,
2026                           const char *buf, size_t count)
2027 {
2028         const char *p;
2029         char username[9];
2030         int i, rc;
2031         struct net_device *dev;
2032         struct netiucv_priv *priv;
2033         struct iucv_connection *cp;
2034
2035         IUCV_DBF_TEXT(trace, 3, __func__);
2036         if (count > 9) {
2037                 IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
2038                 return -EINVAL;
2039         }
2040
2041         for (i = 0, p = buf; i < 8 && *p; i++, p++) {
2042                 if (isalnum(*p) || *p == '$') {
2043                         username[i] = toupper(*p);
2044                         continue;
2045                 }
2046                 if (*p == '\n')
2047                         /* trailing lf, grr */
2048                         break;
2049                 IUCV_DBF_TEXT_(setup, 2,
2050                                "conn_write: invalid character %c\n", *p);
2051                 return -EINVAL;
2052         }
2053         while (i < 8)
2054                 username[i++] = ' ';
2055         username[8] = '\0';
2056
2057         read_lock_bh(&iucv_connection_rwlock);
2058         list_for_each_entry(cp, &iucv_connection_list, list) {
2059                 if (!strncmp(username, cp->userid, 9)) {
2060                         read_unlock_bh(&iucv_connection_rwlock);
2061                         IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection "
2062                                 "to %s already exists\n", username);
2063                         return -EEXIST;
2064                 }
2065         }
2066         read_unlock_bh(&iucv_connection_rwlock);
2067
2068         dev = netiucv_init_netdevice(username);
2069         if (!dev) {
2070                 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
2071                 return -ENODEV;
2072         }
2073
2074         rc = netiucv_register_device(dev);
2075         if (rc) {
2076                 IUCV_DBF_TEXT_(setup, 2,
2077                         "ret %d from netiucv_register_device\n", rc);
2078                 goto out_free_ndev;
2079         }
2080
2081         /* sysfs magic */
2082         priv = netdev_priv(dev);
2083         SET_NETDEV_DEV(dev, priv->dev);
2084
2085         rc = register_netdev(dev);
2086         if (rc)
2087                 goto out_unreg;
2088
2089         dev_info(priv->dev, "The IUCV interface to %s has been"
2090                 " established successfully\n", netiucv_printname(username));
2091
2092         return count;
2093
2094 out_unreg:
2095         netiucv_unregister_device(priv->dev);
2096 out_free_ndev:
2097         netiucv_free_netdevice(dev);
2098         return rc;
2099 }
2100
2101 static DRIVER_ATTR(connection, 0200, NULL, conn_write);
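
/*
 * Illustration of how an interface is created via the driver attribute
 * above (path assumes the standard sysfs mount point):
 *
 *   echo "VMUSERID" > /sys/bus/iucv/drivers/netiucv/connection
 *
 * conn_write() upper-cases and blank-pads the user ID to eight
 * characters, refuses duplicates, and then allocates an iucv<n>
 * net_device plus its sysfs device.
 */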
2102
2103 static ssize_t remove_write (struct device_driver *drv,
2104                              const char *buf, size_t count)
2105 {
2106         struct iucv_connection *cp;
2107         struct net_device *ndev;
2108         struct netiucv_priv *priv;
2109         struct device *dev;
2110         char name[IFNAMSIZ];
2111         const char *p;
2112         int i;
2113
2114         IUCV_DBF_TEXT(trace, 3, __func__);
2115
2116         if (count >= IFNAMSIZ)
2117                 count = IFNAMSIZ - 1;
2118
2119         for (i = 0, p = buf; i < count && *p; i++, p++) {
2120                 if (*p == '\n' || *p == ' ')
2121                         /* trailing lf, grr */
2122                         break;
2123                 name[i] = *p;
2124         }
2125         name[i] = '\0';
2126
2127         read_lock_bh(&iucv_connection_rwlock);
2128         list_for_each_entry(cp, &iucv_connection_list, list) {
2129                 ndev = cp->netdev;
2130                 priv = netdev_priv(ndev);
2131                 dev = priv->dev;
2132                 if (strncmp(name, ndev->name, count))
2133                         continue;
2134                 read_unlock_bh(&iucv_connection_rwlock);
2135                 if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
2136                         dev_warn(dev, "The IUCV device is connected"
2137                                 " to %s and cannot be removed\n",
2138                                 priv->conn->userid);
2139                         IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
2140                         return -EPERM;
2141                 }
2142                 unregister_netdev(ndev);
2143                 netiucv_unregister_device(dev);
2144                 return count;
2145         }
2146         read_unlock_bh(&iucv_connection_rwlock);
2147         IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
2148         return -EINVAL;
2149 }
2150
2151 static DRIVER_ATTR(remove, 0200, NULL, remove_write);
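
/*
 * Counterpart to the connection attribute: writing an interface name
 * removes it again, provided the interface is not up, e.g.
 *
 *   echo iucv0 > /sys/bus/iucv/drivers/netiucv/remove
 *
 * (illustrative path, assuming the standard sysfs mount point).
 */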
2152
2153 static struct attribute * netiucv_drv_attrs[] = {
2154         &driver_attr_connection.attr,
2155         &driver_attr_remove.attr,
2156         NULL,
2157 };
2158
2159 static struct attribute_group netiucv_drv_attr_group = {
2160         .attrs = netiucv_drv_attrs,
2161 };
2162
2163 static struct attribute_group *netiucv_drv_attr_groups[] = {
2164         &netiucv_drv_attr_group,
2165         NULL,
2166 };
2167
2168 static void netiucv_banner(void)
2169 {
2170         pr_info("driver initialized\n");
2171 }
2172
2173 static void __exit netiucv_exit(void)
2174 {
2175         struct iucv_connection *cp;
2176         struct net_device *ndev;
2177         struct netiucv_priv *priv;
2178         struct device *dev;
2179
2180         IUCV_DBF_TEXT(trace, 3, __func__);
2181         while (!list_empty(&iucv_connection_list)) {
2182                 cp = list_entry(iucv_connection_list.next,
2183                                 struct iucv_connection, list);
2184                 ndev = cp->netdev;
2185                 priv = netdev_priv(ndev);
2186                 dev = priv->dev;
2187
2188                 unregister_netdev(ndev);
2189                 netiucv_unregister_device(dev);
2190         }
2191
2192         device_unregister(netiucv_dev);
2193         driver_unregister(&netiucv_driver);
2194         iucv_unregister(&netiucv_handler, 1);
2195         iucv_unregister_dbf_views();
2196
2197         pr_info("driver unloaded\n");
2198         return;
2199 }
2200
2201 static int __init netiucv_init(void)
2202 {
2203         int rc;
2204
2205         rc = iucv_register_dbf_views();
2206         if (rc)
2207                 goto out;
2208         rc = iucv_register(&netiucv_handler, 1);
2209         if (rc)
2210                 goto out_dbf;
2211         IUCV_DBF_TEXT(trace, 3, __func__);
2212         netiucv_driver.groups = netiucv_drv_attr_groups;
2213         rc = driver_register(&netiucv_driver);
2214         if (rc) {
2215                 IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
2216                 goto out_iucv;
2217         }
2218         /* establish dummy device */
2219         netiucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
2220         if (!netiucv_dev) {
2221                 rc = -ENOMEM;
2222                 goto out_driver;
2223         }
2224         dev_set_name(netiucv_dev, "netiucv");
2225         netiucv_dev->bus = &iucv_bus;
2226         netiucv_dev->parent = iucv_root;
2227         netiucv_dev->release = (void (*)(struct device *))kfree;
2228         netiucv_dev->driver = &netiucv_driver;
2229         rc = device_register(netiucv_dev);
2230         if (rc) {
2231                 put_device(netiucv_dev);
2232                 goto out_driver;
2233         }
2234         netiucv_banner();
2235         return rc;
2236
2237 out_driver:
2238         driver_unregister(&netiucv_driver);
2239 out_iucv:
2240         iucv_unregister(&netiucv_handler, 1);
2241 out_dbf:
2242         iucv_unregister_dbf_views();
2243 out:
2244         return rc;
2245 }
2246
2247 module_init(netiucv_init);
2248 module_exit(netiucv_exit);
2249 MODULE_LICENSE("GPL");