net/x25/x25_link.c
/*
 *      X.25 Packet Layer release 002
 *
 *      This is ALPHA test software. This code may break your machine,
 *      randomly fail to work with new releases, misbehave and/or generally
 *      screw up. It might even work.
 *
 *      This code REQUIRES 2.1.15 or higher
 *
 *      This module:
 *              This module is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 *      History
 *      X.25 001        Jonathan Naylor   Started coding.
 *      X.25 002        Jonathan Naylor   New timer architecture.
 *      mar/20/00       Daniela Squassoni Disabling/enabling of facilities
 *                                        negotiation.
 *      2000-09-04      Henner Eisen      dev_hold() / dev_put() for x25_neigh.
 */

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <asm/uaccess.h>
#include <linux/init.h>
#include <net/x25.h>

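/*
 *      All known X.25 neighbours live on x25_neigh_list, protected by
 *      x25_neigh_list_lock (always taken with the _bh variants below).
 */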
LIST_HEAD(x25_neigh_list);
DEFINE_RWLOCK(x25_neigh_list_lock);

static void x25_t20timer_expiry(unsigned long);

static void x25_transmit_restart_confirmation(struct x25_neigh *nb);
static void x25_transmit_restart_request(struct x25_neigh *nb);

/*
 *      Linux set/reset timer routines
 */
static inline void x25_start_t20timer(struct x25_neigh *nb)
{
        mod_timer(&nb->t20timer, jiffies + nb->t20);
}

static void x25_t20timer_expiry(unsigned long param)
{
        struct x25_neigh *nb = (struct x25_neigh *)param;

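        /*
         * T20 (the Restart Request response timer) expired without an
         * answer: retransmit the Restart Request and re-arm the timer.
         */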
        x25_transmit_restart_request(nb);

        x25_start_t20timer(nb);
}

static inline void x25_stop_t20timer(struct x25_neigh *nb)
{
        del_timer(&nb->t20timer);
}

static inline int x25_t20timer_pending(struct x25_neigh *nb)
{
        return timer_pending(&nb->t20timer);
}

/*
 *      This handles all restart and diagnostic frames.
 */
void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb,
                      unsigned short frametype)
{
        struct sk_buff *skbn;
        int confirm;

        switch (frametype) {
        case X25_RESTART_REQUEST:
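                /*
                 * Only answer with a Restart Confirmation if we do not have
                 * a Restart Request of our own outstanding (T20 running);
                 * on a restart collision both sides treat the restart as
                 * complete without confirming it.
                 */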
                confirm = !x25_t20timer_pending(nb);
                x25_stop_t20timer(nb);
                nb->state = X25_LINK_STATE_3;
                if (confirm)
                        x25_transmit_restart_confirmation(nb);
                break;

        case X25_RESTART_CONFIRMATION:
                x25_stop_t20timer(nb);
                nb->state = X25_LINK_STATE_3;
                break;

        case X25_DIAGNOSTIC:
                /* Make sure the diagnostic code and explanation bytes exist. */
                if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 4))
                        break;

                printk(KERN_WARNING "x25: diagnostic #%d - %02X %02X %02X\n",
                       skb->data[3], skb->data[4],
                       skb->data[5], skb->data[6]);
                break;

        default:
                printk(KERN_WARNING "x25: received unknown %02X with LCI 000\n",
                       frametype);
                break;
        }

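        /* The link is up: flush any frames queued while it was restarting. */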
        if (nb->state == X25_LINK_STATE_3)
                while ((skbn = skb_dequeue(&nb->queue)) != NULL)
                        x25_send_frame(skbn, nb);
}

/*
 *      This routine is called when a Restart Request is needed
 */
static void x25_transmit_restart_request(struct x25_neigh *nb)
{
        unsigned char *dptr;
        int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
        struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

        if (!skb)
                return;

        skb_reserve(skb, X25_MAX_L2_LEN);

        dptr = skb_put(skb, X25_STD_MIN_LEN + 2);

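        /*
         * Restart Request on LCI 0: GFI, LCI low byte, packet type,
         * restarting cause and diagnostic (both zero).
         */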
        *dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
        *dptr++ = 0x00;
        *dptr++ = X25_RESTART_REQUEST;
        *dptr++ = 0x00;
        *dptr++ = 0;

        skb->sk = NULL;

        x25_send_frame(skb, nb);
}

/*
 *      This routine is called when a Restart Confirmation is needed
 */
static void x25_transmit_restart_confirmation(struct x25_neigh *nb)
{
        unsigned char *dptr;
        int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN;
        struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

        if (!skb)
                return;

        skb_reserve(skb, X25_MAX_L2_LEN);

        dptr = skb_put(skb, X25_STD_MIN_LEN);

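        /* Restart Confirmation on LCI 0: GFI, LCI low byte, packet type. */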
        *dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
        *dptr++ = 0x00;
        *dptr++ = X25_RESTART_CONFIRMATION;

        skb->sk = NULL;

        x25_send_frame(skb, nb);
}

/*
 *      This routine is called when a Clear Request is needed outside of
 *      the context of a connected socket.
 */
void x25_transmit_clear_request(struct x25_neigh *nb, unsigned int lci,
                                unsigned char cause)
{
        unsigned char *dptr;
        int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
        struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

        if (!skb)
                return;

        skb_reserve(skb, X25_MAX_L2_LEN);

        dptr = skb_put(skb, X25_STD_MIN_LEN + 2);

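        /*
         * Clear Request on the given LCI: GFI plus the top four bits of the
         * LCI, LCI low byte, packet type, clearing cause and diagnostic 0.
         */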
        *dptr++ = ((lci >> 8) & 0x0F) | (nb->extended ?
                                         X25_GFI_EXTSEQ :
                                         X25_GFI_STDSEQ);
        *dptr++ = (lci >> 0) & 0xFF;
        *dptr++ = X25_CLEAR_REQUEST;
        *dptr++ = cause;
        *dptr++ = 0x00;

        skb->sk = NULL;

        x25_send_frame(skb, nb);
}

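/*
 *      Deliver or queue a frame depending on the state of the link to the
 *      neighbour: in state 0 the frame is queued and the link layer is asked
 *      to connect; in states 1 and 2 (connect or restart still in progress)
 *      the frame is queued; in state 3 (link up) it is sent immediately.
 */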
void x25_transmit_link(struct sk_buff *skb, struct x25_neigh *nb)
{
        switch (nb->state) {
        case X25_LINK_STATE_0:
                skb_queue_tail(&nb->queue, skb);
                nb->state = X25_LINK_STATE_1;
                x25_establish_link(nb);
                break;
        case X25_LINK_STATE_1:
        case X25_LINK_STATE_2:
                skb_queue_tail(&nb->queue, skb);
                break;
        case X25_LINK_STATE_3:
                x25_send_frame(skb, nb);
                break;
        }
}

/*
 *      Called when the link layer has become established.
 */
void x25_link_established(struct x25_neigh *nb)
{
        switch (nb->state) {
        case X25_LINK_STATE_0:
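                /*
                 * The link layer came up without a pending request from us;
                 * wait for the neighbour to send its Restart Request.
                 */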
                nb->state = X25_LINK_STATE_2;
                break;
        case X25_LINK_STATE_1:
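                /*
                 * We asked for the connection: send our own Restart Request
                 * and start T20 to retransmit it until it is answered.
                 */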
                x25_transmit_restart_request(nb);
                nb->state = X25_LINK_STATE_2;
                x25_start_t20timer(nb);
                break;
        }
}

/*
 *      Called when the link layer has terminated, or an establishment
 *      request has failed.
 */
void x25_link_terminated(struct x25_neigh *nb)
{
        nb->state = X25_LINK_STATE_0;
        /* Out of order: clear existing virtual calls (X.25 03/93 4.6.3) */
        x25_kill_by_neigh(nb);
}

/*
 *      Add a new device.
 */
void x25_link_device_up(struct net_device *dev)
{
        struct x25_neigh *nb = kmalloc(sizeof(*nb), GFP_ATOMIC);

        if (!nb)
                return;

        skb_queue_head_init(&nb->queue);
        setup_timer(&nb->t20timer, x25_t20timer_expiry, (unsigned long)nb);

        dev_hold(dev);
        nb->dev      = dev;
        nb->state    = X25_LINK_STATE_0;
        nb->extended = 0;
        /*
         * Enable negotiation of these facilities by default.
         */
        nb->global_facil_mask = X25_MASK_REVERSE |
                                       X25_MASK_THROUGHPUT |
                                       X25_MASK_PACKET_SIZE |
                                       X25_MASK_WINDOW_SIZE;
        nb->t20      = sysctl_x25_restart_request_timeout;
        atomic_set(&nb->refcnt, 1);

        write_lock_bh(&x25_neigh_list_lock);
        list_add(&nb->node, &x25_neigh_list);
        write_unlock_bh(&x25_neigh_list_lock);
}

/**
 *      __x25_remove_neigh - remove neighbour from x25_neigh_list
 *      @nb: neighbour to remove
 *
 *      Remove the neighbour from x25_neigh_list, if it is there.
 *      Caller must hold x25_neigh_list_lock.
 */
static void __x25_remove_neigh(struct x25_neigh *nb)
{
        skb_queue_purge(&nb->queue);
        x25_stop_t20timer(nb);

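        /* Unlink nb and drop the list's reference only if it is still queued. */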
        if (nb->node.next) {
                list_del(&nb->node);
                x25_neigh_put(nb);
        }
}

/*
 *      A device has been removed, remove its links.
 */
void x25_link_device_down(struct net_device *dev)
{
        struct x25_neigh *nb;
        struct list_head *entry, *tmp;

        write_lock_bh(&x25_neigh_list_lock);

        list_for_each_safe(entry, tmp, &x25_neigh_list) {
                nb = list_entry(entry, struct x25_neigh, node);

                if (nb->dev == dev) {
                        __x25_remove_neigh(nb);
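                        /* Drop the reference taken by dev_hold() in x25_link_device_up(). */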
                        dev_put(dev);
                }
        }

        write_unlock_bh(&x25_neigh_list_lock);
}

/*
 *      Given a device, return its neighbour structure with a reference held.
 */
struct x25_neigh *x25_get_neigh(struct net_device *dev)
{
        struct x25_neigh *nb, *use = NULL;
        struct list_head *entry;

        read_lock_bh(&x25_neigh_list_lock);
        list_for_each(entry, &x25_neigh_list) {
                nb = list_entry(entry, struct x25_neigh, node);

                if (nb->dev == dev) {
                        use = nb;
                        break;
                }
        }

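        /* Take a reference while the lock still protects nb from removal. */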
        if (use)
                x25_neigh_hold(use);
        read_unlock_bh(&x25_neigh_list_lock);
        return use;
}

/*
 *      Handle the ioctls that control the subscription functions.
 */
int x25_subscr_ioctl(unsigned int cmd, void __user *arg)
{
        struct x25_subscrip_struct x25_subscr;
        struct x25_neigh *nb;
        struct net_device *dev;
        int rc = -EINVAL;

        if (cmd != SIOCX25GSUBSCRIP && cmd != SIOCX25SSUBSCRIP)
                goto out;

        rc = -EFAULT;
        if (copy_from_user(&x25_subscr, arg, sizeof(x25_subscr)))
                goto out;

        rc = -EINVAL;
        if ((dev = x25_dev_get(x25_subscr.device)) == NULL)
                goto out;

        if ((nb = x25_get_neigh(dev)) == NULL)
                goto out_dev_put;

        dev_put(dev);

        if (cmd == SIOCX25GSUBSCRIP) {
                read_lock_bh(&x25_neigh_list_lock);
                x25_subscr.extended          = nb->extended;
                x25_subscr.global_facil_mask = nb->global_facil_mask;
                read_unlock_bh(&x25_neigh_list_lock);
                rc = copy_to_user(arg, &x25_subscr,
                                  sizeof(x25_subscr)) ? -EFAULT : 0;
        } else {
                rc = -EINVAL;
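                /* "extended" selects modulo-128 sequencing; only 0 or 1 is valid. */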
                if (!(x25_subscr.extended && x25_subscr.extended != 1)) {
                        rc = 0;
                        write_lock_bh(&x25_neigh_list_lock);
                        nb->extended         = x25_subscr.extended;
                        nb->global_facil_mask = x25_subscr.global_facil_mask;
                        write_unlock_bh(&x25_neigh_list_lock);
                }
        }
        x25_neigh_put(nb);
out:
        return rc;
out_dev_put:
        dev_put(dev);
        goto out;
}

/*
 *      Release all memory associated with X.25 neighbour structures.
 */
void __exit x25_link_free(void)
{
        struct x25_neigh *nb;
        struct list_head *entry, *tmp;

        write_lock_bh(&x25_neigh_list_lock);

        list_for_each_safe(entry, tmp, &x25_neigh_list) {
                struct net_device *dev;

                nb = list_entry(entry, struct x25_neigh, node);
                dev = nb->dev;
                __x25_remove_neigh(nb);
                dev_put(dev);
        }
        write_unlock_bh(&x25_neigh_list_lock);
}