net/x25/x25_link.c
/*
 *	X.25 Packet Layer release 002
 *
 *	This is ALPHA test software. This code may break your machine,
 *	randomly fail to work with new releases, misbehave and/or generally
 *	screw up. It might even work.
 *
 *	This code REQUIRES 2.1.15 or higher
 *
 *	This module:
 *		This module is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	History
 *	X.25 001	Jonathan Naylor	  Started coding.
 *	X.25 002	Jonathan Naylor	  New timer architecture.
 *	mar/20/00	Daniela Squassoni Disabling/enabling of facilities
 *					  negotiation.
 *	2000-09-04	Henner Eisen	  dev_hold() / dev_put() for x25_neigh.
 */

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <asm/uaccess.h>
#include <linux/init.h>
#include <net/x25.h>

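/*
 * All known X.25 neighbours, one struct x25_neigh per X.25-capable
 * device, protected by x25_neigh_list_lock.
 */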
LIST_HEAD(x25_neigh_list);
DEFINE_RWLOCK(x25_neigh_list_lock);

static void x25_t20timer_expiry(unsigned long);

static void x25_transmit_restart_confirmation(struct x25_neigh *nb);
static void x25_transmit_restart_request(struct x25_neigh *nb);

/*
 *	Linux set/reset timer routines
 */
static inline void x25_start_t20timer(struct x25_neigh *nb)
{
	mod_timer(&nb->t20timer, jiffies + nb->t20);
}

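/*
 * T20 expired while waiting for a Restart Confirmation: retransmit the
 * Restart Request and rearm the timer.
 */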
static void x25_t20timer_expiry(unsigned long param)
{
	struct x25_neigh *nb = (struct x25_neigh *)param;

	x25_transmit_restart_request(nb);

	x25_start_t20timer(nb);
}

static inline void x25_stop_t20timer(struct x25_neigh *nb)
{
	del_timer(&nb->t20timer);
}

static inline int x25_t20timer_pending(struct x25_neigh *nb)
{
	return timer_pending(&nb->t20timer);
}

/*
 *	This handles all restart and diagnostic frames.
 */
void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb,
		      unsigned short frametype)
{
	struct sk_buff *skbn;
	int confirm;

	switch (frametype) {
	case X25_RESTART_REQUEST:
		confirm = !x25_t20timer_pending(nb);
		x25_stop_t20timer(nb);
		nb->state = X25_LINK_STATE_3;
		if (confirm)
			x25_transmit_restart_confirmation(nb);
		break;

	case X25_RESTART_CONFIRMATION:
		x25_stop_t20timer(nb);
		nb->state = X25_LINK_STATE_3;
		break;

	case X25_DIAGNOSTIC:
		if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 4))
			break;

		printk(KERN_WARNING "x25: diagnostic #%d - %02X %02X %02X\n",
		       skb->data[3], skb->data[4],
		       skb->data[5], skb->data[6]);
		break;

	default:
		printk(KERN_WARNING "x25: received unknown %02X with LCI 000\n",
		       frametype);
		break;
	}

	if (nb->state == X25_LINK_STATE_3)
		while ((skbn = skb_dequeue(&nb->queue)) != NULL)
			x25_send_frame(skbn, nb);
}

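/*
 * The transmit helpers below all build a minimal X.25 packet header:
 * octet 1 carries the General Format Identifier (GFI) and the high bits
 * of the logical channel identifier, octet 2 the low byte of the LCI,
 * octet 3 the packet type.  Restart packets are sent on LCI 0.
 */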
/*
 *	This routine is called when a Restart Request is needed
 */
static void x25_transmit_restart_request(struct x25_neigh *nb)
{
	unsigned char *dptr;
	int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb)
		return;

	skb_reserve(skb, X25_MAX_L2_LEN);

	dptr = skb_put(skb, X25_STD_MIN_LEN + 2);

	*dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
	*dptr++ = 0x00;
	*dptr++ = X25_RESTART_REQUEST;
	*dptr++ = 0x00;		/* Restarting cause	*/
	*dptr++ = 0;		/* Diagnostic code	*/

	skb->sk = NULL;

	x25_send_frame(skb, nb);
}

/*
 *	This routine is called when a Restart Confirmation is needed
 */
static void x25_transmit_restart_confirmation(struct x25_neigh *nb)
{
	unsigned char *dptr;
	int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN;
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb)
		return;

	skb_reserve(skb, X25_MAX_L2_LEN);

	dptr = skb_put(skb, X25_STD_MIN_LEN);

	*dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
	*dptr++ = 0x00;
	*dptr++ = X25_RESTART_CONFIRMATION;

	skb->sk = NULL;

	x25_send_frame(skb, nb);
}

/*
 *	This routine is called when a Clear Request is needed outside of
 *	the context of a connected socket.
 */
void x25_transmit_clear_request(struct x25_neigh *nb, unsigned int lci,
				unsigned char cause)
{
	unsigned char *dptr;
	int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb)
		return;

	skb_reserve(skb, X25_MAX_L2_LEN);

	dptr = skb_put(skb, X25_STD_MIN_LEN + 2);

	*dptr++ = ((lci >> 8) & 0x0F) | (nb->extended ?
					 X25_GFI_EXTSEQ :
					 X25_GFI_STDSEQ);
	*dptr++ = (lci >> 0) & 0xFF;
	*dptr++ = X25_CLEAR_REQUEST;
	*dptr++ = cause;
	*dptr++ = 0x00;		/* Diagnostic code	*/

	skb->sk = NULL;

	x25_send_frame(skb, nb);
}

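/*
 * Queue or transmit a frame depending on the state of the link to this
 * neighbour:
 *
 *	X25_LINK_STATE_0 - link down; queue the frame and start bringing
 *			   the link layer up
 *	X25_LINK_STATE_1 - waiting for the link layer to connect
 *	X25_LINK_STATE_2 - waiting for a Restart Confirmation
 *	X25_LINK_STATE_3 - link up; send the frame immediately
 */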
void x25_transmit_link(struct sk_buff *skb, struct x25_neigh *nb)
{
	switch (nb->state) {
	case X25_LINK_STATE_0:
		skb_queue_tail(&nb->queue, skb);
		nb->state = X25_LINK_STATE_1;
		x25_establish_link(nb);
		break;
	case X25_LINK_STATE_1:
	case X25_LINK_STATE_2:
		skb_queue_tail(&nb->queue, skb);
		break;
	case X25_LINK_STATE_3:
		x25_send_frame(skb, nb);
		break;
	}
}

/*
 *	Called when the link layer has become established.
 */
void x25_link_established(struct x25_neigh *nb)
{
	switch (nb->state) {
	case X25_LINK_STATE_0:
		nb->state = X25_LINK_STATE_2;
		break;
	case X25_LINK_STATE_1:
		x25_transmit_restart_request(nb);
		nb->state = X25_LINK_STATE_2;
		x25_start_t20timer(nb);
		break;
	}
}

/*
 *	Called when the link layer has terminated, or an establishment
 *	request has failed.
 */
void x25_link_terminated(struct x25_neigh *nb)
{
	nb->state = X25_LINK_STATE_0;
	/* Out of order: clear existing virtual calls (X.25 03/93 4.6.3) */
	x25_kill_by_neigh(nb);
}

/*
 *	Add a new device.
 */
void x25_link_device_up(struct net_device *dev)
{
	struct x25_neigh *nb = kmalloc(sizeof(*nb), GFP_ATOMIC);

	if (!nb)
		return;

	skb_queue_head_init(&nb->queue);
	setup_timer(&nb->t20timer, x25_t20timer_expiry, (unsigned long)nb);

	dev_hold(dev);
	nb->dev      = dev;
	nb->state    = X25_LINK_STATE_0;
	nb->extended = 0;
	/*
	 * Enable negotiation of these facilities by default.
	 */
	nb->global_facil_mask = X25_MASK_REVERSE |
				X25_MASK_THROUGHPUT |
				X25_MASK_PACKET_SIZE |
				X25_MASK_WINDOW_SIZE;
	nb->t20      = sysctl_x25_restart_request_timeout;
	atomic_set(&nb->refcnt, 1);

	write_lock_bh(&x25_neigh_list_lock);
	list_add(&nb->node, &x25_neigh_list);
	write_unlock_bh(&x25_neigh_list_lock);
}

/**
 *	__x25_remove_neigh - remove neighbour from x25_neigh_list
 *	@nb: neigh to remove
 *
 *	Remove neighbour from x25_neigh_list, if it is there.
 *	Caller must hold x25_neigh_list_lock.
 */
static void __x25_remove_neigh(struct x25_neigh *nb)
{
	skb_queue_purge(&nb->queue);
	x25_stop_t20timer(nb);

	if (nb->node.next) {
		list_del(&nb->node);
		x25_neigh_put(nb);
	}
}

/*
 *	A device has been removed; remove its links.
 */
void x25_link_device_down(struct net_device *dev)
{
	struct x25_neigh *nb;
	struct list_head *entry, *tmp;

	write_lock_bh(&x25_neigh_list_lock);

	list_for_each_safe(entry, tmp, &x25_neigh_list) {
		nb = list_entry(entry, struct x25_neigh, node);

		if (nb->dev == dev) {
			__x25_remove_neigh(nb);
			dev_put(dev);
		}
	}

	write_unlock_bh(&x25_neigh_list_lock);
}

/*
 *	Given a device, return its neighbour.  The caller must release the
 *	reference with x25_neigh_put().
 */
struct x25_neigh *x25_get_neigh(struct net_device *dev)
{
	struct x25_neigh *nb, *use = NULL;
	struct list_head *entry;

	read_lock_bh(&x25_neigh_list_lock);
	list_for_each(entry, &x25_neigh_list) {
		nb = list_entry(entry, struct x25_neigh, node);

		if (nb->dev == dev) {
			use = nb;
			break;
		}
	}

	if (use)
		x25_neigh_hold(use);
	read_unlock_bh(&x25_neigh_list_lock);
	return use;
}

/*
 *	Handle the ioctls that control the subscription functions.
 */
int x25_subscr_ioctl(unsigned int cmd, void __user *arg)
{
	struct x25_subscrip_struct x25_subscr;
	struct x25_neigh *nb;
	struct net_device *dev;
	int rc = -EINVAL;

	if (cmd != SIOCX25GSUBSCRIP && cmd != SIOCX25SSUBSCRIP)
		goto out;

	rc = -EFAULT;
	if (copy_from_user(&x25_subscr, arg, sizeof(x25_subscr)))
		goto out;

	rc = -EINVAL;
	if ((dev = x25_dev_get(x25_subscr.device)) == NULL)
		goto out;

	if ((nb = x25_get_neigh(dev)) == NULL)
		goto out_dev_put;

	dev_put(dev);

	if (cmd == SIOCX25GSUBSCRIP) {
		read_lock_bh(&x25_neigh_list_lock);
		x25_subscr.extended          = nb->extended;
		x25_subscr.global_facil_mask = nb->global_facil_mask;
		read_unlock_bh(&x25_neigh_list_lock);
		rc = copy_to_user(arg, &x25_subscr,
				  sizeof(x25_subscr)) ? -EFAULT : 0;
	} else {
		rc = -EINVAL;
		/* Only 0 (standard) or 1 (extended) sequencing is valid */
		if (!(x25_subscr.extended && x25_subscr.extended != 1)) {
			rc = 0;
			write_lock_bh(&x25_neigh_list_lock);
			nb->extended          = x25_subscr.extended;
			nb->global_facil_mask = x25_subscr.global_facil_mask;
			write_unlock_bh(&x25_neigh_list_lock);
		}
	}
	x25_neigh_put(nb);
out:
	return rc;
out_dev_put:
	dev_put(dev);
	goto out;
}
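
/*
 * A rough user-space sketch of the subscription interface above (not part
 * of this file's logic; the socket descriptor "sock" and the device name
 * "tr0" are hypothetical).  Setting .extended to 1 requests modulo 128
 * sequence numbering on the neighbour:
 *
 *	struct x25_subscrip_struct subscr;
 *
 *	strcpy(subscr.device, "tr0");
 *	if (ioctl(sock, SIOCX25GSUBSCRIP, &subscr) == 0) {
 *		subscr.extended = 1;
 *		ioctl(sock, SIOCX25SSUBSCRIP, &subscr);
 *	}
 */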

/*
 *	Release all memory associated with X.25 neighbour structures.
 */
void __exit x25_link_free(void)
{
	struct x25_neigh *nb;
	struct list_head *entry, *tmp;

	write_lock_bh(&x25_neigh_list_lock);

	list_for_each_safe(entry, tmp, &x25_neigh_list) {
		struct net_device *dev;

		nb = list_entry(entry, struct x25_neigh, node);
		dev = nb->dev;
		__x25_remove_neigh(nb);
		dev_put(dev);
	}
	write_unlock_bh(&x25_neigh_list_lock);
}