/*
 *      net/x25/x25_forward.c
 *
 *      X.25 call and data forwarding between neighbouring interfaces.
 *
 *      This module is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *      History
 *      03-01-2007      Added forwarding for x.25       Andrew Hendry
 */
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/x25.h>

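/* Table of active forwarding entries.  Each entry pairs the two devices
 * carrying one forwarded call and is keyed by its logical channel (lci).
 */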
LIST_HEAD(x25_forward_list);
DEFINE_RWLOCK(x25_forward_list_lock);

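/* Forward an incoming Call Request towards the neighbour that serves
 * dest_addr.  On success the (lci, dev1, dev2) pairing is recorded in
 * x25_forward_list so that later traffic can be relayed by
 * x25_forward_data().  Returns 1 if the call was forwarded, -ENOMEM if
 * a forwarding entry could not be allocated, and 0 otherwise (no route,
 * route back to the originating device, or clone failure).
 */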
int x25_forward_call(struct x25_address *dest_addr, struct x25_neigh *from,
                        struct sk_buff *skb, int lci)
{
        struct x25_route *rt;
        struct x25_neigh *neigh_new = NULL;
        struct list_head *entry;
        struct x25_forward *x25_frwd, *new_frwd;
        struct sk_buff *skbn;
        short same_lci = 0;
        int rc = 0;

        if ((rt = x25_get_route(dest_addr)) == NULL)
                goto out_no_route;

        if ((neigh_new = x25_get_neigh(rt->dev)) == NULL) {
                /* This shouldn't happen, if it occurs somehow
                 * do something sensible
                 */
                goto out_put_route;
        }

        /* Avoid a loop. This is the normal exit path for a
         * system with only one x.25 iface and default route
         */
        if (rt->dev == from->dev) {
                goto out_put_nb;
        }

        /* Remote end sending a call request on an already
         * established LCI? It shouldn't happen, just in case..
         */
        read_lock_bh(&x25_forward_list_lock);
        list_for_each(entry, &x25_forward_list) {
                x25_frwd = list_entry(entry, struct x25_forward, node);
                if (x25_frwd->lci == lci) {
                        printk(KERN_WARNING "X.25: call request for an already registered lci, forwarding without registering a new pair\n");
                        same_lci = 1;
                }
        }
        read_unlock_bh(&x25_forward_list_lock);

        /* Save the forwarding details for future traffic */
        if (!same_lci) {
                if ((new_frwd = kmalloc(sizeof(struct x25_forward),
                                                GFP_ATOMIC)) == NULL) {
                        rc = -ENOMEM;
                        goto out_put_nb;
                }
                new_frwd->lci = lci;
                new_frwd->dev1 = rt->dev;
                new_frwd->dev2 = from->dev;
                write_lock_bh(&x25_forward_list_lock);
                list_add(&new_frwd->node, &x25_forward_list);
                write_unlock_bh(&x25_forward_list_lock);
        }

        /* Forward the call request */
        if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
                goto out_put_nb;
        }
        x25_transmit_link(skbn, neigh_new);
        rc = 1;

out_put_nb:
        x25_neigh_put(neigh_new);

out_put_route:
        x25_route_put(rt);

out_no_route:
        return rc;
}
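/* Relay a data packet belonging to an already forwarded call.  The
 * forwarding table maps the lci to the pair of devices carrying the
 * call; a copy of the packet is transmitted on whichever device it did
 * not arrive on.  Returns 1 if the packet was forwarded, 0 otherwise.
 */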
int x25_forward_data(int lci, struct x25_neigh *from, struct sk_buff *skb)
{
        struct x25_forward *frwd;
        struct list_head *entry;
        struct net_device *peer = NULL;
        struct x25_neigh *nb;
        struct sk_buff *skbn;
        int rc = 0;

        read_lock_bh(&x25_forward_list_lock);
        list_for_each(entry, &x25_forward_list) {
                frwd = list_entry(entry, struct x25_forward, node);
                if (frwd->lci == lci) {
                        /* The call is established, either side can send */
                        if (from->dev == frwd->dev1) {
                                peer = frwd->dev2;
                        } else {
                                peer = frwd->dev1;
                        }
                        break;
                }
        }
        read_unlock_bh(&x25_forward_list_lock);

        if ((nb = x25_get_neigh(peer)) == NULL)
                goto out;

        if ((skbn = pskb_copy(skb, GFP_ATOMIC)) == NULL)
                goto output;

        x25_transmit_link(skbn, nb);

        rc = 1;
output:
        x25_neigh_put(nb);
out:
        return rc;
}
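/* Remove and free every forwarding entry registered for the given lci,
 * e.g. once the forwarded call has been cleared.
 */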
void x25_clear_forward_by_lci(unsigned int lci)
{
        struct x25_forward *fwd;
        struct list_head *entry, *tmp;

        write_lock_bh(&x25_forward_list_lock);

        list_for_each_safe(entry, tmp, &x25_forward_list) {
                fwd = list_entry(entry, struct x25_forward, node);
                if (fwd->lci == lci) {
                        list_del(&fwd->node);
                        kfree(fwd);
                }
        }
        write_unlock_bh(&x25_forward_list_lock);
}
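/* Remove and free every forwarding entry that references the given
 * device on either side, e.g. when the device is going down.
 */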
void x25_clear_forward_by_dev(struct net_device *dev)
{
        struct x25_forward *fwd;
        struct list_head *entry, *tmp;

        write_lock_bh(&x25_forward_list_lock);

        list_for_each_safe(entry, tmp, &x25_forward_list) {
                fwd = list_entry(entry, struct x25_forward, node);
                if ((fwd->dev1 == dev) || (fwd->dev2 == dev)) {
                        list_del(&fwd->node);
                        kfree(fwd);
                }
        }
        write_unlock_bh(&x25_forward_list_lock);
}