/*
 * net/tipc/bcast.c: TIPC broadcast code
 *
 * Copyright (c) 2004-2006, Ericsson AB
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "port.h"
#include "bcast.h"
#include "name_distr.h"

#define MAX_PKT_DEFAULT_MCAST 1500      /* bcast link max packet size (fixed) */

#define BCLINK_WIN_DEFAULT 20           /* bcast link window size (default) */

/**
 * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
 * @primary: pointer to primary bearer
 * @secondary: pointer to secondary bearer
 *
 * Bearers must have same priority and same set of reachable destinations
 * to be paired.
 */

struct tipc_bcbearer_pair {
        struct tipc_bearer *primary;
        struct tipc_bearer *secondary;
};

/**
 * struct tipc_bcbearer - bearer used by broadcast link
 * @bearer: (non-standard) broadcast bearer structure
 * @media: (non-standard) broadcast media structure
 * @bpairs: array of bearer pairs
 * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
 * @remains: temporary node map used by tipc_bcbearer_send()
 * @remains_new: temporary node map used by tipc_bcbearer_send()
 *
 * Note: The fields labelled "temporary" are incorporated into the bearer
 * to avoid consuming potentially limited stack space through the use of
 * large local variables within multicast routines.  Concurrent access is
 * prevented through use of the spinlock "bc_lock".
 */

struct tipc_bcbearer {
        struct tipc_bearer bearer;
        struct tipc_media media;
        struct tipc_bcbearer_pair bpairs[MAX_BEARERS];
        struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
        struct tipc_node_map remains;
        struct tipc_node_map remains_new;
};

/**
 * struct tipc_bclink - link used for broadcast messages
 * @link: (non-standard) broadcast link structure
 * @node: (non-standard) node structure representing b'cast link's peer node
 * @bcast_nodes: map of broadcast-capable nodes
 * @retransmit_to: node that most recently requested a retransmit
 *
 * Handles sequence numbering, fragmentation, bundling, etc.
 */

struct tipc_bclink {
        struct tipc_link link;
        struct tipc_node node;
        struct tipc_node_map bcast_nodes;
        struct tipc_node *retransmit_to;
};

static struct tipc_bcbearer bcast_bearer;
static struct tipc_bclink bcast_link;

static struct tipc_bcbearer *bcbearer = &bcast_bearer;
static struct tipc_bclink *bclink = &bcast_link;
static struct tipc_link *bcl = &bcast_link.link;

static DEFINE_SPINLOCK(bc_lock);

const char tipc_bclink_name[] = "broadcast-link";

static void tipc_nmap_diff(struct tipc_node_map *nm_a,
                           struct tipc_node_map *nm_b,
                           struct tipc_node_map *nm_diff);

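/*
 * Each buffer on the broadcast link's transmit queue must be kept until
 * every node in bcast_nodes has acknowledged it.  Rather than extending
 * struct sk_buff, the helpers below reuse the 'handle' pointer in the
 * TIPC skb control block as a plain counter of outstanding acknowledgements.
 */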
static u32 bcbuf_acks(struct sk_buff *buf)
{
        return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
}

static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
{
        TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
}

static void bcbuf_decr_acks(struct sk_buff *buf)
{
        bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}

void tipc_bclink_add_node(u32 addr)
{
        spin_lock_bh(&bc_lock);
        tipc_nmap_add(&bclink->bcast_nodes, addr);
        spin_unlock_bh(&bc_lock);
}

void tipc_bclink_remove_node(u32 addr)
{
        spin_lock_bh(&bc_lock);
        tipc_nmap_remove(&bclink->bcast_nodes, addr);
        spin_unlock_bh(&bc_lock);
}

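/*
 * The broadcast link is pinned in WORKING_WORKING state by
 * tipc_bclink_init() and never runs the normal link FSM, so its
 * fsm_msg_cnt field is reused here to hold the sequence number of the
 * last broadcast packet sent.
 */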
static void bclink_set_last_sent(void)
{
        if (bcl->next_out)
                bcl->fsm_msg_cnt = mod(buf_seqno(bcl->next_out) - 1);
        else
                bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
}

u32 tipc_bclink_get_last_sent(void)
{
        return bcl->fsm_msg_cnt;
}

/**
 * bclink_set_gap - set gap according to contents of current deferred pkt queue
 *
 * Called with 'node' locked, bc_lock unlocked
 */

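/*
 * A node's "gap" (gap_after, gap_to] describes broadcast packets it
 * knows it has missed: the range opens just after the last in-sequence
 * packet received and, when packets sit in the deferred queue, closes
 * just before the first deferred packet.  gap_after == gap_to means
 * no gap.
 */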
static void bclink_set_gap(struct tipc_node *n_ptr)
{
        struct sk_buff *buf = n_ptr->bclink.deferred_head;

        n_ptr->bclink.gap_after = n_ptr->bclink.gap_to =
                mod(n_ptr->bclink.last_in);
        if (unlikely(buf != NULL))
                n_ptr->bclink.gap_to = mod(buf_seqno(buf) - 1);
}

/**
 * bclink_ack_allowed - test if ACK or NACK message can be sent at this moment
 *
 * This mechanism endeavours to prevent all nodes in the network from trying
 * to ACK or NACK at the same time.
 *
 * Note: TIPC uses a different trigger to distribute ACKs than it does to
 *       distribute NACKs, but tries to use the same spacing (divide by 16).
 */

static int bclink_ack_allowed(u32 n)
{
        return (n % TIPC_MIN_LINK_WIN) == tipc_own_tag;
}
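/*
 * Example: TIPC_MIN_LINK_WIN is 16, so a node whose tipc_own_tag is 3
 * may respond only when the tested counter satisfies n % 16 == 3;
 * nodes holding different tags thus answer different packets instead
 * of all replying at once.
 */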


/**
 * tipc_bclink_retransmit_to - get most recent node to request retransmission
 *
 * Called with bc_lock locked
 */

struct tipc_node *tipc_bclink_retransmit_to(void)
{
        return bclink->retransmit_to;
}

/**
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
 *
 * Called with bc_lock locked
 */

static void bclink_retransmit_pkt(u32 after, u32 to)
{
        struct sk_buff *buf;

        buf = bcl->first_out;
        while (buf && less_eq(buf_seqno(buf), after))
                buf = buf->next;
        tipc_link_retransmit(bcl, buf, mod(to - after));
}

/**
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
 *
 * Node is locked, bc_lock unlocked.
 */

void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
        struct sk_buff *crs;
        struct sk_buff *next;
        unsigned int released = 0;

        spin_lock_bh(&bc_lock);

        /* Bail out if tx queue is empty (no clean up is required) */
        crs = bcl->first_out;
        if (!crs)
                goto exit;

        /* Determine which messages need to be acknowledged */
        if (acked == INVALID_LINK_SEQ) {
                /*
                 * Contact with specified node has been lost, so need to
                 * acknowledge sent messages only (if other nodes still exist)
                 * or both sent and unsent messages (otherwise)
                 */
                if (bclink->bcast_nodes.count)
                        acked = bcl->fsm_msg_cnt;
                else
                        acked = bcl->next_out_no;
        } else {
                /*
                 * Bail out if specified sequence number does not correspond
                 * to a message that has been sent and not yet acknowledged
                 */
                if (less(acked, buf_seqno(crs)) ||
                    less(bcl->fsm_msg_cnt, acked) ||
                    less_eq(acked, n_ptr->bclink.acked))
                        goto exit;
        }

        /* Skip over packets that node has previously acknowledged */
        while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked))
                crs = crs->next;

        /* Update packets that node is now acknowledging */

        while (crs && less_eq(buf_seqno(crs), acked)) {
                next = crs->next;

                if (crs != bcl->next_out)
                        bcbuf_decr_acks(crs);
                else {
                        bcbuf_set_acks(crs, 0);
                        bcl->next_out = next;
                        bclink_set_last_sent();
                }

                if (bcbuf_acks(crs) == 0) {
                        bcl->first_out = next;
                        bcl->out_queue_size--;
                        buf_discard(crs);
                        released = 1;
                }
                crs = next;
        }
        n_ptr->bclink.acked = acked;

        /* Try resolving broadcast link congestion, if necessary */

        if (unlikely(bcl->next_out)) {
                tipc_link_push_queue(bcl);
                bclink_set_last_sent();
        }
        if (unlikely(released && !list_empty(&bcl->waiting_ports)))
                tipc_link_wakeup_ports(bcl, 0);
exit:
        spin_unlock_bh(&bc_lock);
}

/**
 * bclink_send_ack - unicast an ACK msg
 *
 * tipc_net_lock and node lock set
 */

static void bclink_send_ack(struct tipc_node *n_ptr)
{
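        /*
         * The ACK rides on a regular unicast STATE_MSG over one of the
         * peer's active links; the index (addr & 1) follows TIPC's usual
         * "selector & 1" link-selection rule, with the node address as
         * the selector.
         */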
        struct tipc_link *l_ptr = n_ptr->active_links[n_ptr->addr & 1];

        if (l_ptr != NULL)
                tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
}

/**
 * bclink_send_nack - broadcast a NACK msg
 *
 * tipc_net_lock and node lock set
 */

static void bclink_send_nack(struct tipc_node *n_ptr)
{
        struct sk_buff *buf;
        struct tipc_msg *msg;

        if (!less(n_ptr->bclink.gap_after, n_ptr->bclink.gap_to))
                return;

        buf = tipc_buf_acquire(INT_H_SIZE);
        if (buf) {
                msg = buf_msg(buf);
                tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
                         INT_H_SIZE, n_ptr->addr);
                msg_set_non_seq(msg, 1);
                msg_set_mc_netid(msg, tipc_net_id);
                msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
                msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
                msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
                msg_set_bcast_tag(msg, tipc_own_tag);

                tipc_bearer_send(&bcbearer->bearer, buf, NULL);
                bcl->stats.sent_nacks++;
                buf_discard(buf);

                /*
                 * Ensure we don't send another NACK msg to the node
                 * until 16 more deferred messages arrive from it
                 * (i.e. helps prevent all nodes from NACK'ing at same time)
                 */

                n_ptr->bclink.nack_sync = tipc_own_tag;
        }
}

/**
 * tipc_bclink_check_gap - send a NACK if a sequence gap exists
 *
 * tipc_net_lock and node lock set
 */

void tipc_bclink_check_gap(struct tipc_node *n_ptr, u32 last_sent)
{
        if (!n_ptr->bclink.supported ||
            less_eq(last_sent, mod(n_ptr->bclink.last_in)))
                return;

        bclink_set_gap(n_ptr);
        if (n_ptr->bclink.gap_after == n_ptr->bclink.gap_to)
                n_ptr->bclink.gap_to = last_sent;
        bclink_send_nack(n_ptr);
}

/**
 * tipc_bclink_peek_nack - process a NACK msg meant for another node
 *
 * Only tipc_net_lock set.
 */

static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
{
        struct tipc_node *n_ptr = tipc_node_find(dest);
        u32 my_after, my_to;

        if (unlikely(!n_ptr || !tipc_node_is_up(n_ptr)))
                return;
        tipc_node_lock(n_ptr);
        /*
         * Modify gap to suppress unnecessary NACKs from this node
         */
        my_after = n_ptr->bclink.gap_after;
        my_to = n_ptr->bclink.gap_to;

        if (less_eq(gap_after, my_after)) {
                if (less(my_after, gap_to) && less(gap_to, my_to))
                        n_ptr->bclink.gap_after = gap_to;
                else if (less_eq(my_to, gap_to))
                        n_ptr->bclink.gap_to = n_ptr->bclink.gap_after;
        } else if (less_eq(gap_after, my_to)) {
                if (less_eq(my_to, gap_to))
                        n_ptr->bclink.gap_to = gap_after;
        } else {
                /*
                 * Expand gap if missing bufs not in deferred queue:
                 */
                struct sk_buff *buf = n_ptr->bclink.deferred_head;
                u32 prev = n_ptr->bclink.gap_to;

                for (; buf; buf = buf->next) {
                        u32 seqno = buf_seqno(buf);

                        if (mod(seqno - prev) != 1) {
                                buf = NULL;
                                break;
                        }
                        if (seqno == gap_after)
                                break;
                        prev = seqno;
                }
                if (buf == NULL)
                        n_ptr->bclink.gap_to = gap_after;
        }
        /*
         * Some nodes may send a complementary NACK now:
         */
        if (bclink_ack_allowed(sender_tag + 1)) {
                if (n_ptr->bclink.gap_to != n_ptr->bclink.gap_after) {
                        bclink_send_nack(n_ptr);
                        bclink_set_gap(n_ptr);
                }
        }
        tipc_node_unlock(n_ptr);
}

/**
 * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster
 */

int tipc_bclink_send_msg(struct sk_buff *buf)
{
        int res;

        spin_lock_bh(&bc_lock);

        if (!bclink->bcast_nodes.count) {
                res = msg_data_sz(buf_msg(buf));
                buf_discard(buf);
                goto exit;
        }

        res = tipc_link_send_buf(bcl, buf);
        if (likely(res >= 0)) {
                bclink_set_last_sent();
                bcl->stats.queue_sz_counts++;
                bcl->stats.accu_queue_sz += bcl->out_queue_size;
        }
exit:
        spin_unlock_bh(&bc_lock);
        return res;
}

/**
 * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
 *
 * tipc_net_lock is read_locked, no other locks set
 */

void tipc_bclink_recv_pkt(struct sk_buff *buf)
{
        struct tipc_msg *msg = buf_msg(buf);
        struct tipc_node *node;
        u32 next_in;
        u32 seqno;
        struct sk_buff *deferred;

        /* Screen out unwanted broadcast messages */

        if (msg_mc_netid(msg) != tipc_net_id)
                goto exit;

        node = tipc_node_find(msg_prevnode(msg));
        if (unlikely(!node))
                goto exit;

        tipc_node_lock(node);
        if (unlikely(!node->bclink.supported))
                goto unlock;

        if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
                if (msg_type(msg) != STATE_MSG)
                        goto unlock;
                if (msg_destnode(msg) == tipc_own_addr) {
                        tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
                        tipc_node_unlock(node);
                        spin_lock_bh(&bc_lock);
                        bcl->stats.recv_nacks++;
                        bclink->retransmit_to = node;
                        bclink_retransmit_pkt(msg_bcgap_after(msg),
                                              msg_bcgap_to(msg));
                        spin_unlock_bh(&bc_lock);
                } else {
                        tipc_node_unlock(node);
                        tipc_bclink_peek_nack(msg_destnode(msg),
                                              msg_bcast_tag(msg),
                                              msg_bcgap_after(msg),
                                              msg_bcgap_to(msg));
                }
                goto exit;
        }

        /* Handle in-sequence broadcast message */

receive:
        next_in = mod(node->bclink.last_in + 1);
        seqno = msg_seqno(msg);

        if (likely(seqno == next_in)) {
                bcl->stats.recv_info++;
                node->bclink.last_in++;
                bclink_set_gap(node);
                if (unlikely(bclink_ack_allowed(seqno))) {
                        bclink_send_ack(node);
                        bcl->stats.sent_acks++;
                }
                if (likely(msg_isdata(msg))) {
                        tipc_node_unlock(node);
                        if (likely(msg_mcast(msg)))
                                tipc_port_recv_mcast(buf, NULL);
                        else
                                buf_discard(buf);
                } else if (msg_user(msg) == MSG_BUNDLER) {
                        bcl->stats.recv_bundles++;
                        bcl->stats.recv_bundled += msg_msgcnt(msg);
                        tipc_node_unlock(node);
                        tipc_link_recv_bundle(buf);
                } else if (msg_user(msg) == MSG_FRAGMENTER) {
                        bcl->stats.recv_fragments++;
                        if (tipc_link_recv_fragment(&node->bclink.defragm,
                                                    &buf, &msg))
                                bcl->stats.recv_fragmented++;
                        tipc_node_unlock(node);
                        tipc_net_route_msg(buf);
                } else if (msg_user(msg) == NAME_DISTRIBUTOR) {
                        tipc_node_unlock(node);
                        tipc_named_recv(buf);
                } else {
                        tipc_node_unlock(node);
                        buf_discard(buf);
                }
                buf = NULL;
                tipc_node_lock(node);
                deferred = node->bclink.deferred_head;
                if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
                        buf = deferred;
                        msg = buf_msg(buf);
                        node->bclink.deferred_head = deferred->next;
                        goto receive;
                }
        } else if (less(next_in, seqno)) {
                u32 gap_after = node->bclink.gap_after;
                u32 gap_to = node->bclink.gap_to;

                if (tipc_link_defer_pkt(&node->bclink.deferred_head,
                                        &node->bclink.deferred_tail,
                                        buf)) {
                        node->bclink.nack_sync++;
                        bcl->stats.deferred_recv++;
                        if (seqno == mod(gap_after + 1))
                                node->bclink.gap_after = seqno;
                        else if (less(gap_after, seqno) && less(seqno, gap_to))
                                node->bclink.gap_to = seqno;
                }
                buf = NULL;
                if (bclink_ack_allowed(node->bclink.nack_sync)) {
                        if (gap_to != gap_after)
                                bclink_send_nack(node);
                        bclink_set_gap(node);
                }
        } else {
                bcl->stats.duplicates++;
        }
unlock:
        tipc_node_unlock(node);
exit:
        buf_discard(buf);
}

u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
        return (n_ptr->bclink.supported &&
                (tipc_bclink_get_last_sent() != n_ptr->bclink.acked));
}


/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send packet over as many bearers as necessary to reach all nodes
 * that have joined the broadcast link.
 *
 * Returns 0 (packet sent successfully) under all circumstances,
 * since the broadcast link's pseudo-bearer never blocks
 */

static int tipc_bcbearer_send(struct sk_buff *buf,
                              struct tipc_bearer *unused1,
                              struct tipc_media_addr *unused2)
{
        int bp_index;

        /*
         * Prepare broadcast link message for reliable transmission,
         * if first time trying to send it;
         * preparation is skipped for broadcast link protocol messages
         * since they are sent in an unreliable manner and don't need it
         */

        if (likely(!msg_non_seq(buf_msg(buf)))) {
                struct tipc_msg *msg;

                bcbuf_set_acks(buf, bclink->bcast_nodes.count);
                msg = buf_msg(buf);
                msg_set_non_seq(msg, 1);
                msg_set_mc_netid(msg, tipc_net_id);
                bcl->stats.sent_info++;

                if (WARN_ON(!bclink->bcast_nodes.count)) {
                        dump_stack();
                        return 0;
                }
        }

        /* Send buffer over bearers until all targets reached */

        bcbearer->remains = bclink->bcast_nodes;

        for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
                struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
                struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;

                if (!p)
                        break;  /* no more bearers to try */

                tipc_nmap_diff(&bcbearer->remains, &p->nodes, &bcbearer->remains_new);
                if (bcbearer->remains_new.count == bcbearer->remains.count)
                        continue;       /* bearer pair doesn't add anything */

                if (p->blocked ||
                    p->media->send_msg(buf, p, &p->media->bcast_addr)) {
                        /* unable to send on primary bearer */
                        if (!s || s->blocked ||
                            s->media->send_msg(buf, s,
                                               &s->media->bcast_addr)) {
                                /* unable to send on either bearer */
                                continue;
                        }
                }

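                /*
                 * Swap roles within the pair so the next packet tries
                 * the other bearer first: successive sends alternate
                 * between the two, and a bearer that just failed is
                 * demoted to secondary.
                 */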
                if (s) {
                        bcbearer->bpairs[bp_index].primary = s;
                        bcbearer->bpairs[bp_index].secondary = p;
                }

                if (bcbearer->remains_new.count == 0)
                        break;  /* all targets reached */

                bcbearer->remains = bcbearer->remains_new;
        }

        return 0;
}

/**
 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
 */

void tipc_bcbearer_sort(void)
{
        struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
        struct tipc_bcbearer_pair *bp_curr;
        int b_index;
        int pri;

        spin_lock_bh(&bc_lock);

        /* Group bearers by priority (can assume max of two per priority) */

        memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));

        for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
                struct tipc_bearer *b = &tipc_bearers[b_index];

                if (!b->active || !b->nodes.count)
                        continue;

                if (!bp_temp[b->priority].primary)
                        bp_temp[b->priority].primary = b;
                else
                        bp_temp[b->priority].secondary = b;
        }

        /* Create array of bearer pairs for broadcasting */

        bp_curr = bcbearer->bpairs;
        memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));

        for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {

                if (!bp_temp[pri].primary)
                        continue;

                bp_curr->primary = bp_temp[pri].primary;

                if (bp_temp[pri].secondary) {
                        if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
                                            &bp_temp[pri].secondary->nodes)) {
                                bp_curr->secondary = bp_temp[pri].secondary;
                        } else {
                                bp_curr++;
                                bp_curr->primary = bp_temp[pri].secondary;
                        }
                }

                bp_curr++;
        }

        spin_unlock_bh(&bc_lock);
}


int tipc_bclink_stats(char *buf, const u32 buf_size)
{
        struct print_buf pb;

        if (!bcl)
                return 0;

        tipc_printbuf_init(&pb, buf, buf_size);

        spin_lock_bh(&bc_lock);

        tipc_printf(&pb, "Link <%s>\n"
                         "  Window:%u packets\n",
                    bcl->name, bcl->queue_limit[0]);
        tipc_printf(&pb, "  RX packets:%u fragments:%u/%u bundles:%u/%u\n",
                    bcl->stats.recv_info,
                    bcl->stats.recv_fragments,
                    bcl->stats.recv_fragmented,
                    bcl->stats.recv_bundles,
                    bcl->stats.recv_bundled);
        tipc_printf(&pb, "  TX packets:%u fragments:%u/%u bundles:%u/%u\n",
                    bcl->stats.sent_info,
                    bcl->stats.sent_fragments,
                    bcl->stats.sent_fragmented,
                    bcl->stats.sent_bundles,
                    bcl->stats.sent_bundled);
        tipc_printf(&pb, "  RX naks:%u defs:%u dups:%u\n",
                    bcl->stats.recv_nacks,
                    bcl->stats.deferred_recv,
                    bcl->stats.duplicates);
        tipc_printf(&pb, "  TX naks:%u acks:%u dups:%u\n",
                    bcl->stats.sent_nacks,
                    bcl->stats.sent_acks,
                    bcl->stats.retransmitted);
        tipc_printf(&pb, "  Congestion bearer:%u link:%u  Send queue max:%u avg:%u\n",
                    bcl->stats.bearer_congs,
                    bcl->stats.link_congs,
                    bcl->stats.max_queue_sz,
                    bcl->stats.queue_sz_counts
                    ? (bcl->stats.accu_queue_sz / bcl->stats.queue_sz_counts)
                    : 0);

        spin_unlock_bh(&bc_lock);
        return tipc_printbuf_validate(&pb);
}

int tipc_bclink_reset_stats(void)
{
        if (!bcl)
                return -ENOPROTOOPT;

        spin_lock_bh(&bc_lock);
        memset(&bcl->stats, 0, sizeof(bcl->stats));
        spin_unlock_bh(&bc_lock);
        return 0;
}

int tipc_bclink_set_queue_limits(u32 limit)
{
        if (!bcl)
                return -ENOPROTOOPT;
        if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
                return -EINVAL;

        spin_lock_bh(&bc_lock);
        tipc_link_set_queue_limits(bcl, limit);
        spin_unlock_bh(&bc_lock);
        return 0;
}

void tipc_bclink_init(void)
{
        INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
        bcbearer->bearer.media = &bcbearer->media;
        bcbearer->media.send_msg = tipc_bcbearer_send;
        sprintf(bcbearer->media.name, "tipc-broadcast");

        INIT_LIST_HEAD(&bcl->waiting_ports);
        bcl->next_out_no = 1;
        spin_lock_init(&bclink->node.lock);
        bcl->owner = &bclink->node;
        bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
        tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
        bcl->b_ptr = &bcbearer->bearer;
        bcl->state = WORKING_WORKING;
        strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
}

void tipc_bclink_stop(void)
{
        spin_lock_bh(&bc_lock);
        tipc_link_stop(bcl);
        spin_unlock_bh(&bc_lock);

        memset(bclink, 0, sizeof(*bclink));
        memset(bcbearer, 0, sizeof(*bcbearer));
}


/**
 * tipc_nmap_add - add a node to a node map
 */

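/*
 * A node map is a bitmap: bit (n % WSIZE) of map[n / WSIZE] is set when
 * node n is a member, and 'count' caches the number of bits set.
 */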
void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
{
        int n = tipc_node(node);
        int w = n / WSIZE;
        u32 mask = (1 << (n % WSIZE));

        if ((nm_ptr->map[w] & mask) == 0) {
                nm_ptr->count++;
                nm_ptr->map[w] |= mask;
        }
}

/**
 * tipc_nmap_remove - remove a node from a node map
 */

void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
{
        int n = tipc_node(node);
        int w = n / WSIZE;
        u32 mask = (1 << (n % WSIZE));

        if ((nm_ptr->map[w] & mask) != 0) {
                nm_ptr->map[w] &= ~mask;
                nm_ptr->count--;
        }
}

/**
 * tipc_nmap_diff - find differences between node maps
 * @nm_a: input node map A
 * @nm_b: input node map B
 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
 */

static void tipc_nmap_diff(struct tipc_node_map *nm_a,
                           struct tipc_node_map *nm_b,
                           struct tipc_node_map *nm_diff)
{
        int stop = ARRAY_SIZE(nm_a->map);
        int w;
        int b;
        u32 map;

        memset(nm_diff, 0, sizeof(*nm_diff));
        for (w = 0; w < stop; w++) {
                map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
                nm_diff->map[w] = map;
                if (map != 0) {
                        for (b = 0; b < WSIZE; b++) {
                                if (map & (1 << b))
                                        nm_diff->count++;
                        }
                }
        }
}

/**
 * tipc_port_list_add - add a port to a port list, ensuring no duplicates
 */

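/*
 * A port list is a chain of blocks, each holding up to PLSIZE port
 * identifiers; 'count' in the head block records the total number of
 * entries across the whole chain.
 */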
void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port)
{
        struct tipc_port_list *item = pl_ptr;
        int i;
        int item_sz = PLSIZE;
        int cnt = pl_ptr->count;

        for (; ; cnt -= item_sz, item = item->next) {
                if (cnt < PLSIZE)
                        item_sz = cnt;
                for (i = 0; i < item_sz; i++)
                        if (item->ports[i] == port)
                                return;
                if (i < PLSIZE) {
                        item->ports[i] = port;
                        pl_ptr->count++;
                        return;
                }
                if (!item->next) {
                        item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
                        if (!item->next) {
                                warn("Incomplete multicast delivery, no memory\n");
                                return;
                        }
                        item->next->next = NULL;
                }
        }
}

/**
 * tipc_port_list_free - free dynamically created entries in port_list chain
 */

void tipc_port_list_free(struct tipc_port_list *pl_ptr)
{
        struct tipc_port_list *item;
        struct tipc_port_list *next;

        for (item = pl_ptr->next; item; item = next) {
                next = item->next;
                kfree(item);
        }
}