1 /*
2  * net/tipc/bcast.c: TIPC broadcast code
3  *
4  * Copyright (c) 2004-2006, Ericsson AB
5  * Copyright (c) 2004, Intel Corporation.
6  * Copyright (c) 2005, 2010-2011, Wind River Systems
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. Neither the names of the copyright holders nor the names of its
18  *    contributors may be used to endorse or promote products derived from
19  *    this software without specific prior written permission.
20  *
21  * Alternatively, this software may be distributed under the terms of the
22  * GNU General Public License ("GPL") version 2 as published by the Free
23  * Software Foundation.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
26  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
29  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37
38 #include "core.h"
39 #include "link.h"
40 #include "port.h"
41 #include "bcast.h"
42 #include "name_distr.h"
43
44 #define MAX_PKT_DEFAULT_MCAST 1500      /* bcast link max packet size (fixed) */
45
46 #define BCLINK_WIN_DEFAULT 20           /* bcast link window size (default) */
47
48 /**
49  * struct bcbearer_pair - a pair of bearers used by broadcast link
50  * @primary: pointer to primary bearer
51  * @secondary: pointer to secondary bearer
52  *
53  * Bearers must have the same priority and the same set of reachable destinations
54  * to be paired.
55  */
56
57 struct bcbearer_pair {
58         struct tipc_bearer *primary;
59         struct tipc_bearer *secondary;
60 };
61
62 /**
63  * struct bcbearer - bearer used by broadcast link
64  * @bearer: (non-standard) broadcast bearer structure
65  * @media: (non-standard) broadcast media structure
66  * @bpairs: array of bearer pairs
67  * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
68  * @remains: temporary node map used by tipc_bcbearer_send()
69  * @remains_new: temporary node map used by tipc_bcbearer_send()
70  *
71  * Note: The fields labelled "temporary" are incorporated into the bearer
72  * to avoid consuming potentially limited stack space through the use of
73  * large local variables within multicast routines.  Concurrent access is
74  * prevented through use of the spinlock "bc_lock".
75  */
76
77 struct bcbearer {
78         struct tipc_bearer bearer;
79         struct media media;
80         struct bcbearer_pair bpairs[MAX_BEARERS];
81         struct bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
82         struct tipc_node_map remains;
83         struct tipc_node_map remains_new;
84 };
85
86 /**
87  * struct bclink - link used for broadcast messages
88  * @link: (non-standard) broadcast link structure
89  * @node: (non-standard) node structure representing broadcast link's peer node
90  * @retransmit_to: node that most recently requested a retransmit
91  *
92  * Handles sequence numbering, fragmentation, bundling, etc.
93  */
94
95 struct bclink {
96         struct link link;
97         struct tipc_node node;
98         struct tipc_node *retransmit_to;
99 };
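
/*
 * Representing the broadcast link's peer as a dummy node lets the generic
 * unicast link code operate on the broadcast link without special-casing;
 * the link is created with this node as its owner in tipc_bclink_init().
 */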
100
101
102 static struct bcbearer *bcbearer;
103 static struct bclink *bclink;
104 static struct link *bcl;
105 static DEFINE_SPINLOCK(bc_lock);
106
107 /* broadcast-capable node map */
108 struct tipc_node_map tipc_bcast_nmap;
109
110 const char tipc_bclink_name[] = "broadcast-link";
111
112 static void tipc_nmap_diff(struct tipc_node_map *nm_a,
113                            struct tipc_node_map *nm_b,
114                            struct tipc_node_map *nm_diff);
115
116 static u32 buf_seqno(struct sk_buff *buf)
117 {
118         return msg_seqno(buf_msg(buf));
119 }
120
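/*
 * The broadcast code borrows the skb control block's handle pointer as a
 * plain counter: it holds the number of nodes that still have to acknowledge
 * the buffer.  It is set from tipc_bcast_nmap.count when the buffer is first
 * broadcast and decremented as acknowledgements arrive; the buffer is freed
 * once the count reaches zero.
 */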
121 static u32 bcbuf_acks(struct sk_buff *buf)
122 {
123         return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
124 }
125
126 static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
127 {
128         TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
129 }
130
131 static void bcbuf_decr_acks(struct sk_buff *buf)
132 {
133         bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
134 }
135
136
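/*
 * The broadcast link reuses the fsm_msg_cnt field of its struct link to
 * record the sequence number of the last packet actually sent: one less
 * than the first unsent packet when the send queue is congested, otherwise
 * one less than the next sequence number to be assigned.
 */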
137 static void bclink_set_last_sent(void)
138 {
139         if (bcl->next_out)
140                 bcl->fsm_msg_cnt = mod(buf_seqno(bcl->next_out) - 1);
141         else
142                 bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
143 }
144
145 u32 tipc_bclink_get_last_sent(void)
146 {
147         return bcl->fsm_msg_cnt;
148 }
149
150 /**
151  * bclink_set_gap - set gap according to contents of current deferred pkt queue
152  *
153  * Called with 'node' locked, bc_lock unlocked
154  */
155
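/*
 * On return, (gap_after, gap_to] is the range of sequence numbers believed
 * to be missing: gap_after is the last in-sequence packet received, and
 * gap_to is either the same value (no gap) or one less than the first
 * packet waiting in the deferred queue.
 */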
156 static void bclink_set_gap(struct tipc_node *n_ptr)
157 {
158         struct sk_buff *buf = n_ptr->bclink.deferred_head;
159
160         n_ptr->bclink.gap_after = n_ptr->bclink.gap_to =
161                 mod(n_ptr->bclink.last_in);
162         if (unlikely(buf != NULL))
163                 n_ptr->bclink.gap_to = mod(buf_seqno(buf) - 1);
164 }
165
166 /**
167  * bclink_ack_allowed - test if ACK or NACK message can be sent at this moment
168  *
169  * This mechanism endeavours to prevent all nodes in the network from trying
170  * to ACK or NACK at the same time.
171  *
172  * Note: TIPC uses a different trigger to distribute ACKs than it does to
173  *       distribute NACKs, but tries to use the same spacing (divide by 16).
174  */
175
176 static int bclink_ack_allowed(u32 n)
177 {
178         return (n % TIPC_MIN_LINK_WIN) == tipc_own_tag;
179 }
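
/*
 * Worked example: with TIPC_MIN_LINK_WIN == 16 (the "divide by 16" spacing
 * mentioned above), a node whose tipc_own_tag is 3 answers only sequence
 * numbers 3, 19, 35, ..., so nodes with different tags answer different
 * packets and the ACK/NACK load is spread over the sequence space.
 */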
180
181
182 /**
183  * tipc_bclink_retransmit_to - get most recent node to request retransmission
184  *
185  * Called with bc_lock locked
186  */
187
188 struct tipc_node *tipc_bclink_retransmit_to(void)
189 {
190         return bclink->retransmit_to;
191 }
192
193 /**
194  * bclink_retransmit_pkt - retransmit broadcast packets
195  * @after: sequence number of last packet to *not* retransmit
196  * @to: sequence number of last packet to retransmit
197  *
198  * Called with bc_lock locked
199  */
200
201 static void bclink_retransmit_pkt(u32 after, u32 to)
202 {
203         struct sk_buff *buf;
204
205         buf = bcl->first_out;
206         while (buf && less_eq(buf_seqno(buf), after))
207                 buf = buf->next;
208         tipc_link_retransmit(bcl, buf, mod(to - after));
209 }
210
211 /**
212  * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
213  * @n_ptr: node that sent acknowledgement info
214  * @acked: broadcast sequence # that has been acknowledged
215  *
216  * Node is locked, bc_lock unlocked.
217  */
218
219 void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
220 {
221         struct sk_buff *crs;
222         struct sk_buff *next;
223         unsigned int released = 0;
224
225         if (less_eq(acked, n_ptr->bclink.acked))
226                 return;
227
228         spin_lock_bh(&bc_lock);
229
230         /* Skip over packets that node has previously acknowledged */
231
232         crs = bcl->first_out;
233         while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked))
234                 crs = crs->next;
235
236         /* Update packets that node is now acknowledging */
237
238         while (crs && less_eq(buf_seqno(crs), acked)) {
239                 next = crs->next;
240                 bcbuf_decr_acks(crs);
241                 if (bcbuf_acks(crs) == 0) {
242                         bcl->first_out = next;
243                         bcl->out_queue_size--;
244                         buf_discard(crs);
245                         released = 1;
246                 }
247                 crs = next;
248         }
249         n_ptr->bclink.acked = acked;
250
251         /* Try resolving broadcast link congestion, if necessary */
252
253         if (unlikely(bcl->next_out)) {
254                 tipc_link_push_queue(bcl);
255                 bclink_set_last_sent();
256         }
257         if (unlikely(released && !list_empty(&bcl->waiting_ports)))
258                 tipc_link_wakeup_ports(bcl, 0);
259         spin_unlock_bh(&bc_lock);
260 }
261
262 /**
263  * bclink_send_ack - unicast an ACK msg
264  *
265  * tipc_net_lock and node lock set
266  */
267
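/*
 * Note that the ACK is not sent on the broadcast bearer: it is carried by a
 * protocol (STATE) message on one of the unicast links to the peer, picked
 * deterministically from the low bit of the peer's address.
 */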
268 static void bclink_send_ack(struct tipc_node *n_ptr)
269 {
270         struct link *l_ptr = n_ptr->active_links[n_ptr->addr & 1];
271
272         if (l_ptr != NULL)
273                 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
274 }
275
276 /**
277  * bclink_send_nack - broadcast a NACK msg
278  *
279  * tipc_net_lock and node lock set
280  */
281
282 static void bclink_send_nack(struct tipc_node *n_ptr)
283 {
284         struct sk_buff *buf;
285         struct tipc_msg *msg;
286
287         if (!less(n_ptr->bclink.gap_after, n_ptr->bclink.gap_to))
288                 return;
289
290         buf = tipc_buf_acquire(INT_H_SIZE);
291         if (buf) {
292                 msg = buf_msg(buf);
293                 tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
294                          INT_H_SIZE, n_ptr->addr);
295                 msg_set_non_seq(msg, 1);
296                 msg_set_mc_netid(msg, tipc_net_id);
297                 msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
298                 msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
299                 msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
300                 msg_set_bcast_tag(msg, tipc_own_tag);
301
302                 tipc_bearer_send(&bcbearer->bearer, buf, NULL);
303                 bcl->stats.sent_nacks++;
304                 buf_discard(buf);
305
306                 /*
307                  * Ensure we don't send another NACK msg to the node
308                  * until 16 more deferred messages arrive from it
309                  * (i.e. helps prevent all nodes from NACK'ing at the same time)
310                  */
311
312                 n_ptr->bclink.nack_sync = tipc_own_tag;
313         }
314 }
315
316 /**
317  * tipc_bclink_check_gap - send a NACK if a sequence gap exists
318  *
319  * tipc_net_lock and node lock set
320  */
321
322 void tipc_bclink_check_gap(struct tipc_node *n_ptr, u32 last_sent)
323 {
324         if (!n_ptr->bclink.supported ||
325             less_eq(last_sent, mod(n_ptr->bclink.last_in)))
326                 return;
327
328         bclink_set_gap(n_ptr);
329         if (n_ptr->bclink.gap_after == n_ptr->bclink.gap_to)
330                 n_ptr->bclink.gap_to = last_sent;
331         bclink_send_nack(n_ptr);
332 }
333
334 /**
335  * tipc_bclink_peek_nack - process a NACK msg meant for another node
336  *
337  * Only tipc_net_lock set.
338  */
339
340 static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
341 {
342         struct tipc_node *n_ptr = tipc_node_find(dest);
343         u32 my_after, my_to;
344
345         if (unlikely(!n_ptr || !tipc_node_is_up(n_ptr)))
346                 return;
347         tipc_node_lock(n_ptr);
348         /*
349          * Modify gap to suppress unnecessary NACKs from this node
350          */
351         my_after = n_ptr->bclink.gap_after;
352         my_to = n_ptr->bclink.gap_to;
353
354         if (less_eq(gap_after, my_after)) {
355                 if (less(my_after, gap_to) && less(gap_to, my_to))
356                         n_ptr->bclink.gap_after = gap_to;
357                 else if (less_eq(my_to, gap_to))
358                         n_ptr->bclink.gap_to = n_ptr->bclink.gap_after;
359         } else if (less_eq(gap_after, my_to)) {
360                 if (less_eq(my_to, gap_to))
361                         n_ptr->bclink.gap_to = gap_after;
362         } else {
363                 /*
364                  * Expand gap if missing bufs not in deferred queue:
365                  */
366                 struct sk_buff *buf = n_ptr->bclink.deferred_head;
367                 u32 prev = n_ptr->bclink.gap_to;
368
369                 for (; buf; buf = buf->next) {
370                         u32 seqno = buf_seqno(buf);
371
372                         if (mod(seqno - prev) != 1) {
373                                 buf = NULL;
374                                 break;
375                         }
376                         if (seqno == gap_after)
377                                 break;
378                         prev = seqno;
379                 }
380                 if (buf == NULL)
381                         n_ptr->bclink.gap_to = gap_after;
382         }
383         /*
384          * Some nodes may send a complementary NACK now:
385          */
386         if (bclink_ack_allowed(sender_tag + 1)) {
387                 if (n_ptr->bclink.gap_to != n_ptr->bclink.gap_after) {
388                         bclink_send_nack(n_ptr);
389                         bclink_set_gap(n_ptr);
390                 }
391         }
392         tipc_node_unlock(n_ptr);
393 }
394
395 /**
396  * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster
397  */
398
399 int tipc_bclink_send_msg(struct sk_buff *buf)
400 {
401         int res;
402
403         spin_lock_bh(&bc_lock);
404
405         res = tipc_link_send_buf(bcl, buf);
406         if (likely(res > 0))
407                 bclink_set_last_sent();
408
409         bcl->stats.queue_sz_counts++;
410         bcl->stats.accu_queue_sz += bcl->out_queue_size;
411
412         spin_unlock_bh(&bc_lock);
413         return res;
414 }
415
416 /**
417  * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
418  *
419  * tipc_net_lock is read_locked, no other locks set
420  */
421
422 void tipc_bclink_recv_pkt(struct sk_buff *buf)
423 {
424         struct tipc_msg *msg = buf_msg(buf);
425         struct tipc_node *node;
426         u32 next_in;
427         u32 seqno;
428         struct sk_buff *deferred;
429
430         /* Screen out unwanted broadcast messages */
431
432         if (msg_mc_netid(msg) != tipc_net_id)
433                 goto exit;
434
435         node = tipc_node_find(msg_prevnode(msg));
436         if (unlikely(!node))
437                 goto exit;
438
439         tipc_node_lock(node);
440         if (unlikely(!node->bclink.supported))
441                 goto unlock;
442
443         if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
444                 if (msg_type(msg) != STATE_MSG)
445                         goto unlock;
446                 if (msg_destnode(msg) == tipc_own_addr) {
447                         tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
448                         tipc_node_unlock(node);
449                         spin_lock_bh(&bc_lock);
450                         bcl->stats.recv_nacks++;
451                         bclink->retransmit_to = node;
452                         bclink_retransmit_pkt(msg_bcgap_after(msg),
453                                               msg_bcgap_to(msg));
454                         spin_unlock_bh(&bc_lock);
455                 } else {
456                         tipc_node_unlock(node);
457                         tipc_bclink_peek_nack(msg_destnode(msg),
458                                               msg_bcast_tag(msg),
459                                               msg_bcgap_after(msg),
460                                               msg_bcgap_to(msg));
461                 }
462                 goto exit;
463         }
464
465         /* Handle in-sequence broadcast message */
466
467 receive:
468         next_in = mod(node->bclink.last_in + 1);
469         seqno = msg_seqno(msg);
470
471         if (likely(seqno == next_in)) {
472                 bcl->stats.recv_info++;
473                 node->bclink.last_in++;
474                 bclink_set_gap(node);
475                 if (unlikely(bclink_ack_allowed(seqno))) {
476                         bclink_send_ack(node);
477                         bcl->stats.sent_acks++;
478                 }
479                 if (likely(msg_isdata(msg))) {
480                         tipc_node_unlock(node);
481                         if (likely(msg_mcast(msg)))
482                                 tipc_port_recv_mcast(buf, NULL);
483                         else
484                                 buf_discard(buf);
485                 } else if (msg_user(msg) == MSG_BUNDLER) {
486                         bcl->stats.recv_bundles++;
487                         bcl->stats.recv_bundled += msg_msgcnt(msg);
488                         tipc_node_unlock(node);
489                         tipc_link_recv_bundle(buf);
490                 } else if (msg_user(msg) == MSG_FRAGMENTER) {
491                         bcl->stats.recv_fragments++;
492                         if (tipc_link_recv_fragment(&node->bclink.defragm,
493                                                     &buf, &msg))
494                                 bcl->stats.recv_fragmented++;
495                         tipc_node_unlock(node);
496                         tipc_net_route_msg(buf);
497                 } else if (msg_user(msg) == NAME_DISTRIBUTOR) {
498                         tipc_node_unlock(node);
499                         tipc_named_recv(buf);
500                 } else {
501                         tipc_node_unlock(node);
502                         buf_discard(buf);
503                 }
504                 buf = NULL;
505                 tipc_node_lock(node);
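                /*
                 * If the next expected message is already waiting at the
                 * head of the deferred queue, loop back and deliver it too.
                 */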
506                 deferred = node->bclink.deferred_head;
507                 if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
508                         buf = deferred;
509                         msg = buf_msg(buf);
510                         node->bclink.deferred_head = deferred->next;
511                         goto receive;
512                 }
513         } else if (less(next_in, seqno)) {
514                 u32 gap_after = node->bclink.gap_after;
515                 u32 gap_to = node->bclink.gap_to;
516
517                 if (tipc_link_defer_pkt(&node->bclink.deferred_head,
518                                         &node->bclink.deferred_tail,
519                                         buf)) {
520                         node->bclink.nack_sync++;
521                         bcl->stats.deferred_recv++;
522                         if (seqno == mod(gap_after + 1))
523                                 node->bclink.gap_after = seqno;
524                         else if (less(gap_after, seqno) && less(seqno, gap_to))
525                                 node->bclink.gap_to = seqno;
526                 }
527                 buf = NULL;
528                 if (bclink_ack_allowed(node->bclink.nack_sync)) {
529                         if (gap_to != gap_after)
530                                 bclink_send_nack(node);
531                         bclink_set_gap(node);
532                 }
533         } else {
534                 bcl->stats.duplicates++;
535         }
536 unlock:
537         tipc_node_unlock(node);
538 exit:
539         buf_discard(buf);
540 }
541
542 u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
543 {
544         return (n_ptr->bclink.supported &&
545                 (tipc_bclink_get_last_sent() != n_ptr->bclink.acked));
546 }
547
548
549 /**
550  * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
551  *
552  * Send packet over as many bearers as necessary to reach all nodes
553  * that have joined the broadcast link.
554  *
555  * Returns 0 (packet sent successfully) under all circumstances,
556  * since the broadcast link's pseudo-bearer never blocks
557  */
558
559 static int tipc_bcbearer_send(struct sk_buff *buf,
560                               struct tipc_bearer *unused1,
561                               struct tipc_media_addr *unused2)
562 {
563         int bp_index;
564
565         /*
566          * Prepare broadcast link message for reliable transmission,
567          * if first time trying to send it;
568          * preparation is skipped for broadcast link protocol messages
569          * since they are sent in an unreliable manner and don't need it
570          */
571
572         if (likely(!msg_non_seq(buf_msg(buf)))) {
573                 struct tipc_msg *msg;
574
575                 bcbuf_set_acks(buf, tipc_bcast_nmap.count);
576                 msg = buf_msg(buf);
577                 msg_set_non_seq(msg, 1);
578                 msg_set_mc_netid(msg, tipc_net_id);
579                 bcl->stats.sent_info++;
580
581                 if (WARN_ON(!tipc_bcast_nmap.count)) {
582                         dump_stack();
583                         return 0;
584                 }
585         }
586
587         /* Send buffer over bearers until all targets reached */
588
589         bcbearer->remains = tipc_bcast_nmap;
590
591         for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
592                 struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
593                 struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
594
595                 if (!p)
596                         break;  /* no more bearers to try */
597
598                 tipc_nmap_diff(&bcbearer->remains, &p->nodes, &bcbearer->remains_new);
599                 if (bcbearer->remains_new.count == bcbearer->remains.count)
600                         continue;       /* bearer pair doesn't add anything */
601
602                 if (p->blocked ||
603                     p->media->send_msg(buf, p, &p->media->bcast_addr)) {
604                         /* unable to send on primary bearer */
605                         if (!s || s->blocked ||
606                             s->media->send_msg(buf, s,
607                                                &s->media->bcast_addr)) {
608                                 /* unable to send on either bearer */
609                                 continue;
610                         }
611                 }
612
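                /*
                 * Swap the pair so the next broadcast tries the other bearer
                 * first: equal-priority bearers share the load, and a bearer
                 * that just failed is demoted behind the one that just worked.
                 */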
613                 if (s) {
614                         bcbearer->bpairs[bp_index].primary = s;
615                         bcbearer->bpairs[bp_index].secondary = p;
616                 }
617
618                 if (bcbearer->remains_new.count == 0)
619                         break;  /* all targets reached */
620
621                 bcbearer->remains = bcbearer->remains_new;
622         }
623
624         return 0;
625 }
626
627 /**
628  * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
629  */
630
631 void tipc_bcbearer_sort(void)
632 {
633         struct bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
634         struct bcbearer_pair *bp_curr;
635         int b_index;
636         int pri;
637
638         spin_lock_bh(&bc_lock);
639
640         /* Group bearers by priority (can assume max of two per priority) */
641
642         memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));
643
644         for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
645                 struct tipc_bearer *b = &tipc_bearers[b_index];
646
647                 if (!b->active || !b->nodes.count)
648                         continue;
649
650                 if (!bp_temp[b->priority].primary)
651                         bp_temp[b->priority].primary = b;
652                 else
653                         bp_temp[b->priority].secondary = b;
654         }
655
656         /* Create array of bearer pairs for broadcasting */
657
658         bp_curr = bcbearer->bpairs;
659         memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));
660
661         for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {
662
663                 if (!bp_temp[pri].primary)
664                         continue;
665
666                 bp_curr->primary = bp_temp[pri].primary;
667
668                 if (bp_temp[pri].secondary) {
669                         if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
670                                             &bp_temp[pri].secondary->nodes)) {
671                                 bp_curr->secondary = bp_temp[pri].secondary;
672                         } else {
673                                 bp_curr++;
674                                 bp_curr->primary = bp_temp[pri].secondary;
675                         }
676                 }
677
678                 bp_curr++;
679         }
680
681         spin_unlock_bh(&bc_lock);
682 }
683
684
685 int tipc_bclink_stats(char *buf, const u32 buf_size)
686 {
687         struct print_buf pb;
688
689         if (!bcl)
690                 return 0;
691
692         tipc_printbuf_init(&pb, buf, buf_size);
693
694         spin_lock_bh(&bc_lock);
695
696         tipc_printf(&pb, "Link <%s>\n"
697                          "  Window:%u packets\n",
698                     bcl->name, bcl->queue_limit[0]);
699         tipc_printf(&pb, "  RX packets:%u fragments:%u/%u bundles:%u/%u\n",
700                     bcl->stats.recv_info,
701                     bcl->stats.recv_fragments,
702                     bcl->stats.recv_fragmented,
703                     bcl->stats.recv_bundles,
704                     bcl->stats.recv_bundled);
705         tipc_printf(&pb, "  TX packets:%u fragments:%u/%u bundles:%u/%u\n",
706                     bcl->stats.sent_info,
707                     bcl->stats.sent_fragments,
708                     bcl->stats.sent_fragmented,
709                     bcl->stats.sent_bundles,
710                     bcl->stats.sent_bundled);
711         tipc_printf(&pb, "  RX naks:%u defs:%u dups:%u\n",
712                     bcl->stats.recv_nacks,
713                     bcl->stats.deferred_recv,
714                     bcl->stats.duplicates);
715         tipc_printf(&pb, "  TX naks:%u acks:%u dups:%u\n",
716                     bcl->stats.sent_nacks,
717                     bcl->stats.sent_acks,
718                     bcl->stats.retransmitted);
719         tipc_printf(&pb, "  Congestion bearer:%u link:%u  Send queue max:%u avg:%u\n",
720                     bcl->stats.bearer_congs,
721                     bcl->stats.link_congs,
722                     bcl->stats.max_queue_sz,
723                     bcl->stats.queue_sz_counts
724                     ? (bcl->stats.accu_queue_sz / bcl->stats.queue_sz_counts)
725                     : 0);
726
727         spin_unlock_bh(&bc_lock);
728         return tipc_printbuf_validate(&pb);
729 }
730
731 int tipc_bclink_reset_stats(void)
732 {
733         if (!bcl)
734                 return -ENOPROTOOPT;
735
736         spin_lock_bh(&bc_lock);
737         memset(&bcl->stats, 0, sizeof(bcl->stats));
738         spin_unlock_bh(&bc_lock);
739         return 0;
740 }
741
742 int tipc_bclink_set_queue_limits(u32 limit)
743 {
744         if (!bcl)
745                 return -ENOPROTOOPT;
746         if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
747                 return -EINVAL;
748
749         spin_lock_bh(&bc_lock);
750         tipc_link_set_queue_limits(bcl, limit);
751         spin_unlock_bh(&bc_lock);
752         return 0;
753 }
754
755 int tipc_bclink_init(void)
756 {
757         bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
758         bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
759         if (!bcbearer || !bclink) {
760                 warn("Broadcast link creation failed, no memory\n");
761                 kfree(bcbearer);
762                 bcbearer = NULL;
763                 kfree(bclink);
764                 bclink = NULL;
765                 return -ENOMEM;
766         }
767
768         INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
769         bcbearer->bearer.media = &bcbearer->media;
770         bcbearer->media.send_msg = tipc_bcbearer_send;
771         sprintf(bcbearer->media.name, "tipc-broadcast");
772
773         bcl = &bclink->link;
774         INIT_LIST_HEAD(&bcl->waiting_ports);
775         bcl->next_out_no = 1;
776         spin_lock_init(&bclink->node.lock);
777         bcl->owner = &bclink->node;
778         bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
779         tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
780         bcl->b_ptr = &bcbearer->bearer;
781         bcl->state = WORKING_WORKING;
782         strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
783
784         return 0;
785 }
786
787 void tipc_bclink_stop(void)
788 {
789         spin_lock_bh(&bc_lock);
790         if (bcbearer) {
791                 tipc_link_stop(bcl);
792                 bcl = NULL;
793                 kfree(bclink);
794                 bclink = NULL;
795                 kfree(bcbearer);
796                 bcbearer = NULL;
797         }
798         spin_unlock_bh(&bc_lock);
799 }
800
801
802 /**
803  * tipc_nmap_add - add a node to a node map
804  */
805
806 void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
807 {
808         int n = tipc_node(node);
809         int w = n / WSIZE;
810         u32 mask = (1 << (n % WSIZE));
811
812         if ((nm_ptr->map[w] & mask) == 0) {
813                 nm_ptr->count++;
814                 nm_ptr->map[w] |= mask;
815         }
816 }
817
818 /**
819  * tipc_nmap_remove - remove a node from a node map
820  */
821
822 void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
823 {
824         int n = tipc_node(node);
825         int w = n / WSIZE;
826         u32 mask = (1 << (n % WSIZE));
827
828         if ((nm_ptr->map[w] & mask) != 0) {
829                 nm_ptr->map[w] &= ~mask;
830                 nm_ptr->count--;
831         }
832 }
833
834 /**
835  * tipc_nmap_diff - find differences between node maps
836  * @nm_a: input node map A
837  * @nm_b: input node map B
838  * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
839  */
840
841 static void tipc_nmap_diff(struct tipc_node_map *nm_a,
842                            struct tipc_node_map *nm_b,
843                            struct tipc_node_map *nm_diff)
844 {
845         int stop = ARRAY_SIZE(nm_a->map);
846         int w;
847         int b;
848         u32 map;
849
850         memset(nm_diff, 0, sizeof(*nm_diff));
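        /* A ^ (A & B) keeps exactly the bits set in A but not in B (A & ~B) */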
851         for (w = 0; w < stop; w++) {
852                 map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
853                 nm_diff->map[w] = map;
854                 if (map != 0) {
855                         for (b = 0 ; b < WSIZE; b++) {
856                                 if (map & (1 << b))
857                                         nm_diff->count++;
858                         }
859                 }
860         }
861 }
862
863 /**
864  * tipc_port_list_add - add a port to a port list, ensuring no duplicates
865  */
866
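/*
 * The list head doubles as the first array of ports and carries the total
 * count for the whole chain; once its PLSIZE slots are full, further ports
 * spill into dynamically allocated items linked through ->next.
 */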
867 void tipc_port_list_add(struct port_list *pl_ptr, u32 port)
868 {
869         struct port_list *item = pl_ptr;
870         int i;
871         int item_sz = PLSIZE;
872         int cnt = pl_ptr->count;
873
874         for (; ; cnt -= item_sz, item = item->next) {
875                 if (cnt < PLSIZE)
876                         item_sz = cnt;
877                 for (i = 0; i < item_sz; i++)
878                         if (item->ports[i] == port)
879                                 return;
880                 if (i < PLSIZE) {
881                         item->ports[i] = port;
882                         pl_ptr->count++;
883                         return;
884                 }
885                 if (!item->next) {
886                         item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
887                         if (!item->next) {
888                                 warn("Incomplete multicast delivery, no memory\n");
889                                 return;
890                         }
891                         item->next->next = NULL;
892                 }
893         }
894 }
895
896 /**
897  * tipc_port_list_free - free dynamically created entries in port_list chain
898  *
899  */
900
901 void tipc_port_list_free(struct port_list *pl_ptr)
902 {
903         struct port_list *item;
904         struct port_list *next;
905
906         for (item = pl_ptr->next; item; item = next) {
907                 next = item->next;
908                 kfree(item);
909         }
910 }
911