1 /*
2  * net/tipc/link.c: TIPC link code
3  *
4  * Copyright (c) 1996-2007, Ericsson AB
5  * Copyright (c) 2004-2007, 2010-2011, Wind River Systems
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36
37 #include "core.h"
38 #include "link.h"
39 #include "port.h"
40 #include "name_distr.h"
41 #include "discover.h"
42 #include "config.h"
43
44
45 /*
46  * Out-of-range value for link session numbers (session numbers are 16 bits)
47  */
48
49 #define INVALID_SESSION 0x10000
50
51 /*
52  * Link state events:
53  */
54
55 #define  STARTING_EVT    856384768      /* link processing trigger */
56 #define  TRAFFIC_MSG_EVT 560815u        /* rx'd traffic message */
57 #define  TIMEOUT_EVT     560817u        /* link timer expired */
58
59 /*
60  * The following two 'message types' are really just implementation
61  * data conveniently stored in the message header.
62  * They must not be considered part of the protocol.
63  */
64 #define OPEN_MSG   0
65 #define CLOSED_MSG 1
66
67 /*
68  * State value stored in 'exp_msg_count'
69  */
70
71 #define START_CHANGEOVER 100000u
72
73 /**
74  * struct link_name - deconstructed link name
75  * @addr_local: network address of node at this end
76  * @if_local: name of interface at this end
77  * @addr_peer: network address of node at far end
78  * @if_peer: name of interface at far end
79  */
80
81 struct link_name {
82         u32 addr_local;
83         char if_local[TIPC_MAX_IF_NAME];
84         u32 addr_peer;
85         char if_peer[TIPC_MAX_IF_NAME];
86 };
87
88 static void link_handle_out_of_seq_msg(struct link *l_ptr,
89                                        struct sk_buff *buf);
90 static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf);
91 static int  link_recv_changeover_msg(struct link **l_ptr, struct sk_buff **buf);
92 static void link_set_supervision_props(struct link *l_ptr, u32 tolerance);
93 static int  link_send_sections_long(struct tipc_port *sender,
94                                     struct iovec const *msg_sect,
95                                     u32 num_sect, unsigned int total_len,
96                                     u32 destnode);
97 static void link_check_defragm_bufs(struct link *l_ptr);
98 static void link_state_event(struct link *l_ptr, u32 event);
99 static void link_reset_statistics(struct link *l_ptr);
100 static void link_print(struct link *l_ptr, const char *str);
101 static void link_start(struct link *l_ptr);
102 static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf);
103
104 /*
105  *  Simple link routines
106  */
107
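/*
 * align - round a length up to the next 4-byte boundary
 * (e.g. align(5) == 8, align(8) == 8); used when bundling messages
 * at word-aligned offsets in link_bundle_buf().
 */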
108 static unsigned int align(unsigned int i)
109 {
110         return (i + 3) & ~3u;
111 }
112
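/*
 * link_init_max_pkt - initialize MTU negotiation state for a link:
 * max_pkt_target is the bearer MTU rounded down to a multiple of 4
 * (capped at MAX_MSG_SIZE); max_pkt starts at the smaller of that
 * target and MAX_PKT_DEFAULT and may be raised later by MTU probing.
 */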
113 static void link_init_max_pkt(struct link *l_ptr)
114 {
115         u32 max_pkt;
116
117         max_pkt = (l_ptr->b_ptr->mtu & ~3);
118         if (max_pkt > MAX_MSG_SIZE)
119                 max_pkt = MAX_MSG_SIZE;
120
121         l_ptr->max_pkt_target = max_pkt;
122         if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
123                 l_ptr->max_pkt = l_ptr->max_pkt_target;
124         else
125                 l_ptr->max_pkt = MAX_PKT_DEFAULT;
126
127         l_ptr->max_pkt_probes = 0;
128 }
129
130 static u32 link_next_sent(struct link *l_ptr)
131 {
132         if (l_ptr->next_out)
133                 return msg_seqno(buf_msg(l_ptr->next_out));
134         return mod(l_ptr->next_out_no);
135 }
136
137 static u32 link_last_sent(struct link *l_ptr)
138 {
139         return mod(link_next_sent(l_ptr) - 1);
140 }
141
142 /*
143  *  Simple non-static link routines (i.e. referenced outside this file)
144  */
145
146 int tipc_link_is_up(struct link *l_ptr)
147 {
148         if (!l_ptr)
149                 return 0;
150         return link_working_working(l_ptr) || link_working_unknown(l_ptr);
151 }
152
153 int tipc_link_is_active(struct link *l_ptr)
154 {
155         return  (l_ptr->owner->active_links[0] == l_ptr) ||
156                 (l_ptr->owner->active_links[1] == l_ptr);
157 }
158
159 /**
160  * link_name_validate - validate & (optionally) deconstruct link name
161  * @name: ptr to link name string
162  * @name_parts: ptr to area for link name components (or NULL if not needed)
163  *
164  * Returns 1 if link name is valid, otherwise 0.
165  */
166
167 static int link_name_validate(const char *name, struct link_name *name_parts)
168 {
169         char name_copy[TIPC_MAX_LINK_NAME];
170         char *addr_local;
171         char *if_local;
172         char *addr_peer;
173         char *if_peer;
174         char dummy;
175         u32 z_local, c_local, n_local;
176         u32 z_peer, c_peer, n_peer;
177         u32 if_local_len;
178         u32 if_peer_len;
179
180         /* copy link name & ensure length is OK */
181
182         name_copy[TIPC_MAX_LINK_NAME - 1] = 0;
183         /* need above in case non-POSIX strncpy() doesn't pad with nulls */
184         strncpy(name_copy, name, TIPC_MAX_LINK_NAME);
185         if (name_copy[TIPC_MAX_LINK_NAME - 1] != 0)
186                 return 0;
187
188         /* ensure all component parts of link name are present */
189
190         addr_local = name_copy;
191         if_local = strchr(addr_local, ':');
192         if (if_local == NULL)
193                 return 0;
194         *(if_local++) = 0;
195         addr_peer = strchr(if_local, '-');
196         if (addr_peer == NULL)
197                 return 0;
198         *(addr_peer++) = 0;
199         if_local_len = addr_peer - if_local;
200         if_peer = strchr(addr_peer, ':');
201         if (if_peer == NULL)
202                 return 0;
203         *(if_peer++) = 0;
204         if_peer_len = strlen(if_peer) + 1;
205
206         /* validate component parts of link name */
207
208         if ((sscanf(addr_local, "%u.%u.%u%c",
209                     &z_local, &c_local, &n_local, &dummy) != 3) ||
210             (sscanf(addr_peer, "%u.%u.%u%c",
211                     &z_peer, &c_peer, &n_peer, &dummy) != 3) ||
212             (z_local > 255) || (c_local > 4095) || (n_local > 4095) ||
213             (z_peer  > 255) || (c_peer  > 4095) || (n_peer  > 4095) ||
214             (if_local_len <= 1) || (if_local_len > TIPC_MAX_IF_NAME) ||
215             (if_peer_len  <= 1) || (if_peer_len  > TIPC_MAX_IF_NAME) ||
216             (strspn(if_local, tipc_alphabet) != (if_local_len - 1)) ||
217             (strspn(if_peer, tipc_alphabet) != (if_peer_len - 1)))
218                 return 0;
219
220         /* return link name components, if necessary */
221
222         if (name_parts) {
223                 name_parts->addr_local = tipc_addr(z_local, c_local, n_local);
224                 strcpy(name_parts->if_local, if_local);
225                 name_parts->addr_peer = tipc_addr(z_peer, c_peer, n_peer);
226                 strcpy(name_parts->if_peer, if_peer);
227         }
228         return 1;
229 }
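/*
 * Illustrative example: the link name "1.1.10:eth0-1.1.20:eth2" is
 * valid and deconstructs into addr_local 1.1.10 / if_local "eth0" and
 * addr_peer 1.1.20 / if_peer "eth2"; zone, cluster and node components
 * may not exceed 255, 4095 and 4095 respectively.
 */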
230
231 /**
232  * link_timeout - handle expiration of link timer
233  * @l_ptr: pointer to link
234  *
235  * This routine must not grab "tipc_net_lock" to avoid a potential deadlock
236  * with tipc_link_delete().  (There is no risk that the node will be deleted by
237  * another thread because tipc_link_delete() always cancels the link timer before
238  * tipc_node_delete() is called.)
239  */
240
241 static void link_timeout(struct link *l_ptr)
242 {
243         tipc_node_lock(l_ptr->owner);
244
245         /* update counters used in statistical profiling of send traffic */
246
247         l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
248         l_ptr->stats.queue_sz_counts++;
249
250         if (l_ptr->first_out) {
251                 struct tipc_msg *msg = buf_msg(l_ptr->first_out);
252                 u32 length = msg_size(msg);
253
254                 if ((msg_user(msg) == MSG_FRAGMENTER) &&
255                     (msg_type(msg) == FIRST_FRAGMENT)) {
256                         length = msg_size(msg_get_wrapped(msg));
257                 }
258                 if (length) {
259                         l_ptr->stats.msg_lengths_total += length;
260                         l_ptr->stats.msg_length_counts++;
261                         if (length <= 64)
262                                 l_ptr->stats.msg_length_profile[0]++;
263                         else if (length <= 256)
264                                 l_ptr->stats.msg_length_profile[1]++;
265                         else if (length <= 1024)
266                                 l_ptr->stats.msg_length_profile[2]++;
267                         else if (length <= 4096)
268                                 l_ptr->stats.msg_length_profile[3]++;
269                         else if (length <= 16384)
270                                 l_ptr->stats.msg_length_profile[4]++;
271                         else if (length <= 32768)
272                                 l_ptr->stats.msg_length_profile[5]++;
273                         else
274                                 l_ptr->stats.msg_length_profile[6]++;
275                 }
276         }
277
278         /* do all other link processing performed on a periodic basis */
279
280         link_check_defragm_bufs(l_ptr);
281
282         link_state_event(l_ptr, TIMEOUT_EVT);
283
284         if (l_ptr->next_out)
285                 tipc_link_push_queue(l_ptr);
286
287         tipc_node_unlock(l_ptr->owner);
288 }
289
290 static void link_set_timer(struct link *l_ptr, u32 time)
291 {
292         k_start_timer(&l_ptr->timer, time);
293 }
294
295 /**
296  * tipc_link_create - create a new link
297  * @n_ptr: pointer to associated node
298  * @b_ptr: pointer to associated bearer
299  * @media_addr: media address to use when sending messages over link
300  *
301  * Returns pointer to link, or NULL on failure.
302  */
303
304 struct link *tipc_link_create(struct tipc_node *n_ptr,
305                               struct tipc_bearer *b_ptr,
306                               const struct tipc_media_addr *media_addr)
307 {
308         struct link *l_ptr;
309         struct tipc_msg *msg;
310         char *if_name;
311         char addr_string[16];
312         u32 peer = n_ptr->addr;
313
314         if (n_ptr->link_cnt >= 2) {
315                 tipc_addr_string_fill(addr_string, n_ptr->addr);
316                 err("Attempt to establish third link to %s\n", addr_string);
317                 return NULL;
318         }
319
320         if (n_ptr->links[b_ptr->identity]) {
321                 tipc_addr_string_fill(addr_string, n_ptr->addr);
322                 err("Attempt to establish second link on <%s> to %s\n",
323                     b_ptr->name, addr_string);
324                 return NULL;
325         }
326
327         l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
328         if (!l_ptr) {
329                 warn("Link creation failed, no memory\n");
330                 return NULL;
331         }
332
333         l_ptr->addr = peer;
334         if_name = strchr(b_ptr->name, ':') + 1;
335         sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
336                 tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
337                 tipc_node(tipc_own_addr),
338                 if_name,
339                 tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
340                 /* note: peer i/f name is updated by reset/activate message */
341         memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
342         l_ptr->owner = n_ptr;
343         l_ptr->checkpoint = 1;
344         l_ptr->peer_session = INVALID_SESSION;
345         l_ptr->b_ptr = b_ptr;
346         link_set_supervision_props(l_ptr, b_ptr->media->tolerance);
347         l_ptr->state = RESET_UNKNOWN;
348
349         l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
350         msg = l_ptr->pmsg;
351         tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr);
352         msg_set_size(msg, sizeof(l_ptr->proto_msg));
353         msg_set_session(msg, (tipc_random & 0xffff));
354         msg_set_bearer_id(msg, b_ptr->identity);
355         strcpy((char *)msg_data(msg), if_name);
356
357         l_ptr->priority = b_ptr->priority;
358         tipc_link_set_queue_limits(l_ptr, b_ptr->media->window);
359
360         link_init_max_pkt(l_ptr);
361
362         l_ptr->next_out_no = 1;
363         INIT_LIST_HEAD(&l_ptr->waiting_ports);
364
365         link_reset_statistics(l_ptr);
366
367         tipc_node_attach_link(n_ptr, l_ptr);
368
369         k_init_timer(&l_ptr->timer, (Handler)link_timeout, (unsigned long)l_ptr);
370         list_add_tail(&l_ptr->link_list, &b_ptr->links);
371         tipc_k_signal((Handler)link_start, (unsigned long)l_ptr);
372
373         return l_ptr;
374 }
375
376 /**
377  * tipc_link_delete - delete a link
378  * @l_ptr: pointer to link
379  *
380  * Note: 'tipc_net_lock' is write_locked, bearer is locked.
381  * This routine must not grab the node lock until after link timer cancellation
382  * to avoid a potential deadlock situation.
383  */
384
385 void tipc_link_delete(struct link *l_ptr)
386 {
387         if (!l_ptr) {
388                 err("Attempt to delete non-existent link\n");
389                 return;
390         }
391
392         k_cancel_timer(&l_ptr->timer);
393
394         tipc_node_lock(l_ptr->owner);
395         tipc_link_reset(l_ptr);
396         tipc_node_detach_link(l_ptr->owner, l_ptr);
397         tipc_link_stop(l_ptr);
398         list_del_init(&l_ptr->link_list);
399         tipc_node_unlock(l_ptr->owner);
400         k_term_timer(&l_ptr->timer);
401         kfree(l_ptr);
402 }
403
404 static void link_start(struct link *l_ptr)
405 {
406         tipc_node_lock(l_ptr->owner);
407         link_state_event(l_ptr, STARTING_EVT);
408         tipc_node_unlock(l_ptr->owner);
409 }
410
411 /**
412  * link_schedule_port - schedule port for deferred sending
413  * @l_ptr: pointer to link
414  * @origport: reference to sending port
415  * @sz: amount of data to be sent
416  *
417  * Schedules port for renewed sending of messages after link congestion
418  * has abated.
419  */
420
421 static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
422 {
423         struct tipc_port *p_ptr;
424
425         spin_lock_bh(&tipc_port_list_lock);
426         p_ptr = tipc_port_lock(origport);
427         if (p_ptr) {
428                 if (!p_ptr->wakeup)
429                         goto exit;
430                 if (!list_empty(&p_ptr->wait_list))
431                         goto exit;
432                 p_ptr->congested = 1;
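                /* worst case number of packets still to send: ceil(sz / max_pkt) */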
433                 p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt);
434                 list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
435                 l_ptr->stats.link_congs++;
436 exit:
437                 tipc_port_unlock(p_ptr);
438         }
439         spin_unlock_bh(&tipc_port_list_lock);
440         return -ELINKCONG;
441 }
442
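/**
 * tipc_link_wakeup_ports - wake up ports waiting for link congestion to abate
 * @l_ptr: pointer to link
 * @all: wake all waiting ports, ignoring the remaining send window
 *
 * Ports parked by link_schedule_port() are woken while send window
 * credit remains (or unconditionally when @all is set).
 */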
443 void tipc_link_wakeup_ports(struct link *l_ptr, int all)
444 {
445         struct tipc_port *p_ptr;
446         struct tipc_port *temp_p_ptr;
447         int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;
448
449         if (all)
450                 win = 100000;
451         if (win <= 0)
452                 return;
453         if (!spin_trylock_bh(&tipc_port_list_lock))
454                 return;
455         if (link_congested(l_ptr))
456                 goto exit;
457         list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports,
458                                  wait_list) {
459                 if (win <= 0)
460                         break;
461                 list_del_init(&p_ptr->wait_list);
462                 spin_lock_bh(p_ptr->lock);
463                 p_ptr->congested = 0;
464                 p_ptr->wakeup(p_ptr);
465                 win -= p_ptr->waiting_pkts;
466                 spin_unlock_bh(p_ptr->lock);
467         }
468
469 exit:
470         spin_unlock_bh(&tipc_port_list_lock);
471 }
472
473 /**
474  * link_release_outqueue - purge link's outbound message queue
475  * @l_ptr: pointer to link
476  */
477
478 static void link_release_outqueue(struct link *l_ptr)
479 {
480         struct sk_buff *buf = l_ptr->first_out;
481         struct sk_buff *next;
482
483         while (buf) {
484                 next = buf->next;
485                 buf_discard(buf);
486                 buf = next;
487         }
488         l_ptr->first_out = NULL;
489         l_ptr->out_queue_size = 0;
490 }
491
492 /**
493  * tipc_link_reset_fragments - purge link's inbound message fragments queue
494  * @l_ptr: pointer to link
495  */
496
497 void tipc_link_reset_fragments(struct link *l_ptr)
498 {
499         struct sk_buff *buf = l_ptr->defragm_buf;
500         struct sk_buff *next;
501
502         while (buf) {
503                 next = buf->next;
504                 buf_discard(buf);
505                 buf = next;
506         }
507         l_ptr->defragm_buf = NULL;
508 }
509
510 /**
511  * tipc_link_stop - purge all inbound and outbound messages associated with link
512  * @l_ptr: pointer to link
513  */
514
515 void tipc_link_stop(struct link *l_ptr)
516 {
517         struct sk_buff *buf;
518         struct sk_buff *next;
519
520         buf = l_ptr->oldest_deferred_in;
521         while (buf) {
522                 next = buf->next;
523                 buf_discard(buf);
524                 buf = next;
525         }
526
527         buf = l_ptr->first_out;
528         while (buf) {
529                 next = buf->next;
530                 buf_discard(buf);
531                 buf = next;
532         }
533
534         tipc_link_reset_fragments(l_ptr);
535
536         buf_discard(l_ptr->proto_msg_queue);
537         l_ptr->proto_msg_queue = NULL;
538 }
539
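/**
 * tipc_link_reset - take a link down and return it to its initial state
 * @l_ptr: pointer to link
 *
 * Advances the session number, purges all queues and, if this was an
 * active link with a second link still active, prepares for changeover.
 */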
540 void tipc_link_reset(struct link *l_ptr)
541 {
542         struct sk_buff *buf;
543         u32 prev_state = l_ptr->state;
544         u32 checkpoint = l_ptr->next_in_no;
545         int was_active_link = tipc_link_is_active(l_ptr);
546
547         msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));
548
549         /* Link is down, accept any session */
550         l_ptr->peer_session = INVALID_SESSION;
551
552         /* Prepare for max packet size negotiation */
553         link_init_max_pkt(l_ptr);
554
555         l_ptr->state = RESET_UNKNOWN;
556
557         if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
558                 return;
559
560         tipc_node_link_down(l_ptr->owner, l_ptr);
561         tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);
562
563         if (was_active_link && tipc_node_active_links(l_ptr->owner) &&
564             l_ptr->owner->permit_changeover) {
565                 l_ptr->reset_checkpoint = checkpoint;
566                 l_ptr->exp_msg_count = START_CHANGEOVER;
567         }
568
569         /* Clean up all queues: */
570
571         link_release_outqueue(l_ptr);
572         buf_discard(l_ptr->proto_msg_queue);
573         l_ptr->proto_msg_queue = NULL;
574         buf = l_ptr->oldest_deferred_in;
575         while (buf) {
576                 struct sk_buff *next = buf->next;
577                 buf_discard(buf);
578                 buf = next;
579         }
580         if (!list_empty(&l_ptr->waiting_ports))
581                 tipc_link_wakeup_ports(l_ptr, 1);
582
583         l_ptr->retransm_queue_head = 0;
584         l_ptr->retransm_queue_size = 0;
585         l_ptr->last_out = NULL;
586         l_ptr->first_out = NULL;
587         l_ptr->next_out = NULL;
588         l_ptr->unacked_window = 0;
589         l_ptr->checkpoint = 1;
590         l_ptr->next_out_no = 1;
591         l_ptr->deferred_inqueue_sz = 0;
592         l_ptr->oldest_deferred_in = NULL;
593         l_ptr->newest_deferred_in = NULL;
594         l_ptr->fsm_msg_cnt = 0;
595         l_ptr->stale_count = 0;
596         link_reset_statistics(l_ptr);
597 }
598
599
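/*
 * link_activate - bring the link up: reset the receive counters and
 * register the link with its node and the peer address with its bearer.
 */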
600 static void link_activate(struct link *l_ptr)
601 {
602         l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
603         tipc_node_link_up(l_ptr->owner, l_ptr);
604         tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
605 }
606
607 /**
608  * link_state_event - link finite state machine
609  * @l_ptr: pointer to link
610  * @event: state machine event to process
611  */
612
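/*
 * Summary of the main state transitions handled below (see the code
 * for the complete event handling):
 *
 *   WORKING_WORKING --timeout, no traffic---> WORKING_UNKNOWN (probe peer)
 *   WORKING_UNKNOWN --traffic / ACTIVATE----> WORKING_WORKING
 *   WORKING_UNKNOWN --abort_limit probes----> RESET_UNKNOWN   (link reset)
 *   WORKING_*       --RESET_MSG from peer---> RESET_RESET     (link reset)
 *   RESET_UNKNOWN   --ACTIVATE_MSG----------> WORKING_WORKING
 *   RESET_UNKNOWN   --RESET_MSG-------------> RESET_RESET
 *   RESET_RESET     --traffic / ACTIVATE----> WORKING_WORKING
 */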
613 static void link_state_event(struct link *l_ptr, unsigned event)
614 {
615         struct link *other;
616         u32 cont_intv = l_ptr->continuity_interval;
617
618         if (!l_ptr->started && (event != STARTING_EVT))
619                 return;         /* Not yet. */
620
621         if (link_blocked(l_ptr)) {
622                 if (event == TIMEOUT_EVT)
623                         link_set_timer(l_ptr, cont_intv);
624                 return;   /* Changeover going on */
625         }
626
627         switch (l_ptr->state) {
628         case WORKING_WORKING:
629                 switch (event) {
630                 case TRAFFIC_MSG_EVT:
631                 case ACTIVATE_MSG:
632                         break;
633                 case TIMEOUT_EVT:
634                         if (l_ptr->next_in_no != l_ptr->checkpoint) {
635                                 l_ptr->checkpoint = l_ptr->next_in_no;
636                                 if (tipc_bclink_acks_missing(l_ptr->owner)) {
637                                         tipc_link_send_proto_msg(l_ptr, STATE_MSG,
638                                                                  0, 0, 0, 0, 0);
639                                         l_ptr->fsm_msg_cnt++;
640                                 } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
641                                         tipc_link_send_proto_msg(l_ptr, STATE_MSG,
642                                                                  1, 0, 0, 0, 0);
643                                         l_ptr->fsm_msg_cnt++;
644                                 }
645                                 link_set_timer(l_ptr, cont_intv);
646                                 break;
647                         }
648                         l_ptr->state = WORKING_UNKNOWN;
649                         l_ptr->fsm_msg_cnt = 0;
650                         tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
651                         l_ptr->fsm_msg_cnt++;
652                         link_set_timer(l_ptr, cont_intv / 4);
653                         break;
654                 case RESET_MSG:
655                         info("Resetting link <%s>, requested by peer\n",
656                              l_ptr->name);
657                         tipc_link_reset(l_ptr);
658                         l_ptr->state = RESET_RESET;
659                         l_ptr->fsm_msg_cnt = 0;
660                         tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
661                         l_ptr->fsm_msg_cnt++;
662                         link_set_timer(l_ptr, cont_intv);
663                         break;
664                 default:
665                         err("Unknown link event %u in WW state\n", event);
666                 }
667                 break;
668         case WORKING_UNKNOWN:
669                 switch (event) {
670                 case TRAFFIC_MSG_EVT:
671                 case ACTIVATE_MSG:
672                         l_ptr->state = WORKING_WORKING;
673                         l_ptr->fsm_msg_cnt = 0;
674                         link_set_timer(l_ptr, cont_intv);
675                         break;
676                 case RESET_MSG:
677                         info("Resetting link <%s>, requested by peer "
678                              "while probing\n", l_ptr->name);
679                         tipc_link_reset(l_ptr);
680                         l_ptr->state = RESET_RESET;
681                         l_ptr->fsm_msg_cnt = 0;
682                         tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
683                         l_ptr->fsm_msg_cnt++;
684                         link_set_timer(l_ptr, cont_intv);
685                         break;
686                 case TIMEOUT_EVT:
687                         if (l_ptr->next_in_no != l_ptr->checkpoint) {
688                                 l_ptr->state = WORKING_WORKING;
689                                 l_ptr->fsm_msg_cnt = 0;
690                                 l_ptr->checkpoint = l_ptr->next_in_no;
691                                 if (tipc_bclink_acks_missing(l_ptr->owner)) {
692                                         tipc_link_send_proto_msg(l_ptr, STATE_MSG,
693                                                                  0, 0, 0, 0, 0);
694                                         l_ptr->fsm_msg_cnt++;
695                                 }
696                                 link_set_timer(l_ptr, cont_intv);
697                         } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
698                                 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
699                                                          1, 0, 0, 0, 0);
700                                 l_ptr->fsm_msg_cnt++;
701                                 link_set_timer(l_ptr, cont_intv / 4);
702                         } else {        /* Link has failed */
703                                 warn("Resetting link <%s>, peer not responding\n",
704                                      l_ptr->name);
705                                 tipc_link_reset(l_ptr);
706                                 l_ptr->state = RESET_UNKNOWN;
707                                 l_ptr->fsm_msg_cnt = 0;
708                                 tipc_link_send_proto_msg(l_ptr, RESET_MSG,
709                                                          0, 0, 0, 0, 0);
710                                 l_ptr->fsm_msg_cnt++;
711                                 link_set_timer(l_ptr, cont_intv);
712                         }
713                         break;
714                 default:
715                         err("Unknown link event %u in WU state\n", event);
716                 }
717                 break;
718         case RESET_UNKNOWN:
719                 switch (event) {
720                 case TRAFFIC_MSG_EVT:
721                         break;
722                 case ACTIVATE_MSG:
723                         other = l_ptr->owner->active_links[0];
724                         if (other && link_working_unknown(other))
725                                 break;
726                         l_ptr->state = WORKING_WORKING;
727                         l_ptr->fsm_msg_cnt = 0;
728                         link_activate(l_ptr);
729                         tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
730                         l_ptr->fsm_msg_cnt++;
731                         link_set_timer(l_ptr, cont_intv);
732                         break;
733                 case RESET_MSG:
734                         l_ptr->state = RESET_RESET;
735                         l_ptr->fsm_msg_cnt = 0;
736                         tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
737                         l_ptr->fsm_msg_cnt++;
738                         link_set_timer(l_ptr, cont_intv);
739                         break;
740                 case STARTING_EVT:
741                         l_ptr->started = 1;
742                         /* fall through */
743                 case TIMEOUT_EVT:
744                         tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
745                         l_ptr->fsm_msg_cnt++;
746                         link_set_timer(l_ptr, cont_intv);
747                         break;
748                 default:
749                         err("Unknown link event %u in RU state\n", event);
750                 }
751                 break;
752         case RESET_RESET:
753                 switch (event) {
754                 case TRAFFIC_MSG_EVT:
755                 case ACTIVATE_MSG:
756                         other = l_ptr->owner->active_links[0];
757                         if (other && link_working_unknown(other))
758                                 break;
759                         l_ptr->state = WORKING_WORKING;
760                         l_ptr->fsm_msg_cnt = 0;
761                         link_activate(l_ptr);
762                         tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
763                         l_ptr->fsm_msg_cnt++;
764                         link_set_timer(l_ptr, cont_intv);
765                         break;
766                 case RESET_MSG:
767                         break;
768                 case TIMEOUT_EVT:
769                         tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
770                         l_ptr->fsm_msg_cnt++;
771                         link_set_timer(l_ptr, cont_intv);
772                         break;
773                 default:
774                         err("Unknown link event %u in RR state\n", event);
775                 }
776                 break;
777         default:
778                 err("Unknown link state %u/%u\n", l_ptr->state, event);
779         }
780 }
781
782 /*
783  * link_bundle_buf(): Append contents of a buffer to
784  * the tail of an existing one.
785  */
786
787 static int link_bundle_buf(struct link *l_ptr,
788                            struct sk_buff *bundler,
789                            struct sk_buff *buf)
790 {
791         struct tipc_msg *bundler_msg = buf_msg(bundler);
792         struct tipc_msg *msg = buf_msg(buf);
793         u32 size = msg_size(msg);
794         u32 bundle_size = msg_size(bundler_msg);
795         u32 to_pos = align(bundle_size);
796         u32 pad = to_pos - bundle_size;
797
798         if (msg_user(bundler_msg) != MSG_BUNDLER)
799                 return 0;
800         if (msg_type(bundler_msg) != OPEN_MSG)
801                 return 0;
802         if (skb_tailroom(bundler) < (pad + size))
803                 return 0;
804         if (l_ptr->max_pkt < (to_pos + size))
805                 return 0;
806
807         skb_put(bundler, pad + size);
808         skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size);
809         msg_set_size(bundler_msg, to_pos + size);
810         msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
811         buf_discard(buf);
812         l_ptr->stats.sent_bundled++;
813         return 1;
814 }
815
816 static void link_add_to_outqueue(struct link *l_ptr,
817                                  struct sk_buff *buf,
818                                  struct tipc_msg *msg)
819 {
820         u32 ack = mod(l_ptr->next_in_no - 1);
821         u32 seqno = mod(l_ptr->next_out_no++);
822
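        /* header word 2 carries the last in-sequence ack (upper 16 bits)
         * and this packet's own sequence number (lower 16 bits)
         */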
823         msg_set_word(msg, 2, ((ack << 16) | seqno));
824         msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
825         buf->next = NULL;
826         if (l_ptr->first_out) {
827                 l_ptr->last_out->next = buf;
828                 l_ptr->last_out = buf;
829         } else
830                 l_ptr->first_out = l_ptr->last_out = buf;
831
832         l_ptr->out_queue_size++;
833         if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
834                 l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
835 }
836
837 static void link_add_chain_to_outqueue(struct link *l_ptr,
838                                        struct sk_buff *buf_chain,
839                                        u32 long_msgno)
840 {
841         struct sk_buff *buf;
842         struct tipc_msg *msg;
843
844         if (!l_ptr->next_out)
845                 l_ptr->next_out = buf_chain;
846         while (buf_chain) {
847                 buf = buf_chain;
848                 buf_chain = buf_chain->next;
849
850                 msg = buf_msg(buf);
851                 msg_set_long_msgno(msg, long_msgno);
852                 link_add_to_outqueue(l_ptr, buf, msg);
853         }
854 }
855
856 /*
857  * tipc_link_send_buf() is the 'full path' for messages, called from
858  * inside TIPC when the 'fast path' in tipc_send_buf
859  * has failed, and from link_send()
860  */
861
862 int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
863 {
864         struct tipc_msg *msg = buf_msg(buf);
865         u32 size = msg_size(msg);
866         u32 dsz = msg_data_sz(msg);
867         u32 queue_size = l_ptr->out_queue_size;
868         u32 imp = tipc_msg_tot_importance(msg);
869         u32 queue_limit = l_ptr->queue_limit[imp];
870         u32 max_packet = l_ptr->max_pkt;
871
872         msg_set_prevnode(msg, tipc_own_addr);   /* If routed message */
873
874         /* Match msg importance against queue limits: */
875
876         if (unlikely(queue_size >= queue_limit)) {
877                 if (imp <= TIPC_CRITICAL_IMPORTANCE) {
878                         link_schedule_port(l_ptr, msg_origport(msg), size);
879                         buf_discard(buf);
880                         return -ELINKCONG;
881                 }
882                 buf_discard(buf);
883                 if (imp > CONN_MANAGER) {
884                         warn("Resetting link <%s>, send queue full\n", l_ptr->name);
885                         tipc_link_reset(l_ptr);
886                 }
887                 return dsz;
888         }
889
890         /* Fragmentation needed ? */
891
892         if (size > max_packet)
893                 return link_send_long_buf(l_ptr, buf);
894
895         /* Packet can be queued or sent: */
896
897         if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) &&
898                    !link_congested(l_ptr))) {
899                 link_add_to_outqueue(l_ptr, buf, msg);
900
901                 if (likely(tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr))) {
902                         l_ptr->unacked_window = 0;
903                 } else {
904                         tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
905                         l_ptr->stats.bearer_congs++;
906                         l_ptr->next_out = buf;
907                 }
908                 return dsz;
909         }
910         /* Congestion: can message be bundled ?: */
911
912         if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
913             (msg_user(msg) != MSG_FRAGMENTER)) {
914
915                 /* Try adding message to an existing bundle */
916
917                 if (l_ptr->next_out &&
918                     link_bundle_buf(l_ptr, l_ptr->last_out, buf)) {
919                         tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
920                         return dsz;
921                 }
922
923                 /* Try creating a new bundle */
924
925                 if (size <= max_packet * 2 / 3) {
926                         struct sk_buff *bundler = tipc_buf_acquire(max_packet);
927                         struct tipc_msg bundler_hdr;
928
929                         if (bundler) {
930                                 tipc_msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
931                                          INT_H_SIZE, l_ptr->addr);
932                                 skb_copy_to_linear_data(bundler, &bundler_hdr,
933                                                         INT_H_SIZE);
934                                 skb_trim(bundler, INT_H_SIZE);
935                                 link_bundle_buf(l_ptr, bundler, buf);
936                                 buf = bundler;
937                                 msg = buf_msg(buf);
938                                 l_ptr->stats.sent_bundles++;
939                         }
940                 }
941         }
942         if (!l_ptr->next_out)
943                 l_ptr->next_out = buf;
944         link_add_to_outqueue(l_ptr, buf, msg);
945         tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
946         return dsz;
947 }
948
949 /*
950  * tipc_link_send(): same as tipc_link_send_buf(), but the link to use has
951  * not been selected yet, and the owner node is not locked.
952  * Called by TIPC internal users, e.g. the name distributor
953  */
954
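/*
 * The low bit of the selector picks which of the destination node's two
 * active links carries the message, so messages sent with the same
 * selector stay on the same link.
 */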
955 int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
956 {
957         struct link *l_ptr;
958         struct tipc_node *n_ptr;
959         int res = -ELINKCONG;
960
961         read_lock_bh(&tipc_net_lock);
962         n_ptr = tipc_node_find(dest);
963         if (n_ptr) {
964                 tipc_node_lock(n_ptr);
965                 l_ptr = n_ptr->active_links[selector & 1];
966                 if (l_ptr)
967                         res = tipc_link_send_buf(l_ptr, buf);
968                 else
969                         buf_discard(buf);
970                 tipc_node_unlock(n_ptr);
971         } else {
972                 buf_discard(buf);
973         }
974         read_unlock_bh(&tipc_net_lock);
975         return res;
976 }
977
978 /*
979  * tipc_link_send_names - send name table entries to new neighbor
980  *
981  * Send routine for bulk delivery of name table messages when contact
982  * with a new neighbor occurs. No link congestion checking is performed
983  * because name table messages *must* be delivered. The messages must be
984  * small enough not to require fragmentation.
985  * Called without any locks held.
986  */
987
988 void tipc_link_send_names(struct list_head *message_list, u32 dest)
989 {
990         struct tipc_node *n_ptr;
991         struct link *l_ptr;
992         struct sk_buff *buf;
993         struct sk_buff *temp_buf;
994
995         if (list_empty(message_list))
996                 return;
997
998         read_lock_bh(&tipc_net_lock);
999         n_ptr = tipc_node_find(dest);
1000         if (n_ptr) {
1001                 tipc_node_lock(n_ptr);
1002                 l_ptr = n_ptr->active_links[0];
1003                 if (l_ptr) {
1004                         /* convert circular list to linear list */
1005                         ((struct sk_buff *)message_list->prev)->next = NULL;
1006                         link_add_chain_to_outqueue(l_ptr,
1007                                 (struct sk_buff *)message_list->next, 0);
1008                         tipc_link_push_queue(l_ptr);
1009                         INIT_LIST_HEAD(message_list);
1010                 }
1011                 tipc_node_unlock(n_ptr);
1012         }
1013         read_unlock_bh(&tipc_net_lock);
1014
1015         /* discard the messages if they couldn't be sent */
1016
1017         list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
1018                 list_del((struct list_head *)buf);
1019                 buf_discard(buf);
1020         }
1021 }
1022
1023 /*
1024  * link_send_buf_fast: Entry for data messages where the
1025  * destination link is known and the header is complete,
1026  * including the total message length. Very time critical.
1027  * Link is locked. Returns user data length.
1028  */
1029
1030 static int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf,
1031                               u32 *used_max_pkt)
1032 {
1033         struct tipc_msg *msg = buf_msg(buf);
1034         int res = msg_data_sz(msg);
1035
1036         if (likely(!link_congested(l_ptr))) {
1037                 if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
1038                         if (likely(list_empty(&l_ptr->b_ptr->cong_links))) {
1039                                 link_add_to_outqueue(l_ptr, buf, msg);
1040                                 if (likely(tipc_bearer_send(l_ptr->b_ptr, buf,
1041                                                             &l_ptr->media_addr))) {
1042                                         l_ptr->unacked_window = 0;
1043                                         return res;
1044                                 }
1045                                 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1046                                 l_ptr->stats.bearer_congs++;
1047                                 l_ptr->next_out = buf;
1048                                 return res;
1049                         }
1050                 } else
1051                         *used_max_pkt = l_ptr->max_pkt;
1052         }
1053         return tipc_link_send_buf(l_ptr, buf);  /* All other cases */
1054 }
1055
1056 /*
1057  * tipc_send_buf_fast: Entry for data messages where the
1058  * destination node is known and the header is complete,
1059  * including the total message length.
1060  * Returns user data length.
1061  */
1062 int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
1063 {
1064         struct link *l_ptr;
1065         struct tipc_node *n_ptr;
1066         int res;
1067         u32 selector = msg_origport(buf_msg(buf)) & 1;
1068         u32 dummy;
1069
1070         read_lock_bh(&tipc_net_lock);
1071         n_ptr = tipc_node_find(destnode);
1072         if (likely(n_ptr)) {
1073                 tipc_node_lock(n_ptr);
1074                 l_ptr = n_ptr->active_links[selector];
1075                 if (likely(l_ptr)) {
1076                         res = link_send_buf_fast(l_ptr, buf, &dummy);
1077                         tipc_node_unlock(n_ptr);
1078                         read_unlock_bh(&tipc_net_lock);
1079                         return res;
1080                 }
1081                 tipc_node_unlock(n_ptr);
1082         }
1083         read_unlock_bh(&tipc_net_lock);
1084         res = msg_data_sz(buf_msg(buf));
1085         tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
1086         return res;
1087 }
1088
1089
1090 /*
1091  * tipc_link_send_sections_fast: Entry for messages where the
1092  * destination node is known and the header is complete,
1093  * except for total message length.
1094  * Returns user data length or errno.
1095  */
1096 int tipc_link_send_sections_fast(struct tipc_port *sender,
1097                                  struct iovec const *msg_sect,
1098                                  const u32 num_sect,
1099                                  unsigned int total_len,
1100                                  u32 destaddr)
1101 {
1102         struct tipc_msg *hdr = &sender->phdr;
1103         struct link *l_ptr;
1104         struct sk_buff *buf;
1105         struct tipc_node *node;
1106         int res;
1107         u32 selector = msg_origport(hdr) & 1;
1108
1109 again:
1110         /*
1111          * Try building message using port's max_pkt hint.
1112          * (Must not hold any locks while building message.)
1113          */
1114
1115         res = tipc_msg_build(hdr, msg_sect, num_sect, total_len,
1116                              sender->max_pkt, !sender->user_port, &buf);
1117
1118         read_lock_bh(&tipc_net_lock);
1119         node = tipc_node_find(destaddr);
1120         if (likely(node)) {
1121                 tipc_node_lock(node);
1122                 l_ptr = node->active_links[selector];
1123                 if (likely(l_ptr)) {
1124                         if (likely(buf)) {
1125                                 res = link_send_buf_fast(l_ptr, buf,
1126                                                          &sender->max_pkt);
1127 exit:
1128                                 tipc_node_unlock(node);
1129                                 read_unlock_bh(&tipc_net_lock);
1130                                 return res;
1131                         }
1132
1133                         /* Exit if build request was invalid */
1134
1135                         if (unlikely(res < 0))
1136                                 goto exit;
1137
1138                         /* Exit if link (or bearer) is congested */
1139
1140                         if (link_congested(l_ptr) ||
1141                             !list_empty(&l_ptr->b_ptr->cong_links)) {
1142                                 res = link_schedule_port(l_ptr,
1143                                                          sender->ref, res);
1144                                 goto exit;
1145                         }
1146
1147                         /*
1148                          * Message size exceeds max_pkt hint; update hint,
1149                          * then re-try fast path or fragment the message
1150                          */
1151
1152                         sender->max_pkt = l_ptr->max_pkt;
1153                         tipc_node_unlock(node);
1154                         read_unlock_bh(&tipc_net_lock);
1155
1156
1157                         if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
1158                                 goto again;
1159
1160                         return link_send_sections_long(sender, msg_sect,
1161                                                        num_sect, total_len,
1162                                                        destaddr);
1163                 }
1164                 tipc_node_unlock(node);
1165         }
1166         read_unlock_bh(&tipc_net_lock);
1167
1168         /* Couldn't find a link to the destination node */
1169
1170         if (buf)
1171                 return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
1172         if (res >= 0)
1173                 return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
1174                                                  total_len, TIPC_ERR_NO_NODE);
1175         return res;
1176 }
1177
1178 /*
1179  * link_send_sections_long(): Entry for long messages where the
1180  * destination node is known and the header is complete,
1181  * including the total message length.
1182  * Link and bearer congestion status have been checked to be ok,
1183  * and are ignored if they change.
1184  *
1185  * Note that fragments do not use the full link MTU so that they won't have
1186  * to undergo refragmentation if link changeover causes them to be sent
1187  * over another link with an additional tunnel header added as prefix.
1188  * (Refragmentation will still occur if the other link has a smaller MTU.)
1189  *
1190  * Returns user data length or errno.
1191  */
1192 static int link_send_sections_long(struct tipc_port *sender,
1193                                    struct iovec const *msg_sect,
1194                                    u32 num_sect,
1195                                    unsigned int total_len,
1196                                    u32 destaddr)
1197 {
1198         struct link *l_ptr;
1199         struct tipc_node *node;
1200         struct tipc_msg *hdr = &sender->phdr;
1201         u32 dsz = total_len;
1202         u32 max_pkt, fragm_sz, rest;
1203         struct tipc_msg fragm_hdr;
1204         struct sk_buff *buf, *buf_chain, *prev;
1205         u32 fragm_crs, fragm_rest, hsz, sect_rest;
1206         const unchar *sect_crs;
1207         int curr_sect;
1208         u32 fragm_no;
1209
1210 again:
1211         fragm_no = 1;
1212         max_pkt = sender->max_pkt - INT_H_SIZE;
1213                 /* leave room for tunnel header in case of link changeover */
1214         fragm_sz = max_pkt - INT_H_SIZE;
1215                 /* leave room for fragmentation header in each fragment */
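        /*
         * Illustrative sizing (assuming a 40-byte INT_H_SIZE): a 1500-byte
         * max_pkt hint gives 1460-byte fragment buffers, each carrying up
         * to 1420 bytes of user data after the fragment header.
         */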
1216         rest = dsz;
1217         fragm_crs = 0;
1218         fragm_rest = 0;
1219         sect_rest = 0;
1220         sect_crs = NULL;
1221         curr_sect = -1;
1222
1223         /* Prepare reusable fragment header: */
1224
1225         tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
1226                  INT_H_SIZE, msg_destnode(hdr));
1227         msg_set_size(&fragm_hdr, max_pkt);
1228         msg_set_fragm_no(&fragm_hdr, 1);
1229
1230         /* Prepare header of first fragment: */
1231
1232         buf_chain = buf = tipc_buf_acquire(max_pkt);
1233         if (!buf)
1234                 return -ENOMEM;
1235         buf->next = NULL;
1236         skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
1237         hsz = msg_hdr_sz(hdr);
1238         skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz);
1239
1240         /* Chop up message: */
1241
1242         fragm_crs = INT_H_SIZE + hsz;
1243         fragm_rest = fragm_sz - hsz;
1244
1245         do {            /* For all sections */
1246                 u32 sz;
1247
1248                 if (!sect_rest) {
1249                         sect_rest = msg_sect[++curr_sect].iov_len;
1250                         sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
1251                 }
1252
1253                 if (sect_rest < fragm_rest)
1254                         sz = sect_rest;
1255                 else
1256                         sz = fragm_rest;
1257
1258                 if (likely(!sender->user_port)) {
1259                         if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
1260 error:
1261                                 for (; buf_chain; buf_chain = buf) {
1262                                         buf = buf_chain->next;
1263                                         buf_discard(buf_chain);
1264                                 }
1265                                 return -EFAULT;
1266                         }
1267                 } else
1268                         skb_copy_to_linear_data_offset(buf, fragm_crs,
1269                                                        sect_crs, sz);
1270                 sect_crs += sz;
1271                 sect_rest -= sz;
1272                 fragm_crs += sz;
1273                 fragm_rest -= sz;
1274                 rest -= sz;
1275
1276                 if (!fragm_rest && rest) {
1277
1278                         /* Initiate new fragment: */
1279                         if (rest <= fragm_sz) {
1280                                 fragm_sz = rest;
1281                                 msg_set_type(&fragm_hdr, LAST_FRAGMENT);
1282                         } else {
1283                                 msg_set_type(&fragm_hdr, FRAGMENT);
1284                         }
1285                         msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
1286                         msg_set_fragm_no(&fragm_hdr, ++fragm_no);
1287                         prev = buf;
1288                         buf = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
1289                         if (!buf)
1290                                 goto error;
1291
1292                         buf->next = NULL;
1293                         prev->next = buf;
1294                         skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
1295                         fragm_crs = INT_H_SIZE;
1296                         fragm_rest = fragm_sz;
1297                 }
1298         } while (rest > 0);
1299
1300         /*
1301          * Now we have a buffer chain. Select a link and check
1302          * that packet size is still OK
1303          */
1304         node = tipc_node_find(destaddr);
1305         if (likely(node)) {
1306                 tipc_node_lock(node);
1307                 l_ptr = node->active_links[sender->ref & 1];
1308                 if (!l_ptr) {
1309                         tipc_node_unlock(node);
1310                         goto reject;
1311                 }
1312                 if (l_ptr->max_pkt < max_pkt) {
1313                         sender->max_pkt = l_ptr->max_pkt;
1314                         tipc_node_unlock(node);
1315                         for (; buf_chain; buf_chain = buf) {
1316                                 buf = buf_chain->next;
1317                                 buf_discard(buf_chain);
1318                         }
1319                         goto again;
1320                 }
1321         } else {
1322 reject:
1323                 for (; buf_chain; buf_chain = buf) {
1324                         buf = buf_chain->next;
1325                         buf_discard(buf_chain);
1326                 }
1327                 return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
1328                                                  total_len, TIPC_ERR_NO_NODE);
1329         }
1330
1331         /* Append chain of fragments to send queue & send them */
1332
1333         l_ptr->long_msg_seq_no++;
1334         link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
1335         l_ptr->stats.sent_fragments += fragm_no;
1336         l_ptr->stats.sent_fragmented++;
1337         tipc_link_push_queue(l_ptr);
1338         tipc_node_unlock(node);
1339         return dsz;
1340 }
1341
1342 /*
1343  * tipc_link_push_packet: Push one unsent packet to the media
1344  */
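/*
 * Returns 0 if a packet was pushed to the bearer, PUSH_FAILED if the
 * bearer was congested, or PUSH_FINISHED when there is nothing left
 * to push.
 */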
1345 u32 tipc_link_push_packet(struct link *l_ptr)
1346 {
1347         struct sk_buff *buf = l_ptr->first_out;
1348         u32 r_q_size = l_ptr->retransm_queue_size;
1349         u32 r_q_head = l_ptr->retransm_queue_head;
1350
1351         /* Step to position where retransmission failed, if any,
1352          * considering that buffers may have been released in the meantime */
1353
1354         if (r_q_size && buf) {
1355                 u32 last = lesser(mod(r_q_head + r_q_size),
1356                                   link_last_sent(l_ptr));
1357                 u32 first = msg_seqno(buf_msg(buf));
1358
1359                 while (buf && less(first, r_q_head)) {
1360                         first = mod(first + 1);
1361                         buf = buf->next;
1362                 }
1363                 l_ptr->retransm_queue_head = r_q_head = first;
1364                 l_ptr->retransm_queue_size = r_q_size = mod(last - first);
1365         }
1366
1367         /* Continue retransmission now, if there is anything: */
1368
1369         if (r_q_size && buf) {
1370                 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1371                 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1372                 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1373                         l_ptr->retransm_queue_head = mod(++r_q_head);
1374                         l_ptr->retransm_queue_size = --r_q_size;
1375                         l_ptr->stats.retransmitted++;
1376                         return 0;
1377                 } else {
1378                         l_ptr->stats.bearer_congs++;
1379                         return PUSH_FAILED;
1380                 }
1381         }
1382
1383         /* Send deferred protocol message, if any: */
1384
1385         buf = l_ptr->proto_msg_queue;
1386         if (buf) {
1387                 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1388                 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1389                 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1390                         l_ptr->unacked_window = 0;
1391                         buf_discard(buf);
1392                         l_ptr->proto_msg_queue = NULL;
1393                         return 0;
1394                 } else {
1395                         l_ptr->stats.bearer_congs++;
1396                         return PUSH_FAILED;
1397                 }
1398         }
1399
1400         /* Send one deferred data message, if send window not full: */
1401
1402         buf = l_ptr->next_out;
1403         if (buf) {
1404                 struct tipc_msg *msg = buf_msg(buf);
1405                 u32 next = msg_seqno(msg);
1406                 u32 first = msg_seqno(buf_msg(l_ptr->first_out));
1407
1408                 if (mod(next - first) < l_ptr->queue_limit[0]) {
1409                         msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1410                         msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1411                         if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1412                                 if (msg_user(msg) == MSG_BUNDLER)
1413                                         msg_set_type(msg, CLOSED_MSG);
1414                                 l_ptr->next_out = buf->next;
1415                                 return 0;
1416                         } else {
1417                                 l_ptr->stats.bearer_congs++;
1418                                 return PUSH_FAILED;
1419                         }
1420                 }
1421         }
1422         return PUSH_FINISHED;
1423 }
1424
1425 /*
1426  * tipc_link_push_queue(): push out the unsent messages of a link where
1427  *                         congestion has abated. Node is locked.
1428  */
1429 void tipc_link_push_queue(struct link *l_ptr)
1430 {
1431         u32 res;
1432
1433         if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr))
1434                 return;
1435
1436         do {
1437                 res = tipc_link_push_packet(l_ptr);
1438         } while (!res);
1439
1440         if (res == PUSH_FAILED)
1441                 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1442 }
1443
1444 static void link_reset_all(unsigned long addr)
1445 {
1446         struct tipc_node *n_ptr;
1447         char addr_string[16];
1448         u32 i;
1449
1450         read_lock_bh(&tipc_net_lock);
1451         n_ptr = tipc_node_find((u32)addr);
1452         if (!n_ptr) {
1453                 read_unlock_bh(&tipc_net_lock);
1454                 return; /* node no longer exists */
1455         }
1456
1457         tipc_node_lock(n_ptr);
1458
1459         warn("Resetting all links to %s\n",
1460              tipc_addr_string_fill(addr_string, n_ptr->addr));
1461
1462         for (i = 0; i < MAX_BEARERS; i++) {
1463                 if (n_ptr->links[i]) {
1464                         link_print(n_ptr->links[i], "Resetting link\n");
1465                         tipc_link_reset(n_ptr->links[i]);
1466                 }
1467         }
1468
1469         tipc_node_unlock(n_ptr);
1470         read_unlock_bh(&tipc_net_lock);
1471 }
1472
1473 static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf)
1474 {
1475         struct tipc_msg *msg = buf_msg(buf);
1476
1477         warn("Retransmission failure on link <%s>\n", l_ptr->name);
1478
1479         if (l_ptr->addr) {
1480
1481                 /* Handle failure on standard link */
1482
1483                 link_print(l_ptr, "Resetting link\n");
1484                 tipc_link_reset(l_ptr);
1485
1486         } else {
1487
1488                 /* Handle failure on broadcast link */
1489
1490                 struct tipc_node *n_ptr;
1491                 char addr_string[16];
1492
1493                 info("Msg seq number: %u,  ", msg_seqno(msg));
1494                 info("Outstanding acks: %lu\n",
1495                      (unsigned long) TIPC_SKB_CB(buf)->handle);
1496
1497                 n_ptr = tipc_bclink_retransmit_to();
1498                 tipc_node_lock(n_ptr);
1499
1500                 tipc_addr_string_fill(addr_string, n_ptr->addr);
1501                 info("Multicast link info for %s\n", addr_string);
1502                 info("Supported: %d,  ", n_ptr->bclink.supported);
1503                 info("Acked: %u\n", n_ptr->bclink.acked);
1504                 info("Last in: %u,  ", n_ptr->bclink.last_in);
1505                 info("Gap after: %u,  ", n_ptr->bclink.gap_after);
1506                 info("Gap to: %u\n", n_ptr->bclink.gap_to);
1507                 info("Nack sync: %u\n\n", n_ptr->bclink.nack_sync);
1508
1509                 tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);
1510
1511                 tipc_node_unlock(n_ptr);
1512
1513                 l_ptr->stale_count = 0;
1514         }
1515 }
1516
1517 void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
1518                           u32 retransmits)
1519 {
1520         struct tipc_msg *msg;
1521
1522         if (!buf)
1523                 return;
1524
1525         msg = buf_msg(buf);
1526
1527         if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
1528                 if (l_ptr->retransm_queue_size == 0) {
1529                         l_ptr->retransm_queue_head = msg_seqno(msg);
1530                         l_ptr->retransm_queue_size = retransmits;
1531                 } else {
1532                         err("Unexpected retransmit on link %s (qsize=%d)\n",
1533                             l_ptr->name, l_ptr->retransm_queue_size);
1534                 }
1535                 return;
1536         } else {
1537                 /* Detect repeated retransmit failures on uncongested bearer */
1538
1539                 if (l_ptr->last_retransmitted == msg_seqno(msg)) {
1540                         if (++l_ptr->stale_count > 100) {
1541                                 link_retransmit_failure(l_ptr, buf);
1542                                 return;
1543                         }
1544                 } else {
1545                         l_ptr->last_retransmitted = msg_seqno(msg);
1546                         l_ptr->stale_count = 1;
1547                 }
1548         }
1549
1550         while (retransmits && (buf != l_ptr->next_out) && buf) {
1551                 msg = buf_msg(buf);
1552                 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1553                 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1554                 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1555                         buf = buf->next;
1556                         retransmits--;
1557                         l_ptr->stats.retransmitted++;
1558                 } else {
1559                         tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1560                         l_ptr->stats.bearer_congs++;
1561                         l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
1562                         l_ptr->retransm_queue_size = retransmits;
1563                         return;
1564                 }
1565         }
1566
1567         l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
1568 }
1569
1570 /**
1571  * link_insert_deferred_queue - insert deferred messages back into receive chain
1572  */
1573
1574 static struct sk_buff *link_insert_deferred_queue(struct link *l_ptr,
1575                                                   struct sk_buff *buf)
1576 {
1577         u32 seq_no;
1578
1579         if (l_ptr->oldest_deferred_in == NULL)
1580                 return buf;
1581
1582         seq_no = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
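             /* Splice deferred queue into receive chain if its first packet is now in sequence */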
1583         if (seq_no == mod(l_ptr->next_in_no)) {
1584                 l_ptr->newest_deferred_in->next = buf;
1585                 buf = l_ptr->oldest_deferred_in;
1586                 l_ptr->oldest_deferred_in = NULL;
1587                 l_ptr->deferred_inqueue_sz = 0;
1588         }
1589         return buf;
1590 }
1591
1592 /**
1593  * link_recv_buf_validate - validate basic format of received message
1594  *
1595  * This routine ensures a TIPC message has an acceptable header, and at least
1596  * as much data as the header indicates it should.  The routine also ensures
1597  * that the entire message header is stored in the main fragment of the message
1598  * buffer, to simplify future access to message header fields.
1599  *
1600  * Note: Having extra info present in the message header or data areas is OK.
1601  * TIPC will ignore the excess, under the assumption that it is optional info
1602  * introduced by a later release of the protocol.
1603  */
1604
1605 static int link_recv_buf_validate(struct sk_buff *buf)
1606 {
1607         static u32 min_data_hdr_size[8] = {
1608                 SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE,
1609                 MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE
1610                 };
1611
1612         struct tipc_msg *msg;
1613         u32 tipc_hdr[2];
1614         u32 size;
1615         u32 hdr_size;
1616         u32 min_hdr_size;
1617
1618         if (unlikely(buf->len < MIN_H_SIZE))
1619                 return 0;
1620
1621         msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr);
1622         if (msg == NULL)
1623                 return 0;
1624
1625         if (unlikely(msg_version(msg) != TIPC_VERSION))
1626                 return 0;
1627
1628         size = msg_size(msg);
1629         hdr_size = msg_hdr_sz(msg);
1630         min_hdr_size = msg_isdata(msg) ?
1631                 min_data_hdr_size[msg_type(msg)] : INT_H_SIZE;
1632
1633         if (unlikely((hdr_size < min_hdr_size) ||
1634                      (size < hdr_size) ||
1635                      (buf->len < size) ||
1636                      (size - hdr_size > TIPC_MAX_USER_MSG_SIZE)))
1637                 return 0;
1638
1639         return pskb_may_pull(buf, hdr_size);
1640 }
1641
1642 /**
1643  * tipc_recv_msg - process TIPC messages arriving from off-node
1644  * @head: pointer to message buffer chain
1645  * @b_ptr: pointer to bearer that the messages arrived on
1646  *
1647  * Invoked with no locks held.  Bearer pointer must point to a valid bearer
1648  * structure (i.e. cannot be NULL), but bearer can be inactive.
1649  */
1650
1651 void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
1652 {
1653         read_lock_bh(&tipc_net_lock);
1654         while (head) {
1655                 struct tipc_node *n_ptr;
1656                 struct link *l_ptr;
1657                 struct sk_buff *crs;
1658                 struct sk_buff *buf = head;
1659                 struct tipc_msg *msg;
1660                 u32 seq_no;
1661                 u32 ackd;
1662                 u32 released = 0;
1663                 int type;
1664
1665                 head = head->next;
1666
1667                 /* Ensure bearer is still enabled */
1668
1669                 if (unlikely(!b_ptr->active))
1670                         goto cont;
1671
1672                 /* Ensure message is well-formed */
1673
1674                 if (unlikely(!link_recv_buf_validate(buf)))
1675                         goto cont;
1676
1677                 /* Ensure message data is a single contiguous unit */
1678
1679                 if (unlikely(buf_linearize(buf)))
1680                         goto cont;
1681
1682                 /* Handle arrival of a non-unicast link message */
1683
1684                 msg = buf_msg(buf);
1685
1686                 if (unlikely(msg_non_seq(msg))) {
1687                         if (msg_user(msg) ==  LINK_CONFIG)
1688                                 tipc_disc_recv_msg(buf, b_ptr);
1689                         else
1690                                 tipc_bclink_recv_pkt(buf);
1691                         continue;
1692                 }
1693
1694                 /* Discard unicast link messages destined for another node */
1695
1696                 if (unlikely(!msg_short(msg) &&
1697                              (msg_destnode(msg) != tipc_own_addr)))
1698                         goto cont;
1699
1700                 /* Locate neighboring node that sent message */
1701
1702                 n_ptr = tipc_node_find(msg_prevnode(msg));
1703                 if (unlikely(!n_ptr))
1704                         goto cont;
1705                 tipc_node_lock(n_ptr);
1706
1707                 /* Locate unicast link endpoint that should handle message */
1708
1709                 l_ptr = n_ptr->links[b_ptr->identity];
1710                 if (unlikely(!l_ptr)) {
1711                         tipc_node_unlock(n_ptr);
1712                         goto cont;
1713                 }
1714
1715                 /* Verify that communication with node is currently allowed */
1716
1717                 if ((n_ptr->block_setup & WAIT_PEER_DOWN) &&
1718                         msg_user(msg) == LINK_PROTOCOL &&
1719                         (msg_type(msg) == RESET_MSG ||
1720                                         msg_type(msg) == ACTIVATE_MSG) &&
1721                         !msg_redundant_link(msg))
1722                         n_ptr->block_setup &= ~WAIT_PEER_DOWN;
1723
1724                 if (n_ptr->block_setup) {
1725                         tipc_node_unlock(n_ptr);
1726                         goto cont;
1727                 }
1728
1729                 /* Validate message sequence number info */
1730
1731                 seq_no = msg_seqno(msg);
1732                 ackd = msg_ack(msg);
1733
1734                 /* Release acked messages */
1735
1736                 if (less(n_ptr->bclink.acked, msg_bcast_ack(msg))) {
1737                         if (tipc_node_is_up(n_ptr) && n_ptr->bclink.supported)
1738                                 tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
1739                 }
1740
1741                 crs = l_ptr->first_out;
1742                 while ((crs != l_ptr->next_out) &&
1743                        less_eq(msg_seqno(buf_msg(crs)), ackd)) {
1744                         struct sk_buff *next = crs->next;
1745
1746                         buf_discard(crs);
1747                         crs = next;
1748                         released++;
1749                 }
1750                 if (released) {
1751                         l_ptr->first_out = crs;
1752                         l_ptr->out_queue_size -= released;
1753                 }
1754
1755                 /* Try sending any messages link endpoint has pending */
1756
1757                 if (unlikely(l_ptr->next_out))
1758                         tipc_link_push_queue(l_ptr);
1759                 if (unlikely(!list_empty(&l_ptr->waiting_ports)))
1760                         tipc_link_wakeup_ports(l_ptr, 0);
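                     /* Send an explicit acknowledge (state message) if too many packets are unacknowledged */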
1761                 if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
1762                         l_ptr->stats.sent_acks++;
1763                         tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1764                 }
1765
1766                 /* Now (finally!) process the incoming message */
1767
1768 protocol_check:
1769                 if (likely(link_working_working(l_ptr))) {
1770                         if (likely(seq_no == mod(l_ptr->next_in_no))) {
1771                                 l_ptr->next_in_no++;
1772                                 if (unlikely(l_ptr->oldest_deferred_in))
1773                                         head = link_insert_deferred_queue(l_ptr,
1774                                                                           head);
1775                                 if (likely(msg_is_dest(msg, tipc_own_addr))) {
1776 deliver:
1777                                         if (likely(msg_isdata(msg))) {
1778                                                 tipc_node_unlock(n_ptr);
1779                                                 tipc_port_recv_msg(buf);
1780                                                 continue;
1781                                         }
1782                                         switch (msg_user(msg)) {
1783                                         case MSG_BUNDLER:
1784                                                 l_ptr->stats.recv_bundles++;
1785                                                 l_ptr->stats.recv_bundled +=
1786                                                         msg_msgcnt(msg);
1787                                                 tipc_node_unlock(n_ptr);
1788                                                 tipc_link_recv_bundle(buf);
1789                                                 continue;
1790                                         case NAME_DISTRIBUTOR:
1791                                                 tipc_node_unlock(n_ptr);
1792                                                 tipc_named_recv(buf);
1793                                                 continue;
1794                                         case CONN_MANAGER:
1795                                                 tipc_node_unlock(n_ptr);
1796                                                 tipc_port_recv_proto_msg(buf);
1797                                                 continue;
1798                                         case MSG_FRAGMENTER:
1799                                                 l_ptr->stats.recv_fragments++;
1800                                                 if (tipc_link_recv_fragment(&l_ptr->defragm_buf,
1801                                                                             &buf, &msg)) {
1802                                                         l_ptr->stats.recv_fragmented++;
1803                                                         goto deliver;
1804                                                 }
1805                                                 break;
1806                                         case CHANGEOVER_PROTOCOL:
1807                                                 type = msg_type(msg);
1808                                                 if (link_recv_changeover_msg(&l_ptr, &buf)) {
1809                                                         msg = buf_msg(buf);
1810                                                         seq_no = msg_seqno(msg);
1811                                                         if (type == ORIGINAL_MSG)
1812                                                                 goto deliver;
1813                                                         goto protocol_check;
1814                                                 }
1815                                                 break;
1816                                         default:
1817                                                 buf_discard(buf);
1818                                                 buf = NULL;
1819                                                 break;
1820                                         }
1821                                 }
1822                                 tipc_node_unlock(n_ptr);
1823                                 tipc_net_route_msg(buf);
1824                                 continue;
1825                         }
1826                         link_handle_out_of_seq_msg(l_ptr, buf);
1827                         head = link_insert_deferred_queue(l_ptr, head);
1828                         tipc_node_unlock(n_ptr);
1829                         continue;
1830                 }
1831
1832                 if (msg_user(msg) == LINK_PROTOCOL) {
1833                         link_recv_proto_msg(l_ptr, buf);
1834                         head = link_insert_deferred_queue(l_ptr, head);
1835                         tipc_node_unlock(n_ptr);
1836                         continue;
1837                 }
1838                 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
1839
1840                 if (link_working_working(l_ptr)) {
1841                         /* Re-insert in front of queue */
1842                         buf->next = head;
1843                         head = buf;
1844                         tipc_node_unlock(n_ptr);
1845                         continue;
1846                 }
1847                 tipc_node_unlock(n_ptr);
1848 cont:
1849                 buf_discard(buf);
1850         }
1851         read_unlock_bh(&tipc_net_lock);
1852 }
1853
1854 /*
1855  * tipc_link_defer_pkt(): Sort a received out-of-sequence packet
1856  *                        into the deferred reception queue.
1857  * Returns the increase of the queue length, i.e. 0 or 1
1858  */
1859
1860 u32 tipc_link_defer_pkt(struct sk_buff **head,
1861                         struct sk_buff **tail,
1862                         struct sk_buff *buf)
1863 {
1864         struct sk_buff *prev = NULL;
1865         struct sk_buff *crs = *head;
1866         u32 seq_no = msg_seqno(buf_msg(buf));
1867
1868         buf->next = NULL;
1869
1870         /* Empty queue ? */
1871         if (*head == NULL) {
1872                 *head = *tail = buf;
1873                 return 1;
1874         }
1875
1876         /* Last ? */
1877         if (less(msg_seqno(buf_msg(*tail)), seq_no)) {
1878                 (*tail)->next = buf;
1879                 *tail = buf;
1880                 return 1;
1881         }
1882
1883         /* Scan through queue and sort it in */
1884         do {
1885                 struct tipc_msg *msg = buf_msg(crs);
1886
1887                 if (less(seq_no, msg_seqno(msg))) {
1888                         buf->next = crs;
1889                         if (prev)
1890                                 prev->next = buf;
1891                         else
1892                                 *head = buf;
1893                         return 1;
1894                 }
1895                 if (seq_no == msg_seqno(msg))
1896                         break;
1897                 prev = crs;
1898                 crs = crs->next;
1899         } while (crs);
1900
1901         /* Message is a duplicate of an existing message */
1902
1903         buf_discard(buf);
1904         return 0;
1905 }
1906
1907 /**
1908  * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
1909  */
1910
1911 static void link_handle_out_of_seq_msg(struct link *l_ptr,
1912                                        struct sk_buff *buf)
1913 {
1914         u32 seq_no = msg_seqno(buf_msg(buf));
1915
1916         if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
1917                 link_recv_proto_msg(l_ptr, buf);
1918                 return;
1919         }
1920
1921         /* Record OOS packet arrival (force mismatch on next timeout) */
1922
1923         l_ptr->checkpoint--;
1924
1925         /*
1926          * Discard packet if a duplicate; otherwise add it to deferred queue
1927          * and notify peer of gap as per protocol specification
1928          */
1929
1930         if (less(seq_no, mod(l_ptr->next_in_no))) {
1931                 l_ptr->stats.duplicates++;
1932                 buf_discard(buf);
1933                 return;
1934         }
1935
1936         if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in,
1937                                 &l_ptr->newest_deferred_in, buf)) {
1938                 l_ptr->deferred_inqueue_sz++;
1939                 l_ptr->stats.deferred_recv++;
1940                 if ((l_ptr->deferred_inqueue_sz % 16) == 1)
1941                         tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1942         } else
1943                 l_ptr->stats.duplicates++;
1944 }
1945
1946 /*
1947  * Send protocol message to the other endpoint.
1948  */
1949 void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
1950                               u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
1951 {
1952         struct sk_buff *buf = NULL;
1953         struct tipc_msg *msg = l_ptr->pmsg;
1954         u32 msg_size = sizeof(l_ptr->proto_msg);
1955         int r_flag;
1956
1957         if (link_blocked(l_ptr))
1958                 return;
1959
1960         /* Abort non-RESET send if communication with node is prohibited */
1961
1962         if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG))
1963                 return;
1964
1965         msg_set_type(msg, msg_typ);
1966         msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
1967         msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in));
1968         msg_set_last_bcast(msg, tipc_bclink_get_last_sent());
1969
1970         if (msg_typ == STATE_MSG) {
1971                 u32 next_sent = mod(l_ptr->next_out_no);
1972
1973                 if (!tipc_link_is_up(l_ptr))
1974                         return;
1975                 if (l_ptr->next_out)
1976                         next_sent = msg_seqno(buf_msg(l_ptr->next_out));
1977                 msg_set_next_sent(msg, next_sent);
1978                 if (l_ptr->oldest_deferred_in) {
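                             /* Gap = number of packets missing before the first deferred packet */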
1979                         u32 rec = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
1980                         gap = mod(rec - mod(l_ptr->next_in_no));
1981                 }
1982                 msg_set_seq_gap(msg, gap);
1983                 if (gap)
1984                         l_ptr->stats.sent_nacks++;
1985                 msg_set_link_tolerance(msg, tolerance);
1986                 msg_set_linkprio(msg, priority);
1987                 msg_set_max_pkt(msg, ack_mtu);
1988                 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1989                 msg_set_probe(msg, probe_msg != 0);
1990                 if (probe_msg) {
1991                         u32 mtu = l_ptr->max_pkt;
1992
1993                         if ((mtu < l_ptr->max_pkt_target) &&
1994                             link_working_working(l_ptr) &&
1995                             l_ptr->fsm_msg_cnt) {
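                                     /* Probe roughly halfway between current MTU and target, rounded to a multiple of 4 */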
1996                                 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
1997                                 if (l_ptr->max_pkt_probes == 10) {
1998                                         l_ptr->max_pkt_target = (msg_size - 4);
1999                                         l_ptr->max_pkt_probes = 0;
2000                                         msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
2001                                 }
2002                                 l_ptr->max_pkt_probes++;
2003                         }
2004
2005                         l_ptr->stats.sent_probes++;
2006                 }
2007                 l_ptr->stats.sent_states++;
2008         } else {                /* RESET_MSG or ACTIVATE_MSG */
2009                 msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
2010                 msg_set_seq_gap(msg, 0);
2011                 msg_set_next_sent(msg, 1);
2012                 msg_set_probe(msg, 0);
2013                 msg_set_link_tolerance(msg, l_ptr->tolerance);
2014                 msg_set_linkprio(msg, l_ptr->priority);
2015                 msg_set_max_pkt(msg, l_ptr->max_pkt_target);
2016         }
2017
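             /* Tell peer whether we have at least one other working link to its node */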
2018         r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
2019         msg_set_redundant_link(msg, r_flag);
2020         msg_set_linkprio(msg, l_ptr->priority);
2021
2022         /* Ensure sequence number will not fit within the receive window: */
2023
2024         msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
2025
2026         /* Congestion? */
2027
2028         if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
2029                 if (!l_ptr->proto_msg_queue) {
2030                         l_ptr->proto_msg_queue =
2031                                 tipc_buf_acquire(sizeof(l_ptr->proto_msg));
2032                 }
2033                 buf = l_ptr->proto_msg_queue;
2034                 if (!buf)
2035                         return;
2036                 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
2037                 return;
2038         }
2039
2040         /* Message can be sent */
2041
2042         buf = tipc_buf_acquire(msg_size);
2043         if (!buf)
2044                 return;
2045
2046         skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
2047         msg_set_size(buf_msg(buf), msg_size);
2048
2049         if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
2050                 l_ptr->unacked_window = 0;
2051                 buf_discard(buf);
2052                 return;
2053         }
2054
2055         /* New congestion */
2056         tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
2057         l_ptr->proto_msg_queue = buf;
2058         l_ptr->stats.bearer_congs++;
2059 }
2060
2061 /*
2062  * Receive protocol message.
2063  * Note that the network plane id propagates through the network, and may
2064  * change at any time. The node with the lowest address rules.
2065  */
2066
2067 static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
2068 {
2069         u32 rec_gap = 0;
2070         u32 max_pkt_info;
2071         u32 max_pkt_ack;
2072         u32 msg_tol;
2073         struct tipc_msg *msg = buf_msg(buf);
2074
2075         if (link_blocked(l_ptr))
2076                 goto exit;
2077
2078         /* record unnumbered packet arrival (force mismatch on next timeout) */
2079
2080         l_ptr->checkpoint--;
2081
2082         if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
2083                 if (tipc_own_addr > msg_prevnode(msg))
2084                         l_ptr->b_ptr->net_plane = msg_net_plane(msg);
2085
2086         l_ptr->owner->permit_changeover = msg_redundant_link(msg);
2087
2088         switch (msg_type(msg)) {
2089
2090         case RESET_MSG:
2091                 if (!link_working_unknown(l_ptr) &&
2092                     (l_ptr->peer_session != INVALID_SESSION)) {
2093                         if (less_eq(msg_session(msg), l_ptr->peer_session))
2094                                 break; /* duplicate or old reset: ignore */
2095                 }
2096
2097                 if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
2098                                 link_working_unknown(l_ptr))) {
2099                         /*
2100                          * peer has lost contact -- don't allow peer's links
2101                          * to reactivate before we recognize loss & clean up
2102                          */
2103                         l_ptr->owner->block_setup = WAIT_NODE_DOWN;
2104                 }
2105
2106                 /* fall thru' */
2107         case ACTIVATE_MSG:
2108                 /* Update link settings according to the other endpoint's values */
2109
2110                 strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
2111
2112                 msg_tol = msg_link_tolerance(msg);
2113                 if (msg_tol > l_ptr->tolerance)
2114                         link_set_supervision_props(l_ptr, msg_tol);
2115
2116                 if (msg_linkprio(msg) > l_ptr->priority)
2117                         l_ptr->priority = msg_linkprio(msg);
2118
2119                 max_pkt_info = msg_max_pkt(msg);
2120                 if (max_pkt_info) {
2121                         if (max_pkt_info < l_ptr->max_pkt_target)
2122                                 l_ptr->max_pkt_target = max_pkt_info;
2123                         if (l_ptr->max_pkt > l_ptr->max_pkt_target)
2124                                 l_ptr->max_pkt = l_ptr->max_pkt_target;
2125                 } else {
2126                         l_ptr->max_pkt = l_ptr->max_pkt_target;
2127                 }
2128                 l_ptr->owner->bclink.supported = (max_pkt_info != 0);
2129
2130                 link_state_event(l_ptr, msg_type(msg));
2131
2132                 l_ptr->peer_session = msg_session(msg);
2133                 l_ptr->peer_bearer_id = msg_bearer_id(msg);
2134
2135                 /* Synchronize broadcast sequence numbers */
2136                 if (!tipc_node_redundant_links(l_ptr->owner))
2137                         l_ptr->owner->bclink.last_in = mod(msg_last_bcast(msg));
2138                 break;
2139         case STATE_MSG:
2140
2141                 msg_tol = msg_link_tolerance(msg);
2142                 if (msg_tol)
2143                         link_set_supervision_props(l_ptr, msg_tol);
2144
2145                 if (msg_linkprio(msg) &&
2146                     (msg_linkprio(msg) != l_ptr->priority)) {
2147                         warn("Resetting link <%s>, priority change %u->%u\n",
2148                              l_ptr->name, l_ptr->priority, msg_linkprio(msg));
2149                         l_ptr->priority = msg_linkprio(msg);
2150                         tipc_link_reset(l_ptr); /* Enforce change to take effect */
2151                         break;
2152                 }
2153                 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
2154                 l_ptr->stats.recv_states++;
2155                 if (link_reset_unknown(l_ptr))
2156                         break;
2157
2158                 if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
2159                         rec_gap = mod(msg_next_sent(msg) -
2160                                       mod(l_ptr->next_in_no));
2161                 }
2162
2163                 max_pkt_ack = msg_max_pkt(msg);
2164                 if (max_pkt_ack > l_ptr->max_pkt) {
2165                         l_ptr->max_pkt = max_pkt_ack;
2166                         l_ptr->max_pkt_probes = 0;
2167                 }
2168
2169                 max_pkt_ack = 0;
2170                 if (msg_probe(msg)) {
2171                         l_ptr->stats.recv_probes++;
2172                         if (msg_size(msg) > sizeof(l_ptr->proto_msg))
2173                                 max_pkt_ack = msg_size(msg);
2174                 }
2175
2176                 /* Send protocol message before any retransmissions, to reduce loss risk */
2177
2178                 tipc_bclink_check_gap(l_ptr->owner, msg_last_bcast(msg));
2179
2180                 if (rec_gap || (msg_probe(msg))) {
2181                         tipc_link_send_proto_msg(l_ptr, STATE_MSG,
2182                                                  0, rec_gap, 0, 0, max_pkt_ack);
2183                 }
2184                 if (msg_seq_gap(msg)) {
2185                         l_ptr->stats.recv_nacks++;
2186                         tipc_link_retransmit(l_ptr, l_ptr->first_out,
2187                                              msg_seq_gap(msg));
2188                 }
2189                 break;
2190         }
2191 exit:
2192         buf_discard(buf);
2193 }
2194
2195
2196 /*
2197  * tipc_link_tunnel(): Send one message via a link belonging to
2198  * another bearer. Owner node is locked.
2199  */
2200 static void tipc_link_tunnel(struct link *l_ptr,
2201                              struct tipc_msg *tunnel_hdr,
2202                              struct tipc_msg  *msg,
2203                              u32 selector)
2204 {
2205         struct link *tunnel;
2206         struct sk_buff *buf;
2207         u32 length = msg_size(msg);
2208
2209         tunnel = l_ptr->owner->active_links[selector & 1];
2210         if (!tipc_link_is_up(tunnel)) {
2211                 warn("Link changeover error, "
2212                      "tunnel link no longer available\n");
2213                 return;
2214         }
2215         msg_set_size(tunnel_hdr, length + INT_H_SIZE);
2216         buf = tipc_buf_acquire(length + INT_H_SIZE);
2217         if (!buf) {
2218                 warn("Link changeover error, "
2219                      "unable to send tunnel msg\n");
2220                 return;
2221         }
2222         skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
2223         skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length);
2224         tipc_link_send_buf(tunnel, buf);
2225 }
2226
2227
2228
2229 /*
2230  * tipc_link_changeover(): Send whole message queue via the remaining link
2231  *                         Owner node is locked.
2232  */
2233
2234 void tipc_link_changeover(struct link *l_ptr)
2235 {
2236         u32 msgcount = l_ptr->out_queue_size;
2237         struct sk_buff *crs = l_ptr->first_out;
2238         struct link *tunnel = l_ptr->owner->active_links[0];
2239         struct tipc_msg tunnel_hdr;
2240         int split_bundles;
2241
2242         if (!tunnel)
2243                 return;
2244
2245         if (!l_ptr->owner->permit_changeover) {
2246                 warn("Link changeover error, "
2247                      "peer did not permit changeover\n");
2248                 return;
2249         }
2250
2251         tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
2252                  ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
2253         msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2254         msg_set_msgcnt(&tunnel_hdr, msgcount);
2255
2256         if (!l_ptr->first_out) {
2257                 struct sk_buff *buf;
2258
2259                 buf = tipc_buf_acquire(INT_H_SIZE);
2260                 if (buf) {
2261                         skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE);
2262                         msg_set_size(&tunnel_hdr, INT_H_SIZE);
2263                         tipc_link_send_buf(tunnel, buf);
2264                 } else {
2265                         warn("Link changeover error, "
2266                              "unable to send changeover msg\n");
2267                 }
2268                 return;
2269         }
2270
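             /*
              * When two different links are active, bundled messages are tunneled
              * individually so that each can follow its own link selector.
              */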
2271         split_bundles = (l_ptr->owner->active_links[0] !=
2272                          l_ptr->owner->active_links[1]);
2273
2274         while (crs) {
2275                 struct tipc_msg *msg = buf_msg(crs);
2276
2277                 if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
2278                         struct tipc_msg *m = msg_get_wrapped(msg);
2279                         unchar *pos = (unchar *)m;
2280
2281                         msgcount = msg_msgcnt(msg);
2282                         while (msgcount--) {
2283                                 msg_set_seqno(m, msg_seqno(msg));
2284                                 tipc_link_tunnel(l_ptr, &tunnel_hdr, m,
2285                                                  msg_link_selector(m));
2286                                 pos += align(msg_size(m));
2287                                 m = (struct tipc_msg *)pos;
2288                         }
2289                 } else {
2290                         tipc_link_tunnel(l_ptr, &tunnel_hdr, msg,
2291                                          msg_link_selector(msg));
2292                 }
2293                 crs = crs->next;
2294         }
2295 }
2296
2297 void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel)
2298 {
2299         struct sk_buff *iter;
2300         struct tipc_msg tunnel_hdr;
2301
2302         tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
2303                  DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
2304         msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
2305         msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2306         iter = l_ptr->first_out;
2307         while (iter) {
2308                 struct sk_buff *outbuf;
2309                 struct tipc_msg *msg = buf_msg(iter);
2310                 u32 length = msg_size(msg);
2311
2312                 if (msg_user(msg) == MSG_BUNDLER)
2313                         msg_set_type(msg, CLOSED_MSG);
2314                 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));   /* Update */
2315                 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
2316                 msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
2317                 outbuf = tipc_buf_acquire(length + INT_H_SIZE);
2318                 if (outbuf == NULL) {
2319                         warn("Link changeover error, "
2320                              "unable to send duplicate msg\n");
2321                         return;
2322                 }
2323                 skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE);
2324                 skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data,
2325                                                length);
2326                 tipc_link_send_buf(tunnel, outbuf);
2327                 if (!tipc_link_is_up(l_ptr))
2328                         return;
2329                 iter = iter->next;
2330         }
2331 }
2332
2333
2334
2335 /**
2336  * buf_extract - extracts embedded TIPC message from another message
2337  * @skb: encapsulating message buffer
2338  * @from_pos: offset to extract from
2339  *
2340  * Returns a new message buffer containing an embedded message.  The
2341  * encapsulating message itself is left unchanged.
2342  */
2343
2344 static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
2345 {
2346         struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
2347         u32 size = msg_size(msg);
2348         struct sk_buff *eb;
2349
2350         eb = tipc_buf_acquire(size);
2351         if (eb)
2352                 skb_copy_to_linear_data(eb, msg, size);
2353         return eb;
2354 }
2355
2356 /*
2357  *  link_recv_changeover_msg(): Receive tunneled packet sent
2358  *  via other link. Node is locked. Returns 1 with the extracted
2359  *  buffer placed in *buf, otherwise 0.
2359  */
2360
2361 static int link_recv_changeover_msg(struct link **l_ptr,
2362                                     struct sk_buff **buf)
2363 {
2364         struct sk_buff *tunnel_buf = *buf;
2365         struct link *dest_link;
2366         struct tipc_msg *msg;
2367         struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
2368         u32 msg_typ = msg_type(tunnel_msg);
2369         u32 msg_count = msg_msgcnt(tunnel_msg);
2370
2371         dest_link = (*l_ptr)->owner->links[msg_bearer_id(tunnel_msg)];
2372         if (!dest_link)
2373                 goto exit;
2374         if (dest_link == *l_ptr) {
2375                 err("Unexpected changeover message on link <%s>\n",
2376                     (*l_ptr)->name);
2377                 goto exit;
2378         }
2379         *l_ptr = dest_link;
2380         msg = msg_get_wrapped(tunnel_msg);
2381
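             /* Tunneled duplicate: deliver only if the original has not already been received */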
2382         if (msg_typ == DUPLICATE_MSG) {
2383                 if (less(msg_seqno(msg), mod(dest_link->next_in_no)))
2384                         goto exit;
2385                 *buf = buf_extract(tunnel_buf, INT_H_SIZE);
2386                 if (*buf == NULL) {
2387                         warn("Link changeover error, duplicate msg dropped\n");
2388                         goto exit;
2389                 }
2390                 buf_discard(tunnel_buf);
2391                 return 1;
2392         }
2393
2394         /* First original message? */
2395
2396         if (tipc_link_is_up(dest_link)) {
2397                 info("Resetting link <%s>, changeover initiated by peer\n",
2398                      dest_link->name);
2399                 tipc_link_reset(dest_link);
2400                 dest_link->exp_msg_count = msg_count;
2401                 if (!msg_count)
2402                         goto exit;
2403         } else if (dest_link->exp_msg_count == START_CHANGEOVER) {
2404                 dest_link->exp_msg_count = msg_count;
2405                 if (!msg_count)
2406                         goto exit;
2407         }
2408
2409         /* Receive original message */
2410
2411         if (dest_link->exp_msg_count == 0) {
2412                 warn("Link changeover error, "
2413                      "got too many tunneled messages\n");
2414                 goto exit;
2415         }
2416         dest_link->exp_msg_count--;
2417         if (less(msg_seqno(msg), dest_link->reset_checkpoint)) {
2418                 goto exit;
2419         } else {
2420                 *buf = buf_extract(tunnel_buf, INT_H_SIZE);
2421                 if (*buf != NULL) {
2422                         buf_discard(tunnel_buf);
2423                         return 1;
2424                 } else {
2425                         warn("Link changeover error, original msg dropped\n");
2426                 }
2427         }
2428 exit:
2429         *buf = NULL;
2430         buf_discard(tunnel_buf);
2431         return 0;
2432 }
2433
2434 /*
2435  *  Bundler functionality:
2436  */
2437 void tipc_link_recv_bundle(struct sk_buff *buf)
2438 {
2439         u32 msgcount = msg_msgcnt(buf_msg(buf));
2440         u32 pos = INT_H_SIZE;
2441         struct sk_buff *obuf;
2442
2443         while (msgcount--) {
2444                 obuf = buf_extract(buf, pos);
2445                 if (obuf == NULL) {
2446                         warn("Link unable to unbundle message(s)\n");
2447                         break;
2448                 }
2449                 pos += align(msg_size(buf_msg(obuf)));
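                     /* Bundled messages are 4-byte aligned within the bundle */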
2450                 tipc_net_route_msg(obuf);
2451         }
2452         buf_discard(buf);
2453 }
2454
2455 /*
2456  *  Fragmentation/defragmentation:
2457  */
2458
2459
2460 /*
2461  * link_send_long_buf: Entry for buffers needing fragmentation.
2462  * The buffer is complete, including the total message length.
2463  * Returns user data length.
2464  */
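/*
 * Worked example (hypothetical figures, assuming INT_H_SIZE is 40 bytes):
 * with max_pkt = 1500, fragm_sz = 1460, so a 4000-byte message is sent as
 * ceil(4000 / 1460) = 3 fragments carrying 1460 + 1460 + 1080 bytes.
 */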
2465 static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
2466 {
2467         struct sk_buff *buf_chain = NULL;
2468         struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain;
2469         struct tipc_msg *inmsg = buf_msg(buf);
2470         struct tipc_msg fragm_hdr;
2471         u32 insize = msg_size(inmsg);
2472         u32 dsz = msg_data_sz(inmsg);
2473         unchar *crs = buf->data;
2474         u32 rest = insize;
2475         u32 pack_sz = l_ptr->max_pkt;
2476         u32 fragm_sz = pack_sz - INT_H_SIZE;
2477         u32 fragm_no = 0;
2478         u32 destaddr;
2479
2480         if (msg_short(inmsg))
2481                 destaddr = l_ptr->addr;
2482         else
2483                 destaddr = msg_destnode(inmsg);
2484
2485         /* Prepare reusable fragment header: */
2486
2487         tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
2488                  INT_H_SIZE, destaddr);
2489
2490         /* Chop up message: */
2491
2492         while (rest > 0) {
2493                 struct sk_buff *fragm;
2494
2495                 if (rest <= fragm_sz) {
2496                         fragm_sz = rest;
2497                         msg_set_type(&fragm_hdr, LAST_FRAGMENT);
2498                 }
2499                 fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
2500                 if (fragm == NULL) {
2501                         buf_discard(buf);
2502                         while (buf_chain) {
2503                                 buf = buf_chain;
2504                                 buf_chain = buf_chain->next;
2505                                 buf_discard(buf);
2506                         }
2507                         return -ENOMEM;
2508                 }
2509                 msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
2510                 fragm_no++;
2511                 msg_set_fragm_no(&fragm_hdr, fragm_no);
2512                 skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE);
2513                 skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs,
2514                                                fragm_sz);
2515                 buf_chain_tail->next = fragm;
2516                 buf_chain_tail = fragm;
2517
2518                 rest -= fragm_sz;
2519                 crs += fragm_sz;
2520                 msg_set_type(&fragm_hdr, FRAGMENT);
2521         }
2522         buf_discard(buf);
2523
2524         /* Append chain of fragments to send queue & send them */
2525
2526         l_ptr->long_msg_seq_no++;
2527         link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
2528         l_ptr->stats.sent_fragments += fragm_no;
2529         l_ptr->stats.sent_fragmented++;
2530         tipc_link_push_queue(l_ptr);
2531
2532         return dsz;
2533 }
2534
2535 /*
2536  * A pending message being re-assembled must store certain values
2537  * to handle subsequent fragments correctly. The following functions
2538  * help store these values in unused, available fields in the
2539  * pending message. This makes dynamic memory allocation unnecessary.
2540  */
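/*
 * Header fields reused in the pending (reassembly) buffer, as implemented
 * by the helpers below:
 *   seqno       - long message sequence number
 *   ack         - fragment size
 *   bcast_ack   - number of fragments still expected
 *   reroute cnt - timer (staleness) counter
 */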
2541
2542 static void set_long_msg_seqno(struct sk_buff *buf, u32 seqno)
2543 {
2544         msg_set_seqno(buf_msg(buf), seqno);
2545 }
2546
2547 static u32 get_fragm_size(struct sk_buff *buf)
2548 {
2549         return msg_ack(buf_msg(buf));
2550 }
2551
2552 static void set_fragm_size(struct sk_buff *buf, u32 sz)
2553 {
2554         msg_set_ack(buf_msg(buf), sz);
2555 }
2556
2557 static u32 get_expected_frags(struct sk_buff *buf)
2558 {
2559         return msg_bcast_ack(buf_msg(buf));
2560 }
2561
2562 static void set_expected_frags(struct sk_buff *buf, u32 exp)
2563 {
2564         msg_set_bcast_ack(buf_msg(buf), exp);
2565 }
2566
2567 static u32 get_timer_cnt(struct sk_buff *buf)
2568 {
2569         return msg_reroute_cnt(buf_msg(buf));
2570 }
2571
2572 static void incr_timer_cnt(struct sk_buff *buf)
2573 {
2574         msg_incr_reroute_cnt(buf_msg(buf));
2575 }
2576
2577 /*
2578  * tipc_link_recv_fragment(): Called with node lock on. Returns
2579  * the reassembled buffer if message is complete.
2580  */
2581 int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2582                             struct tipc_msg **m)
2583 {
2584         struct sk_buff *prev = NULL;
2585         struct sk_buff *fbuf = *fb;
2586         struct tipc_msg *fragm = buf_msg(fbuf);
2587         struct sk_buff *pbuf = *pending;
2588         u32 long_msg_seq_no = msg_long_msgno(fragm);
2589
2590         *fb = NULL;
2591
2592         /* Is there an incomplete message waiting for this fragment? */
2593
2594         while (pbuf && ((msg_seqno(buf_msg(pbuf)) != long_msg_seq_no) ||
2595                         (msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) {
2596                 prev = pbuf;
2597                 pbuf = pbuf->next;
2598         }
2599
2600         if (!pbuf && (msg_type(fragm) == FIRST_FRAGMENT)) {
2601                 struct tipc_msg *imsg = (struct tipc_msg *)msg_data(fragm);
2602                 u32 msg_sz = msg_size(imsg);
2603                 u32 fragm_sz = msg_data_sz(fragm);
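                     /* Expected fragment count = ceil(msg_sz / fragm_sz) */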
2604                 u32 exp_fragm_cnt = msg_sz/fragm_sz + !!(msg_sz % fragm_sz);
2605                 u32 max =  TIPC_MAX_USER_MSG_SIZE + NAMED_H_SIZE;
2606                 if (msg_type(imsg) == TIPC_MCAST_MSG)
2607                         max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE;
2608                 if (msg_size(imsg) > max) {
2609                         buf_discard(fbuf);
2610                         return 0;
2611                 }
2612                 pbuf = tipc_buf_acquire(msg_size(imsg));
2613                 if (pbuf != NULL) {
2614                         pbuf->next = *pending;
2615                         *pending = pbuf;
2616                         skb_copy_to_linear_data(pbuf, imsg,
2617                                                 msg_data_sz(fragm));
2618                         /*  Prepare buffer for subsequent fragments. */
2619
2620                         set_long_msg_seqno(pbuf, long_msg_seq_no);
2621                         set_fragm_size(pbuf, fragm_sz);
2622                         set_expected_frags(pbuf, exp_fragm_cnt - 1);
2623                 } else {
2624                         warn("Link unable to reassemble fragmented message\n");
2625                 }
2626                 buf_discard(fbuf);
2627                 return 0;
2628         } else if (pbuf && (msg_type(fragm) != FIRST_FRAGMENT)) {
2629                 u32 dsz = msg_data_sz(fragm);
2630                 u32 fsz = get_fragm_size(pbuf);
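                     /* Byte offset of this fragment's data within the reassembly buffer */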
2631                 u32 crs = ((msg_fragm_no(fragm) - 1) * fsz);
2632                 u32 exp_frags = get_expected_frags(pbuf) - 1;
2633                 skb_copy_to_linear_data_offset(pbuf, crs,
2634                                                msg_data(fragm), dsz);
2635                 buf_discard(fbuf);
2636
2637                 /* Is message complete? */
2638
2639                 if (exp_frags == 0) {
2640                         if (prev)
2641                                 prev->next = pbuf->next;
2642                         else
2643                                 *pending = pbuf->next;
2644                         msg_reset_reroute_cnt(buf_msg(pbuf));
2645                         *fb = pbuf;
2646                         *m = buf_msg(pbuf);
2647                         return 1;
2648                 }
2649                 set_expected_frags(pbuf, exp_frags);
2650                 return 0;
2651         }
2652         buf_discard(fbuf);
2653         return 0;
2654 }
2655
2656 /**
2657  * link_check_defragm_bufs - flush stale incoming message fragments
2658  * @l_ptr: pointer to link
2659  */
2660
2661 static void link_check_defragm_bufs(struct link *l_ptr)
2662 {
2663         struct sk_buff *prev = NULL;
2664         struct sk_buff *next = NULL;
2665         struct sk_buff *buf = l_ptr->defragm_buf;
2666
2667         if (!buf)
2668                 return;
2669         if (!link_working_working(l_ptr))
2670                 return;
2671         while (buf) {
2672                 u32 cnt = get_timer_cnt(buf);
2673
2674                 next = buf->next;
2675                 if (cnt < 4) {
2676                         incr_timer_cnt(buf);
2677                         prev = buf;
2678                 } else {
2679                         if (prev)
2680                                 prev->next = buf->next;
2681                         else
2682                                 l_ptr->defragm_buf = buf->next;
2683                         buf_discard(buf);
2684                 }
2685                 buf = next;
2686         }
2687 }
2688
2689
2690
2691 static void link_set_supervision_props(struct link *l_ptr, u32 tolerance)
2692 {
2693         if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
2694                 return;
2695
2696         l_ptr->tolerance = tolerance;
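             /* Continuity check interval is tolerance/4, capped at 500 (presumably ms) */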
2697         l_ptr->continuity_interval =
2698                 ((tolerance / 4) > 500) ? 500 : tolerance / 4;
2699         l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
2700 }
2701
2702
2703 void tipc_link_set_queue_limits(struct link *l_ptr, u32 window)
2704 {
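             /*
              * Example: window = 50 gives limits 50 / 64 / 80 / 96 for the four
              * importance levels below (integer arithmetic: (50 / 3) * 4 = 64, etc.)
              */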
2705         /* Data messages from this node, including FIRST_FRAGMENT */
2706         l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window;
2707         l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4;
2708         l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5;
2709         l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6;
2710         /* Transiting data messages, including FIRST_FRAGMENT */
2711         l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300;
2712         l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600;
2713         l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900;
2714         l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200;
2715         l_ptr->queue_limit[CONN_MANAGER] = 1200;
2716         l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
2717         l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
2718         /* FRAGMENT and LAST_FRAGMENT packets */
2719         l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
2720 }
2721
2722 /**
2723  * link_find_link - locate link by name
2724  * @name: ptr to link name string
2725  * @node: ptr to area to be filled with ptr to associated node
2726  *
2727  * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
2728  * this also prevents link deletion.
2729  *
2730  * Returns pointer to link (or NULL if no matching link is found).
2731  */
2732
2733 static struct link *link_find_link(const char *name, struct tipc_node **node)
2734 {
2735         struct link_name link_name_parts;
2736         struct tipc_bearer *b_ptr;
2737         struct link *l_ptr;
2738
2739         if (!link_name_validate(name, &link_name_parts))
2740                 return NULL;
2741
2742         b_ptr = tipc_bearer_find_interface(link_name_parts.if_local);
2743         if (!b_ptr)
2744                 return NULL;
2745
2746         *node = tipc_node_find(link_name_parts.addr_peer);
2747         if (!*node)
2748                 return NULL;
2749
2750         l_ptr = (*node)->links[b_ptr->identity];
2751         if (!l_ptr || strcmp(l_ptr->name, name))
2752                 return NULL;
2753
2754         return l_ptr;
2755 }
2756
2757 struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
2758                                      u16 cmd)
2759 {
2760         struct tipc_link_config *args;
2761         u32 new_value;
2762         struct link *l_ptr;
2763         struct tipc_node *node;
2764         int res;
2765
2766         if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
2767                 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2768
2769         args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
2770         new_value = ntohl(args->value);
2771
2772         if (!strcmp(args->name, tipc_bclink_name)) {
2773                 if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
2774                     (tipc_bclink_set_queue_limits(new_value) == 0))
2775                         return tipc_cfg_reply_none();
2776                 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
2777                                                    " (cannot change setting on broadcast link)");
2778         }
2779
2780         read_lock_bh(&tipc_net_lock);
2781         l_ptr = link_find_link(args->name, &node);
2782         if (!l_ptr) {
2783                 read_unlock_bh(&tipc_net_lock);
2784                 return tipc_cfg_reply_error_string("link not found");
2785         }
2786
2787         tipc_node_lock(node);
2788         res = -EINVAL;
2789         switch (cmd) {
2790         case TIPC_CMD_SET_LINK_TOL:
2791                 if ((new_value >= TIPC_MIN_LINK_TOL) &&
2792                     (new_value <= TIPC_MAX_LINK_TOL)) {
2793                         link_set_supervision_props(l_ptr, new_value);
2794                         tipc_link_send_proto_msg(l_ptr, STATE_MSG,
2795                                                  0, 0, new_value, 0, 0);
2796                         res = 0;
2797                 }
2798                 break;
2799         case TIPC_CMD_SET_LINK_PRI:
2800                 if ((new_value >= TIPC_MIN_LINK_PRI) &&
2801                     (new_value <= TIPC_MAX_LINK_PRI)) {
2802                         l_ptr->priority = new_value;
2803                         tipc_link_send_proto_msg(l_ptr, STATE_MSG,
2804                                                  0, 0, 0, new_value, 0);
2805                         res = 0;
2806                 }
2807                 break;
2808         case TIPC_CMD_SET_LINK_WINDOW:
2809                 if ((new_value >= TIPC_MIN_LINK_WIN) &&
2810                     (new_value <= TIPC_MAX_LINK_WIN)) {
2811                         tipc_link_set_queue_limits(l_ptr, new_value);
2812                         res = 0;
2813                 }
2814                 break;
2815         }
2816         tipc_node_unlock(node);
2817
2818         read_unlock_bh(&tipc_net_lock);
2819         if (res)
2820                 return tipc_cfg_reply_error_string("cannot change link setting");
2821
2822         return tipc_cfg_reply_none();
2823 }
2824
2825 /**
2826  * link_reset_statistics - reset link statistics
2827  * @l_ptr: pointer to link
2828  */
2829
2830 static void link_reset_statistics(struct link *l_ptr)
2831 {
2832         memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
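        /* Seed the info counters with the current sequence numbers so that
         * the "packets since reset" figures reported by tipc_link_stats()
         * start again from zero.
         */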
2833         l_ptr->stats.sent_info = l_ptr->next_out_no;
2834         l_ptr->stats.recv_info = l_ptr->next_in_no;
2835 }
2836
2837 struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
2838 {
2839         char *link_name;
2840         struct link *l_ptr;
2841         struct tipc_node *node;
2842
2843         if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2844                 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2845
2846         link_name = (char *)TLV_DATA(req_tlv_area);
2847         if (!strcmp(link_name, tipc_bclink_name)) {
2848                 if (tipc_bclink_reset_stats())
2849                         return tipc_cfg_reply_error_string("link not found");
2850                 return tipc_cfg_reply_none();
2851         }
2852
2853         read_lock_bh(&tipc_net_lock);
2854         l_ptr = link_find_link(link_name, &node);
2855         if (!l_ptr) {
2856                 read_unlock_bh(&tipc_net_lock);
2857                 return tipc_cfg_reply_error_string("link not found");
2858         }
2859
2860         tipc_node_lock(node);
2861         link_reset_statistics(l_ptr);
2862         tipc_node_unlock(node);
2863         read_unlock_bh(&tipc_net_lock);
2864         return tipc_cfg_reply_none();
2865 }
2866
2867 /**
2868  * percent - convert count to a percentage of total (rounding to nearest)
2869  */
2870
2871 static u32 percent(u32 count, u32 total)
2872 {
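        /* Rounds to the nearest whole percent, e.g. percent(1, 3) == 33
         * and percent(2, 3) == 67; total must be non-zero.
         */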
2873         return (count * 100 + (total / 2)) / total;
2874 }
2875
2876 /**
2877  * tipc_link_stats - print link statistics
2878  * @name: link name
2879  * @buf: print buffer area
2880  * @buf_size: size of print buffer area
2881  *
2882  * Returns length of print buffer data string (or 0 if error)
2883  */
2884
2885 static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
2886 {
2887         struct print_buf pb;
2888         struct link *l_ptr;
2889         struct tipc_node *node;
2890         char *status;
2891         u32 profile_total = 0;
2892
2893         if (!strcmp(name, tipc_bclink_name))
2894                 return tipc_bclink_stats(buf, buf_size);
2895
2896         tipc_printbuf_init(&pb, buf, buf_size);
2897
2898         read_lock_bh(&tipc_net_lock);
2899         l_ptr = link_find_link(name, &node);
2900         if (!l_ptr) {
2901                 read_unlock_bh(&tipc_net_lock);
2902                 return 0;
2903         }
2904         tipc_node_lock(node);
2905
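        /* An active link is carrying traffic; a link that is up but not
         * active is a standby; anything else is reported as defunct.
         */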
2906         if (tipc_link_is_active(l_ptr))
2907                 status = "ACTIVE";
2908         else if (tipc_link_is_up(l_ptr))
2909                 status = "STANDBY";
2910         else
2911                 status = "DEFUNCT";
2912         tipc_printf(&pb, "Link <%s>\n"
2913                          "  %s  MTU:%u  Priority:%u  Tolerance:%u ms"
2914                          "  Window:%u packets\n",
2915                     l_ptr->name, status, l_ptr->max_pkt,
2916                     l_ptr->priority, l_ptr->tolerance, l_ptr->queue_limit[0]);
2917         tipc_printf(&pb, "  RX packets:%u fragments:%u/%u bundles:%u/%u\n",
2918                     l_ptr->next_in_no - l_ptr->stats.recv_info,
2919                     l_ptr->stats.recv_fragments,
2920                     l_ptr->stats.recv_fragmented,
2921                     l_ptr->stats.recv_bundles,
2922                     l_ptr->stats.recv_bundled);
2923         tipc_printf(&pb, "  TX packets:%u fragments:%u/%u bundles:%u/%u\n",
2924                     l_ptr->next_out_no - l_ptr->stats.sent_info,
2925                     l_ptr->stats.sent_fragments,
2926                     l_ptr->stats.sent_fragmented,
2927                     l_ptr->stats.sent_bundles,
2928                     l_ptr->stats.sent_bundled);
2929         profile_total = l_ptr->stats.msg_length_counts;
2930         if (!profile_total)
2931                 profile_total = 1;
2932         tipc_printf(&pb, "  TX profile sample:%u packets  average:%u octets\n"
2933                          "  0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
2934                          "-16384:%u%% -32768:%u%% -66000:%u%%\n",
2935                     l_ptr->stats.msg_length_counts,
2936                     l_ptr->stats.msg_lengths_total / profile_total,
2937                     percent(l_ptr->stats.msg_length_profile[0], profile_total),
2938                     percent(l_ptr->stats.msg_length_profile[1], profile_total),
2939                     percent(l_ptr->stats.msg_length_profile[2], profile_total),
2940                     percent(l_ptr->stats.msg_length_profile[3], profile_total),
2941                     percent(l_ptr->stats.msg_length_profile[4], profile_total),
2942                     percent(l_ptr->stats.msg_length_profile[5], profile_total),
2943                     percent(l_ptr->stats.msg_length_profile[6], profile_total));
2944         tipc_printf(&pb, "  RX states:%u probes:%u naks:%u defs:%u dups:%u\n",
2945                     l_ptr->stats.recv_states,
2946                     l_ptr->stats.recv_probes,
2947                     l_ptr->stats.recv_nacks,
2948                     l_ptr->stats.deferred_recv,
2949                     l_ptr->stats.duplicates);
2950         tipc_printf(&pb, "  TX states:%u probes:%u naks:%u acks:%u dups:%u\n",
2951                     l_ptr->stats.sent_states,
2952                     l_ptr->stats.sent_probes,
2953                     l_ptr->stats.sent_nacks,
2954                     l_ptr->stats.sent_acks,
2955                     l_ptr->stats.retransmitted);
2956         tipc_printf(&pb, "  Congestion bearer:%u link:%u  Send queue max:%u avg:%u\n",
2957                     l_ptr->stats.bearer_congs,
2958                     l_ptr->stats.link_congs,
2959                     l_ptr->stats.max_queue_sz,
2960                     l_ptr->stats.queue_sz_counts
2961                     ? (l_ptr->stats.accu_queue_sz / l_ptr->stats.queue_sz_counts)
2962                     : 0);
2963
2964         tipc_node_unlock(node);
2965         read_unlock_bh(&tipc_net_lock);
2966         return tipc_printbuf_validate(&pb);
2967 }
2968
2969 #define MAX_LINK_STATS_INFO 2000
2970
2971 struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
2972 {
2973         struct sk_buff *buf;
2974         struct tlv_desc *rep_tlv;
2975         int str_len;
2976
2977         if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2978                 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2979
2980         buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_LINK_STATS_INFO));
2981         if (!buf)
2982                 return NULL;
2983
2984         rep_tlv = (struct tlv_desc *)buf->data;
2985
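        /* Format the statistics string directly into the reply TLV's data
         * area, then set the TLV header once the final length is known.
         */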
2986         str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
2987                                   (char *)TLV_DATA(rep_tlv), MAX_LINK_STATS_INFO);
2988         if (!str_len) {
2989                 buf_discard(buf);
2990                 return tipc_cfg_reply_error_string("link not found");
2991         }
2992
2993         skb_put(buf, TLV_SPACE(str_len));
2994         TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
2995
2996         return buf;
2997 }
2998
2999 /**
3000  * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination
3001  * @dest: network address of destination node
3002  * @selector: used to select from set of active links
3003  *
3004  * If no active link can be found, uses default maximum packet size.
3005  */
3006
3007 u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
3008 {
3009         struct tipc_node *n_ptr;
3010         struct link *l_ptr;
3011         u32 res = MAX_PKT_DEFAULT;
3012
3013         if (dest == tipc_own_addr)
3014                 return MAX_MSG_SIZE;
3015
3016         read_lock_bh(&tipc_net_lock);
3017         n_ptr = tipc_node_find(dest);
3018         if (n_ptr) {
3019                 tipc_node_lock(n_ptr);
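                /* The low-order bit of the selector picks one of the node's
                 * (at most two) active links to the destination.
                 */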
3020                 l_ptr = n_ptr->active_links[selector & 1];
3021                 if (l_ptr)
3022                         res = l_ptr->max_pkt;
3023                 tipc_node_unlock(n_ptr);
3024         }
3025         read_unlock_bh(&tipc_net_lock);
3026         return res;
3027 }
3028
3029 static void link_print(struct link *l_ptr, const char *str)
3030 {
3031         char print_area[256];
3032         struct print_buf pb;
3033         struct print_buf *buf = &pb;
3034
3035         tipc_printbuf_init(buf, print_area, sizeof(print_area));
3036
3037         tipc_printf(buf, "%s", str);
3038         tipc_printf(buf, "Link %x<%s>:",
3039                     l_ptr->addr, l_ptr->b_ptr->name);
3040
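        /* With CONFIG_TIPC_DEBUG the send and deferred receive queues are
         * dumped as well, including a consistency check of their sizes.
         */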
3041 #ifdef CONFIG_TIPC_DEBUG
3042         if (link_reset_reset(l_ptr) || link_reset_unknown(l_ptr))
3043                 goto print_state;
3044
3045         tipc_printf(buf, ": NXO(%u):", mod(l_ptr->next_out_no));
3046         tipc_printf(buf, "NXI(%u):", mod(l_ptr->next_in_no));
3047         tipc_printf(buf, "SQUE");
3048         if (l_ptr->first_out) {
3049                 tipc_printf(buf, "[%u..", msg_seqno(buf_msg(l_ptr->first_out)));
3050                 if (l_ptr->next_out)
3051                         tipc_printf(buf, "%u..",
3052                                     msg_seqno(buf_msg(l_ptr->next_out)));
3053                 tipc_printf(buf, "%u]", msg_seqno(buf_msg(l_ptr->last_out)));
3054                 if ((mod(msg_seqno(buf_msg(l_ptr->last_out)) -
3055                          msg_seqno(buf_msg(l_ptr->first_out)))
3056                      != (l_ptr->out_queue_size - 1)) ||
3057                     (l_ptr->last_out->next != NULL)) {
3058                         tipc_printf(buf, "\nSend queue inconsistency\n");
3059                         tipc_printf(buf, "first_out= %p ", l_ptr->first_out);
3060                         tipc_printf(buf, "next_out= %p ", l_ptr->next_out);
3061                         tipc_printf(buf, "last_out= %p ", l_ptr->last_out);
3062                 }
3063         } else
3064                 tipc_printf(buf, "[]");
3065         tipc_printf(buf, "SQSIZ(%u)", l_ptr->out_queue_size);
3066         if (l_ptr->oldest_deferred_in) {
3067                 u32 o = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
3068                 u32 n = msg_seqno(buf_msg(l_ptr->newest_deferred_in));
3069                 tipc_printf(buf, ":RQUE[%u..%u]", o, n);
3070                 if (l_ptr->deferred_inqueue_sz != mod((n + 1) - o)) {
3071                         tipc_printf(buf, ":RQSIZ(%u)",
3072                                     l_ptr->deferred_inqueue_sz);
3073                 }
3074         }
3075 print_state:
3076 #endif
3077
3078         if (link_working_unknown(l_ptr))
3079                 tipc_printf(buf, ":WU");
3080         else if (link_reset_reset(l_ptr))
3081                 tipc_printf(buf, ":RR");
3082         else if (link_reset_unknown(l_ptr))
3083                 tipc_printf(buf, ":RU");
3084         else if (link_working_working(l_ptr))
3085                 tipc_printf(buf, ":WW");
3086         tipc_printf(buf, "\n");
3087
3088         tipc_printbuf_validate(buf);
3089         info("%s", print_area);
3090 }
3091