2 * net/tipc/port.c: TIPC port code
4 * Copyright (c) 1992-2007, Ericsson AB
5 * Copyright (c) 2004-2007, Wind River Systems
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
44 #include "name_table.h"
49 /* Connection management: */
50 #define PROBING_INTERVAL 3600000 /* [ms] => 1 h */
54 #define MAX_REJECT_SIZE 1024
/* Deferred message queue drained by port_dispatcher_sigh(); guarded by queue_lock */
56 static struct sk_buff *msg_queue_head = NULL;
57 static struct sk_buff *msg_queue_tail = NULL;
/* tipc_port_list_lock protects the global 'ports' list below */
59 DEFINE_SPINLOCK(tipc_port_list_lock);
60 static DEFINE_SPINLOCK(queue_lock);
62 static LIST_HEAD(ports);
/* Forward declarations for timer and abort-message helpers defined later */
63 static void port_handle_node_down(unsigned long ref);
64 static struct sk_buff* port_build_self_abort_msg(struct port *,u32 err);
65 static struct sk_buff* port_build_peer_abort_msg(struct port *,u32 err);
66 static void port_timeout(unsigned long ref);
/* port_peernode - return network address of connected peer node (from port header) */
69 static u32 port_peernode(struct port *p_ptr)
71 return msg_destnode(&p_ptr->publ.phdr);
/* port_peerport - return port reference of connected peer port (from port header) */
74 static u32 port_peerport(struct port *p_ptr)
76 return msg_destport(&p_ptr->publ.phdr);
/* port_out_seqno - return current outgoing transport-level sequence number */
79 static u32 port_out_seqno(struct port *p_ptr)
81 return msg_transp_seqno(&p_ptr->publ.phdr);
/* port_incr_out_seqno - increment outgoing transport sequence number */
84 static void port_incr_out_seqno(struct port *p_ptr)
86 struct tipc_msg *m = &p_ptr->publ.phdr;
/* presumably returns early for non-routed messages (original line elided here) */
88 if (likely(!msg_routed(m)))
90 msg_set_transp_seqno(m, (msg_transp_seqno(m) + 1));
94 * tipc_multicast - send a multicast message to local and remote destinations
97 int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 domain,
98 u32 num_sect, struct iovec const *msg_sect)
100 struct tipc_msg *hdr;
102 struct sk_buff *ibuf = NULL;
103 struct port_list dports = {0, NULL, };
104 struct port *oport = tipc_port_deref(ref);
108 if (unlikely(!oport))
111 /* Create multicast message */
113 hdr = &oport->publ.phdr;
114 msg_set_type(hdr, TIPC_MCAST_MSG);
115 msg_set_nametype(hdr, seq->type);
116 msg_set_namelower(hdr, seq->lower);
117 msg_set_nameupper(hdr, seq->upper);
118 msg_set_hdr_sz(hdr, MCAST_H_SIZE);
119 res = msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE,
120 !oport->user_port, &buf);
124 /* Figure out where to send multicast message */
126 ext_targets = tipc_nametbl_mc_translate(seq->type, seq->lower, seq->upper,
127 TIPC_NODE_SCOPE, &dports);
129 /* Send message to destinations (duplicate it only if necessary) */
/* A copy (ibuf) is made for local delivery when local destination ports exist */
132 if (dports.count != 0) {
133 ibuf = skb_copy(buf, GFP_ATOMIC);
135 tipc_port_list_free(&dports);
/* External destinations go out via the broadcast link */
140 res = tipc_bclink_send_msg(buf);
141 if ((res < 0) && (dports.count != 0)) {
/* Local destinations receive the copied buffer */
150 tipc_port_recv_mcast(ibuf, &dports);
152 tipc_port_list_free(&dports);
158 * tipc_port_recv_mcast - deliver multicast message to all destination ports
160 * If there is no port list, perform a lookup to create one
163 void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
165 struct tipc_msg* msg;
166 struct port_list dports = {0, NULL, };
167 struct port_list *item = dp;
172 /* Create destination port list, if one wasn't supplied */
175 tipc_nametbl_mc_translate(msg_nametype(msg),
183 /* Deliver a copy of message to each destination port */
/* Single-destination fast path: deliver the original buffer without cloning */
185 if (dp->count != 0) {
186 if (dp->count == 1) {
187 msg_set_destport(msg, dp->ports[0]);
188 tipc_port_recv_msg(buf);
189 tipc_port_list_free(dp);
/* Multi-destination path: clone the buffer once per destination port */
192 for (; cnt < dp->count; cnt++) {
193 int index = cnt % PLSIZE;
194 struct sk_buff *b = skb_clone(buf, GFP_ATOMIC);
/* clone failure: remaining destinations are not delivered */
197 warn("Unable to deliver multicast message(s)\n");
198 msg_dbg(msg, "LOST:");
/* advance to next port_list chunk every PLSIZE entries */
201 if ((index == 0) && (cnt != 0)) {
204 msg_set_destport(buf_msg(b),item->ports[index]);
205 tipc_port_recv_msg(b);
210 tipc_port_list_free(dp);
214 * tipc_createport_raw - create a generic TIPC port
216 * Returns port reference, or 0 if unable to create it
218 * Note: The newly created port is returned in the locked state.
221 u32 tipc_createport_raw(void *usr_handle,
222 u32 (*dispatcher)(struct tipc_port *, struct sk_buff *),
223 void (*wakeup)(struct tipc_port *),
224 const u32 importance,
225 struct tipc_port **tp_ptr)
228 struct tipc_msg *msg;
231 p_ptr = kzalloc(sizeof(*p_ptr), GFP_ATOMIC);
233 warn("Port creation failed, no memory\n");
/* Acquire a reference-table slot; the slot provides the port's lock */
236 ref = tipc_ref_acquire(p_ptr, &p_ptr->publ.lock);
238 warn("Port creation failed, reference table exhausted\n");
243 p_ptr->publ.usr_handle = usr_handle;
244 p_ptr->publ.max_pkt = MAX_PKT_DEFAULT;
245 p_ptr->publ.ref = ref;
/* Pre-initialize the port's message header template */
246 msg = &p_ptr->publ.phdr;
247 msg_init(msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG, TIPC_OK, LONG_H_SIZE,
249 msg_set_orignode(msg, tipc_own_addr);
250 msg_set_prevnode(msg, tipc_own_addr);
251 msg_set_origport(msg, ref);
252 msg_set_importance(msg,importance);
253 p_ptr->last_in_seqno = 41;
255 INIT_LIST_HEAD(&p_ptr->wait_list);
256 INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list);
257 p_ptr->congested_link = NULL;
258 p_ptr->dispatcher = dispatcher;
259 p_ptr->wakeup = wakeup;
260 p_ptr->user_port = NULL;
261 k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref);
/* Register the new port on the global list under tipc_port_list_lock */
262 spin_lock_bh(&tipc_port_list_lock);
263 INIT_LIST_HEAD(&p_ptr->publications);
264 INIT_LIST_HEAD(&p_ptr->port_list);
265 list_add_tail(&p_ptr->port_list, &ports);
266 spin_unlock_bh(&tipc_port_list_lock);
267 *tp_ptr = &p_ptr->publ;
/* tipc_deleteport - destroy port; withdraws names, aborts peer, removes timer */
271 int tipc_deleteport(u32 ref)
274 struct sk_buff *buf = NULL;
/* Withdraw all name publications before tearing the port down */
276 tipc_withdraw(ref, 0, NULL);
277 p_ptr = tipc_port_lock(ref);
/* Discard the reference first so no new lookups can find the port */
281 tipc_ref_discard(ref);
282 tipc_port_unlock(p_ptr);
284 k_cancel_timer(&p_ptr->timer);
/* If connected, notify the peer and drop the node-down subscription */
285 if (p_ptr->publ.connected) {
286 buf = port_build_peer_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
287 tipc_nodesub_unsubscribe(&p_ptr->subscription);
289 if (p_ptr->user_port) {
290 tipc_reg_remove_port(p_ptr->user_port);
291 kfree(p_ptr->user_port);
294 spin_lock_bh(&tipc_port_list_lock);
295 list_del(&p_ptr->port_list);
296 list_del(&p_ptr->wait_list);
297 spin_unlock_bh(&tipc_port_list_lock);
298 k_term_timer(&p_ptr->timer);
300 dbg("Deleted port %u\n", ref);
/* buf may be NULL; tipc_net_route_msg() is expected to tolerate that (see other callers) */
301 tipc_net_route_msg(buf);
306 * tipc_get_port() - return port associated with 'ref'
308 * Note: Port is not locked.
311 struct tipc_port *tipc_get_port(const u32 ref)
313 return (struct tipc_port *)tipc_ref_deref(ref);
317 * tipc_get_handle - return user handle associated to port 'ref'
320 void *tipc_get_handle(const u32 ref)
/* Lock the port, copy the user handle out, then release the lock */
325 p_ptr = tipc_port_lock(ref);
328 handle = p_ptr->publ.usr_handle;
329 tipc_port_unlock(p_ptr);
/* port_unreliable - nonzero if the port's messages may be dropped at source */
333 static int port_unreliable(struct port *p_ptr)
335 return msg_src_droppable(&p_ptr->publ.phdr);
/* tipc_portunreliable - read the port's "unreliable" flag into *isunreliable */
338 int tipc_portunreliable(u32 ref, unsigned int *isunreliable)
342 p_ptr = tipc_port_lock(ref);
345 *isunreliable = port_unreliable(p_ptr);
346 tipc_port_unlock(p_ptr);
/* tipc_set_portunreliable - set source-droppable flag in the port header template */
350 int tipc_set_portunreliable(u32 ref, unsigned int isunreliable)
354 p_ptr = tipc_port_lock(ref);
357 msg_set_src_droppable(&p_ptr->publ.phdr, (isunreliable != 0));
358 tipc_port_unlock(p_ptr);
/* port_unreturnable - nonzero if undeliverable messages may be dropped (not returned) */
362 static int port_unreturnable(struct port *p_ptr)
364 return msg_dest_droppable(&p_ptr->publ.phdr);
/* tipc_portunreturnable - read the port's "unreturnable" flag into *isunrejectable */
367 int tipc_portunreturnable(u32 ref, unsigned int *isunrejectable)
371 p_ptr = tipc_port_lock(ref);
374 *isunrejectable = port_unreturnable(p_ptr);
375 tipc_port_unlock(p_ptr);
/* tipc_set_portunreturnable - set destination-droppable flag in the port header */
379 int tipc_set_portunreturnable(u32 ref, unsigned int isunrejectable)
383 p_ptr = tipc_port_lock(ref);
386 msg_set_dest_droppable(&p_ptr->publ.phdr, (isunrejectable != 0));
387 tipc_port_unlock(p_ptr);
392 * port_build_proto_msg(): build a port level protocol
393 * or a connection abortion message. Called with
396 static struct sk_buff *port_build_proto_msg(u32 destport, u32 destnode,
397 u32 origport, u32 orignode,
398 u32 usr, u32 type, u32 err,
402 struct tipc_msg *msg;
/* Allocate a header-only buffer; returns NULL on allocation failure (handled by callers) */
404 buf = buf_acquire(LONG_H_SIZE);
407 msg_init(msg, usr, type, err, LONG_H_SIZE, destnode);
408 msg_set_destport(msg, destport);
409 msg_set_origport(msg, origport);
410 msg_set_destnode(msg, destnode);
411 msg_set_orignode(msg, orignode);
412 msg_set_transp_seqno(msg, seqno);
/* 'ack' is carried in the message-count field */
413 msg_set_msgcnt(msg, ack);
414 msg_dbg(msg, "PORT>SEND>:");
/* tipc_reject_msg - return a received message to its sender with error code 'err' */
419 int tipc_reject_msg(struct sk_buff *buf, u32 err)
421 struct tipc_msg *msg = buf_msg(buf);
422 struct sk_buff *rbuf;
423 struct tipc_msg *rmsg;
425 u32 imp = msg_importance(msg);
426 u32 data_sz = msg_data_sz(msg);
/* Cap the amount of original data echoed back to the sender */
428 if (data_sz > MAX_REJECT_SIZE)
429 data_sz = MAX_REJECT_SIZE;
/* presumably bumps importance for connected msgs — original line elided here */
430 if (msg_connected(msg) && (imp < TIPC_CRITICAL_IMPORTANCE))
432 msg_dbg(msg, "port->rej: ");
434 /* discard rejected message if it shouldn't be returned to sender */
435 if (msg_errcode(msg) || msg_dest_droppable(msg)) {
440 /* construct rejected message */
442 hdr_sz = MCAST_H_SIZE;
444 hdr_sz = LONG_H_SIZE;
445 rbuf = buf_acquire(data_sz + hdr_sz);
450 rmsg = buf_msg(rbuf);
/* Swap origin/destination so the reject travels back toward the sender */
451 msg_init(rmsg, imp, msg_type(msg), err, hdr_sz, msg_orignode(msg));
452 msg_set_destport(rmsg, msg_origport(msg));
453 msg_set_prevnode(rmsg, tipc_own_addr);
454 msg_set_origport(rmsg, msg_destport(msg));
456 msg_set_orignode(rmsg, tipc_own_addr);
458 msg_set_orignode(rmsg, msg_destnode(msg));
459 msg_set_size(rmsg, data_sz + hdr_sz);
460 msg_set_nametype(rmsg, msg_nametype(msg));
461 msg_set_nameinst(rmsg, msg_nameinst(msg));
462 skb_copy_to_linear_data_offset(rbuf, hdr_sz, msg_data(msg), data_sz);
464 /* send self-abort message when rejecting on a connected port */
465 if (msg_connected(msg)) {
466 struct sk_buff *abuf = NULL;
467 struct port *p_ptr = tipc_port_lock(msg_destport(msg));
470 if (p_ptr->publ.connected)
471 abuf = port_build_self_abort_msg(p_ptr, err);
472 tipc_port_unlock(p_ptr);
474 tipc_net_route_msg(abuf);
477 /* send rejected message */
479 tipc_net_route_msg(rbuf);
/* tipc_port_reject_sections - build message from iovec sections, then reject it */
483 int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
484 struct iovec const *msg_sect, u32 num_sect,
490 res = msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE,
491 !p_ptr->user_port, &buf);
495 return tipc_reject_msg(buf, err);
/* port_timeout - connection-probing timer callback (ref identifies the port) */
498 static void port_timeout(unsigned long ref)
500 struct port *p_ptr = tipc_port_lock(ref);
501 struct sk_buff *buf = NULL;
506 if (!p_ptr->publ.connected) {
507 tipc_port_unlock(p_ptr);
511 /* Last probe answered ? */
/* Still PROBING means the previous probe went unanswered: abort the connection */
512 if (p_ptr->probing_state == PROBING) {
513 buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
/* Otherwise send a fresh probe and re-arm the timer */
515 buf = port_build_proto_msg(port_peerport(p_ptr),
516 port_peernode(p_ptr),
522 port_out_seqno(p_ptr),
524 port_incr_out_seqno(p_ptr);
525 p_ptr->probing_state = PROBING;
526 k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
528 tipc_port_unlock(p_ptr);
529 tipc_net_route_msg(buf);
/* port_handle_node_down - peer node lost: abort this port's connection */
533 static void port_handle_node_down(unsigned long ref)
535 struct port *p_ptr = tipc_port_lock(ref);
536 struct sk_buff* buf = NULL;
540 buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_NODE);
541 tipc_port_unlock(p_ptr);
542 tipc_net_route_msg(buf);
/* port_build_self_abort_msg - build abort msg addressed back to this port itself */
546 static struct sk_buff *port_build_self_abort_msg(struct port *p_ptr, u32 err)
548 u32 imp = msg_importance(&p_ptr->publ.phdr);
550 if (!p_ptr->publ.connected)
/* presumably raises importance — original line elided here */
552 if (imp < TIPC_CRITICAL_IMPORTANCE)
/* Destination is this port; origin fields carry the peer's identity */
554 return port_build_proto_msg(p_ptr->publ.ref,
556 port_peerport(p_ptr),
557 port_peernode(p_ptr),
561 p_ptr->last_in_seqno + 1,
/* port_build_peer_abort_msg - build abort msg addressed to the connected peer */
566 static struct sk_buff *port_build_peer_abort_msg(struct port *p_ptr, u32 err)
568 u32 imp = msg_importance(&p_ptr->publ.phdr);
570 if (!p_ptr->publ.connected)
/* presumably raises importance — original line elided here */
572 if (imp < TIPC_CRITICAL_IMPORTANCE)
574 return port_build_proto_msg(port_peerport(p_ptr),
575 port_peernode(p_ptr),
581 port_out_seqno(p_ptr),
/* tipc_port_recv_proto_msg - handle incoming port-level protocol message (probe/ack) */
585 void tipc_port_recv_proto_msg(struct sk_buff *buf)
587 struct tipc_msg *msg = buf_msg(buf);
588 struct port *p_ptr = tipc_port_lock(msg_destport(msg));
590 struct sk_buff *r_buf = NULL;
591 struct sk_buff *abort_buf = NULL;
593 msg_dbg(msg, "PORT<RECV<:");
/* No such local port */
596 err = TIPC_ERR_NO_PORT;
597 } else if (p_ptr->publ.connected) {
/* Validate that the message really comes from our connected peer */
598 if (port_peernode(p_ptr) != msg_orignode(msg))
599 err = TIPC_ERR_NO_PORT;
600 if (port_peerport(p_ptr) != msg_origport(msg))
601 err = TIPC_ERR_NO_PORT;
/* Routed messages additionally carry a sequence number to check */
602 if (!err && msg_routed(msg)) {
603 u32 seqno = msg_transp_seqno(msg);
604 u32 myno = ++p_ptr->last_in_seqno;
606 err = TIPC_ERR_NO_PORT;
607 abort_buf = port_build_self_abort_msg(p_ptr, err);
/* CONN_ACK releases acknowledged messages and may wake a congested sender */
610 if (msg_type(msg) == CONN_ACK) {
611 int wakeup = tipc_port_congested(p_ptr) &&
612 p_ptr->publ.congested &&
614 p_ptr->acked += msg_msgcnt(msg);
615 if (tipc_port_congested(p_ptr))
617 p_ptr->publ.congested = 0;
620 p_ptr->wakeup(&p_ptr->publ);
623 } else if (p_ptr->publ.published) {
624 err = TIPC_ERR_NO_PORT;
/* Error path: answer with a protocol message carrying the error code */
627 r_buf = port_build_proto_msg(msg_origport(msg),
631 TIPC_HIGH_IMPORTANCE,
640 if (msg_type(msg) == CONN_PROBE) {
/* Answer the probe and mark the connection as confirmed */
641 r_buf = port_build_proto_msg(msg_origport(msg),
648 port_out_seqno(p_ptr),
651 p_ptr->probing_state = CONFIRMED;
652 port_incr_out_seqno(p_ptr);
655 tipc_port_unlock(p_ptr);
656 tipc_net_route_msg(r_buf);
657 tipc_net_route_msg(abort_buf);
/* port_print - append a human-readable description of the port to print buffer */
661 static void port_print(struct port *p_ptr, struct print_buf *buf, int full_id)
663 struct publication *publ;
/* full_id: print the complete <zone.cluster.node:ref> identity */
666 tipc_printf(buf, "<%u.%u.%u:%u>:",
667 tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
668 tipc_node(tipc_own_addr), p_ptr->publ.ref);
670 tipc_printf(buf, "%-10u:", p_ptr->publ.ref);
672 if (p_ptr->publ.connected) {
673 u32 dport = port_peerport(p_ptr);
674 u32 destnode = port_peernode(p_ptr);
676 tipc_printf(buf, " connected to <%u.%u.%u:%u>",
677 tipc_zone(destnode), tipc_cluster(destnode),
678 tipc_node(destnode), dport);
679 if (p_ptr->publ.conn_type != 0)
680 tipc_printf(buf, " via {%u,%u}",
681 p_ptr->publ.conn_type,
682 p_ptr->publ.conn_instance);
684 else if (p_ptr->publ.published) {
685 tipc_printf(buf, " bound to");
686 list_for_each_entry(publ, &p_ptr->publications, pport_list) {
/* Single-instance publications print as a pair, ranges as a triple */
687 if (publ->lower == publ->upper)
688 tipc_printf(buf, " {%u,%u}", publ->type,
691 tipc_printf(buf, " {%u,%u,%u}", publ->type,
692 publ->lower, publ->upper);
695 tipc_printf(buf, "\n");
698 #define MAX_PORT_QUERY 32768
/* tipc_port_get_ports - build a TLV reply listing all ports (for configuration queries) */
700 struct sk_buff *tipc_port_get_ports(void)
703 struct tlv_desc *rep_tlv;
708 buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_PORT_QUERY))(
711 rep_tlv = (struct tlv_desc *)buf->data;
713 tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_QUERY);
/* Walk the global port list; each port is printed under its own lock */
714 spin_lock_bh(&tipc_port_list_lock);
715 list_for_each_entry(p_ptr, &ports, port_list) {
716 spin_lock_bh(p_ptr->publ.lock);
717 port_print(p_ptr, &pb, 0);
718 spin_unlock_bh(p_ptr->publ.lock);
720 spin_unlock_bh(&tipc_port_list_lock);
721 str_len = tipc_printbuf_validate(&pb);
723 skb_put(buf, TLV_SPACE(str_len));
724 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
731 #define MAX_PORT_STATS 2000
/* port_show_stats - build a TLV reply with statistics for one port (by ref) */
733 struct sk_buff *port_show_stats(const void *req_tlv_area, int req_tlv_space)
738 struct tlv_desc *rep_tlv;
742 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_PORT_REF))
743 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
745 ref = *(u32 *)TLV_DATA(req_tlv_area);
748 p_ptr = tipc_port_lock(ref);
750 return cfg_reply_error_string("port not found");
752 buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_PORT_STATS));
/* Allocation failed: release the port lock before returning */
754 tipc_port_unlock(p_ptr);
757 rep_tlv = (struct tlv_desc *)buf->data;
759 tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_STATS);
760 port_print(p_ptr, &pb, 1);
761 /* NEED TO FILL IN ADDITIONAL PORT STATISTICS HERE */
762 tipc_port_unlock(p_ptr);
763 str_len = tipc_printbuf_validate(&pb);
765 skb_put(buf, TLV_SPACE(str_len));
766 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
/* tipc_port_reinit - refresh origin/prev node fields of every port's header
 * template (used when the node's own network address changes) */
773 void tipc_port_reinit(void)
776 struct tipc_msg *msg;
778 spin_lock_bh(&tipc_port_list_lock);
779 list_for_each_entry(p_ptr, &ports, port_list) {
780 msg = &p_ptr->publ.phdr;
781 if (msg_orignode(msg) == tipc_own_addr)
783 msg_set_prevnode(msg, tipc_own_addr);
784 msg_set_orignode(msg, tipc_own_addr);
786 spin_unlock_bh(&tipc_port_list_lock);
791 * port_dispatcher_sigh(): Signal handler for messages destinated
792 * to the tipc_port interface.
795 static void port_dispatcher_sigh(void *dummy)
/* Detach the entire pending-message queue under queue_lock, then process
 * the detached chain without holding the lock */
799 spin_lock_bh(&queue_lock);
800 buf = msg_queue_head;
801 msg_queue_head = NULL;
802 spin_unlock_bh(&queue_lock);
806 struct user_port *up_ptr;
807 struct tipc_portid orig;
808 struct tipc_name_seq dseq;
814 struct sk_buff *next = buf->next;
815 struct tipc_msg *msg = buf_msg(buf);
816 u32 dref = msg_destport(msg);
818 message_type = msg_type(msg);
819 if (message_type > TIPC_DIRECT_MSG)
820 goto reject; /* Unsupported message type */
822 p_ptr = tipc_port_lock(dref);
824 goto reject; /* Port deleted while msg in queue */
/* Snapshot sender identity and port state while the port lock is held */
826 orig.ref = msg_origport(msg);
827 orig.node = msg_orignode(msg);
828 up_ptr = p_ptr->user_port;
829 usr_handle = up_ptr->usr_handle;
830 connected = p_ptr->publ.connected;
831 published = p_ptr->publ.published;
/* Error-carrying messages take the second switch (error callbacks) below */
833 if (unlikely(msg_errcode(msg)))
/* --- Normal (non-error) messages: invoke the matching user callback --- */
836 switch (message_type) {
839 tipc_conn_msg_event cb = up_ptr->conn_msg_cb;
840 u32 peer_port = port_peerport(p_ptr);
841 u32 peer_node = port_peernode(p_ptr);
843 tipc_port_unlock(p_ptr);
/* Implicit connect: first message on an unbound, unconnected port */
844 if (unlikely(!connected)) {
845 if (unlikely(published))
847 tipc_connect2port(dref,&orig);
849 if (unlikely(msg_origport(msg) != peer_port))
851 if (unlikely(msg_orignode(msg) != peer_node))
/* Send flow-control ack once the unacked window is full */
855 if (unlikely(++p_ptr->publ.conn_unacked >=
856 TIPC_FLOW_CONTROL_WIN))
857 tipc_acknowledge(dref,
858 p_ptr->publ.conn_unacked);
859 skb_pull(buf, msg_hdr_sz(msg));
860 cb(usr_handle, dref, &buf, msg_data(msg),
864 case TIPC_DIRECT_MSG:{
865 tipc_msg_event cb = up_ptr->msg_cb;
867 tipc_port_unlock(p_ptr);
868 if (unlikely(connected))
872 skb_pull(buf, msg_hdr_sz(msg));
873 cb(usr_handle, dref, &buf, msg_data(msg),
874 msg_data_sz(msg), msg_importance(msg),
879 case TIPC_NAMED_MSG:{
880 tipc_named_msg_event cb = up_ptr->named_msg_cb;
882 tipc_port_unlock(p_ptr);
883 if (unlikely(connected))
887 if (unlikely(!published))
889 dseq.type = msg_nametype(msg);
890 dseq.lower = msg_nameinst(msg);
/* Plain named msgs have a single instance; mcast carries a range */
891 dseq.upper = (message_type == TIPC_NAMED_MSG)
892 ? dseq.lower : msg_nameupper(msg);
893 skb_pull(buf, msg_hdr_sz(msg));
894 cb(usr_handle, dref, &buf, msg_data(msg),
895 msg_data_sz(msg), msg_importance(msg),
/* --- Error messages: invoke the matching user error callback --- */
905 switch (message_type) {
908 tipc_conn_shutdown_event cb =
910 u32 peer_port = port_peerport(p_ptr);
911 u32 peer_node = port_peernode(p_ptr);
913 tipc_port_unlock(p_ptr);
914 if (!connected || !cb)
916 if (msg_origport(msg) != peer_port)
918 if (msg_orignode(msg) != peer_node)
920 tipc_disconnect(dref);
921 skb_pull(buf, msg_hdr_sz(msg));
922 cb(usr_handle, dref, &buf, msg_data(msg),
923 msg_data_sz(msg), msg_errcode(msg));
926 case TIPC_DIRECT_MSG:{
927 tipc_msg_err_event cb = up_ptr->err_cb;
929 tipc_port_unlock(p_ptr);
930 if (connected || !cb)
932 skb_pull(buf, msg_hdr_sz(msg));
933 cb(usr_handle, dref, &buf, msg_data(msg),
934 msg_data_sz(msg), msg_errcode(msg), &orig);
938 case TIPC_NAMED_MSG:{
939 tipc_named_msg_err_event cb =
940 up_ptr->named_err_cb;
942 tipc_port_unlock(p_ptr);
943 if (connected || !cb)
945 dseq.type = msg_nametype(msg);
946 dseq.lower = msg_nameinst(msg);
947 dseq.upper = (message_type == TIPC_NAMED_MSG)
948 ? dseq.lower : msg_nameupper(msg);
949 skb_pull(buf, msg_hdr_sz(msg));
950 cb(usr_handle, dref, &buf, msg_data(msg),
951 msg_data_sz(msg), msg_errcode(msg), &dseq);
/* reject label target: bounce undeliverable messages back to sender */
960 tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
966 * port_dispatcher(): Dispatcher for messages destinated
967 * to the tipc_port interface. Called with port locked.
970 static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf)
973 spin_lock_bh(&queue_lock);
/* Append to the pending queue; schedule the drain handler only when the
 * queue was previously empty (it processes the whole chain in one signal) */
974 if (msg_queue_head) {
975 msg_queue_tail->next = buf;
976 msg_queue_tail = buf;
978 msg_queue_tail = msg_queue_head = buf;
979 tipc_k_signal((Handler)port_dispatcher_sigh, 0);
981 spin_unlock_bh(&queue_lock);
986 * Wake up port after congestion: Called with port locked,
990 static void port_wakeup_sh(unsigned long ref)
993 struct user_port *up_ptr;
994 tipc_continue_event cb = NULL;
997 p_ptr = tipc_port_lock(ref);
999 up_ptr = p_ptr->user_port;
/* Copy the callback and handle out before dropping the port lock */
1001 cb = up_ptr->continue_event_cb;
1002 uh = up_ptr->usr_handle;
1004 tipc_port_unlock(p_ptr);
/* port_wakeup - defer congestion wakeup to signal context via port_wakeup_sh() */
1011 static void port_wakeup(struct tipc_port *p_ptr)
1013 tipc_k_signal((Handler)port_wakeup_sh, p_ptr->ref);
/* tipc_acknowledge - send CONN_ACK-style protocol msg acknowledging 'ack' messages */
1016 void tipc_acknowledge(u32 ref, u32 ack)
1019 struct sk_buff *buf = NULL;
1021 p_ptr = tipc_port_lock(ref);
1024 if (p_ptr->publ.connected) {
1025 p_ptr->publ.conn_unacked -= ack;
1026 buf = port_build_proto_msg(port_peerport(p_ptr),
1027 port_peernode(p_ptr),
1033 port_out_seqno(p_ptr),
1036 tipc_port_unlock(p_ptr);
1037 tipc_net_route_msg(buf);
1041 * tipc_createport(): user level call. Will add port to
1042 * registry if non-zero user_ref.
1045 int tipc_createport(u32 user_ref,
1047 unsigned int importance,
1048 tipc_msg_err_event error_cb,
1049 tipc_named_msg_err_event named_error_cb,
1050 tipc_conn_shutdown_event conn_error_cb,
1051 tipc_msg_event msg_cb,
1052 tipc_named_msg_event named_msg_cb,
1053 tipc_conn_msg_event conn_msg_cb,
1054 tipc_continue_event continue_event_cb,/* May be zero */
1057 struct user_port *up_ptr;
1059 struct tipc_port *tp_ptr;
1062 up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
1064 warn("Port creation failed, no memory\n");
/* Create the underlying raw port; returns with the port locked */
1067 ref = tipc_createport_raw(NULL, port_dispatcher, port_wakeup,
1068 importance, &tp_ptr);
1073 p_ptr = (struct port *)tp_ptr;
/* Wire the user_port record to the new port and record the callbacks */
1075 p_ptr->user_port = up_ptr;
1076 up_ptr->user_ref = user_ref;
1077 up_ptr->usr_handle = usr_handle;
1078 up_ptr->ref = p_ptr->publ.ref;
1079 up_ptr->err_cb = error_cb;
1080 up_ptr->named_err_cb = named_error_cb;
1081 up_ptr->conn_err_cb = conn_error_cb;
1082 up_ptr->msg_cb = msg_cb;
1083 up_ptr->named_msg_cb = named_msg_cb;
1084 up_ptr->conn_msg_cb = conn_msg_cb;
1085 up_ptr->continue_event_cb = continue_event_cb;
1086 INIT_LIST_HEAD(&up_ptr->uport_list);
1087 tipc_reg_add_port(up_ptr);
1088 *portref = p_ptr->publ.ref;
1089 dbg(" tipc_createport: %x with ref %u\n", p_ptr, p_ptr->publ.ref);
1090 tipc_port_unlock(p_ptr);
/* tipc_ownidentity - fill in *id with this port's identity (node address shown) */
1094 int tipc_ownidentity(u32 ref, struct tipc_portid *id)
1097 id->node = tipc_own_addr;
/* tipc_portimportance - read the port's message importance into *importance */
1101 int tipc_portimportance(u32 ref, unsigned int *importance)
1105 p_ptr = tipc_port_lock(ref);
1108 *importance = (unsigned int)msg_importance(&p_ptr->publ.phdr);
1109 tipc_port_unlock(p_ptr);
/* tipc_set_portimportance - set message importance (rejects values above CRITICAL) */
1113 int tipc_set_portimportance(u32 ref, unsigned int imp)
1117 if (imp > TIPC_CRITICAL_IMPORTANCE)
1120 p_ptr = tipc_port_lock(ref);
1123 msg_set_importance(&p_ptr->publ.phdr, (u32)imp);
1124 tipc_port_unlock(p_ptr);
/* tipc_publish - bind a name sequence to the port in the name table */
1129 int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
1132 struct publication *publ;
1136 p_ptr = tipc_port_lock(ref);
1140 dbg("tipc_publ %u, p_ptr = %x, conn = %x, scope = %x, "
1141 "lower = %u, upper = %u\n",
1142 ref, p_ptr, p_ptr->publ.connected, scope, seq->lower, seq->upper);
/* A connected port cannot also publish names */
1143 if (p_ptr->publ.connected)
1145 if (seq->lower > seq->upper)
1147 if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE))
/* Key must be unique per port; derived from ref and publication count */
1149 key = ref + p_ptr->pub_count + 1;
1154 publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper,
1155 scope, p_ptr->publ.ref, key);
1157 list_add(&publ->pport_list, &p_ptr->publications);
1159 p_ptr->publ.published = 1;
1163 tipc_port_unlock(p_ptr);
/* tipc_withdraw - remove name binding(s); seq == NULL withdraws all publications */
1167 int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
1170 struct publication *publ;
1171 struct publication *tpubl;
1174 p_ptr = tipc_port_lock(ref);
/* No 'seq': withdraw every publication owned by this port */
1178 list_for_each_entry_safe(publ, tpubl,
1179 &p_ptr->publications, pport_list) {
1180 tipc_nametbl_withdraw(publ->type, publ->lower,
1181 publ->ref, publ->key);
/* With 'seq': withdraw only the publication matching scope/type/range exactly */
1185 list_for_each_entry_safe(publ, tpubl,
1186 &p_ptr->publications, pport_list) {
1187 if (publ->scope != scope)
1189 if (publ->type != seq->type)
1191 if (publ->lower != seq->lower)
1193 if (publ->upper != seq->upper)
1195 tipc_nametbl_withdraw(publ->type, publ->lower,
1196 publ->ref, publ->key);
1201 if (list_empty(&p_ptr->publications))
1202 p_ptr->publ.published = 0;
1203 tipc_port_unlock(p_ptr);
/* tipc_connect2port - connect this port to the given peer port identity */
1207 int tipc_connect2port(u32 ref, struct tipc_portid const *peer)
1210 struct tipc_msg *msg;
1213 p_ptr = tipc_port_lock(ref);
/* A port that is published or already connected cannot connect */
1216 if (p_ptr->publ.published || p_ptr->publ.connected)
/* Preset the header template for connection-oriented traffic to the peer */
1221 msg = &p_ptr->publ.phdr;
1222 msg_set_destnode(msg, peer->node);
1223 msg_set_destport(msg, peer->ref);
1224 msg_set_orignode(msg, tipc_own_addr);
1225 msg_set_origport(msg, p_ptr->publ.ref);
1226 msg_set_transp_seqno(msg, 42);
1227 msg_set_type(msg, TIPC_CONN_MSG);
/* Short header suffices when no routing is needed to reach the peer */
1228 if (!may_route(peer->node))
1229 msg_set_hdr_sz(msg, SHORT_H_SIZE);
1231 msg_set_hdr_sz(msg, LONG_H_SIZE);
1233 p_ptr->probing_interval = PROBING_INTERVAL;
1234 p_ptr->probing_state = CONFIRMED;
1235 p_ptr->publ.connected = 1;
1236 k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
/* Subscribe for peer-node failure so the connection can be aborted */
1238 tipc_nodesub_subscribe(&p_ptr->subscription,peer->node,
1239 (void *)(unsigned long)ref,
1240 (net_ev_handler)port_handle_node_down);
1243 tipc_port_unlock(p_ptr);
/* NOTE(review): max_pkt is updated after the lock is dropped — confirm intended */
1244 p_ptr->publ.max_pkt = tipc_link_get_max_pkt(peer->node, ref);
1249 * tipc_disconnect_port - disconnect port from peer
1251 * Port must be locked.
1254 int tipc_disconnect_port(struct tipc_port *tp_ptr)
1258 if (tp_ptr->connected) {
1259 tp_ptr->connected = 0;
1260 /* let timer expire on it's own to avoid deadlock! */
1261 tipc_nodesub_unsubscribe(
1262 &((struct port *)tp_ptr)->subscription);
1271 * tipc_disconnect(): Disconnect port form peer.
1272 * This is a node local operation.
1275 int tipc_disconnect(u32 ref)
1280 p_ptr = tipc_port_lock(ref);
1283 res = tipc_disconnect_port((struct tipc_port *)p_ptr);
1284 tipc_port_unlock(p_ptr);
1289 * tipc_shutdown(): Send a SHUTDOWN msg to peer and disconnect
1291 int tipc_shutdown(u32 ref)
1294 struct sk_buff *buf = NULL;
1296 p_ptr = tipc_port_lock(ref);
1300 if (p_ptr->publ.connected) {
1301 u32 imp = msg_importance(&p_ptr->publ.phdr);
/* presumably raises importance — original line elided here */
1302 if (imp < TIPC_CRITICAL_IMPORTANCE)
1304 buf = port_build_proto_msg(port_peerport(p_ptr),
1305 port_peernode(p_ptr),
1311 port_out_seqno(p_ptr),
1314 tipc_port_unlock(p_ptr);
1315 tipc_net_route_msg(buf);
/* Finish with the node-local disconnect */
1316 return tipc_disconnect(ref);
/* tipc_isconnected - read the port's connected state into *isconnected */
1319 int tipc_isconnected(u32 ref, int *isconnected)
1323 p_ptr = tipc_port_lock(ref);
1326 *isconnected = p_ptr->publ.connected;
1327 tipc_port_unlock(p_ptr);
/* tipc_peer - fill in *peer with the connected peer's port identity */
1331 int tipc_peer(u32 ref, struct tipc_portid *peer)
1336 p_ptr = tipc_port_lock(ref);
1339 if (p_ptr->publ.connected) {
1340 peer->ref = port_peerport(p_ptr);
1341 peer->node = port_peernode(p_ptr);
1345 tipc_port_unlock(p_ptr);
/* tipc_ref_valid - nonzero if 'ref' resolves to a live reference-table entry */
1349 int tipc_ref_valid(u32 ref)
1351 /* Works irrespective of type */
1352 return !!tipc_ref_deref(ref);
1357 * tipc_port_recv_sections(): Concatenate and deliver sectioned
1358 * message for this node.
1361 int tipc_port_recv_sections(struct port *sender, unsigned int num_sect,
1362 struct iovec const *msg_sect)
1364 struct sk_buff *buf;
/* Build one buffer from the iovec sections, then deliver it locally */
1367 res = msg_build(&sender->publ.phdr, msg_sect, num_sect,
1368 MAX_MSG_SIZE, !sender->user_port, &buf);
1370 tipc_port_recv_msg(buf);
1375 * tipc_send - send message sections on connection
1378 int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
1384 p_ptr = tipc_port_deref(ref);
1385 if (!p_ptr || !p_ptr->publ.connected)
/* Mark congested up front; cleared again below on a successful send */
1388 p_ptr->publ.congested = 1;
1389 if (!tipc_port_congested(p_ptr)) {
1390 destnode = port_peernode(p_ptr);
/* Fast path for off-node peer, local delivery otherwise */
1391 if (likely(destnode != tipc_own_addr))
1392 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
1395 res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
1397 if (likely(res != -ELINKCONG)) {
1398 port_incr_out_seqno(p_ptr);
1399 p_ptr->publ.congested = 0;
/* Congested: unreliable ports silently drop and report success-sized result */
1404 if (port_unreliable(p_ptr)) {
1405 p_ptr->publ.congested = 0;
1406 /* Just calculate msg length and return */
1407 return msg_calc_data_size(msg_sect, num_sect);
1413 * tipc_send_buf - send message buffer on connection
1416 int tipc_send_buf(u32 ref, struct sk_buff *buf, unsigned int dsz)
1419 struct tipc_msg *msg;
1425 p_ptr = tipc_port_deref(ref);
1426 if (!p_ptr || !p_ptr->publ.connected)
1429 msg = &p_ptr->publ.phdr;
1430 hsz = msg_hdr_sz(msg);
1432 msg_set_size(msg, sz);
/* Ensure headroom for the header, then prepend the port's header template */
1433 if (skb_cow(buf, hsz))
1437 skb_copy_to_linear_data(buf, msg, hsz);
1438 destnode = msg_destnode(msg);
1439 p_ptr->publ.congested = 1;
1440 if (!tipc_port_congested(p_ptr)) {
1441 if (likely(destnode != tipc_own_addr))
1442 res = tipc_send_buf_fast(buf, destnode);
1444 tipc_port_recv_msg(buf);
1447 if (likely(res != -ELINKCONG)) {
1448 port_incr_out_seqno(p_ptr);
1450 p_ptr->publ.congested = 0;
/* Congested: unreliable ports silently drop the message */
1454 if (port_unreliable(p_ptr)) {
1455 p_ptr->publ.congested = 0;
1462 * tipc_forward2name - forward message sections to port name
1465 int tipc_forward2name(u32 ref,
1466 struct tipc_name const *name,
1469 struct iovec const *msg_sect,
1470 struct tipc_portid const *orig,
1471 unsigned int importance)
1474 struct tipc_msg *msg;
1475 u32 destnode = domain;
1479 p_ptr = tipc_port_deref(ref);
/* Connectionless send only: a connected port cannot forward-by-name */
1480 if (!p_ptr || p_ptr->publ.connected)
1483 msg = &p_ptr->publ.phdr;
1484 msg_set_type(msg, TIPC_NAMED_MSG);
1485 msg_set_orignode(msg, orig->node);
1486 msg_set_origport(msg, orig->ref);
1487 msg_set_hdr_sz(msg, LONG_H_SIZE);
1488 msg_set_nametype(msg, name->type);
1489 msg_set_nameinst(msg, name->instance);
1490 msg_set_lookup_scope(msg, addr_scope(domain));
1491 if (importance <= TIPC_CRITICAL_IMPORTANCE)
1492 msg_set_importance(msg,importance);
/* Resolve the name to a concrete destination port/node */
1493 destport = tipc_nametbl_translate(name->type, name->instance, &destnode);
1494 msg_set_destnode(msg, destnode);
1495 msg_set_destport(msg, destport);
1497 if (likely(destport || destnode)) {
1499 if (likely(destnode == tipc_own_addr))
1500 return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
1501 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
1503 if (likely(res != -ELINKCONG))
1505 if (port_unreliable(p_ptr)) {
1506 /* Just calculate msg length and return */
1507 return msg_calc_data_size(msg_sect, num_sect);
/* Name lookup failed: reject the sections back to the caller */
1511 return tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect,
1516 * tipc_send2name - send message sections to port name
1519 int tipc_send2name(u32 ref,
1520 struct tipc_name const *name,
1521 unsigned int domain,
1522 unsigned int num_sect,
1523 struct iovec const *msg_sect)
1525 struct tipc_portid orig;
/* Thin wrapper: origin is this node, importance is the port default */
1528 orig.node = tipc_own_addr;
1529 return tipc_forward2name(ref, name, domain, num_sect, msg_sect, &orig,
1530 TIPC_PORT_IMPORTANCE);
1534 * tipc_forward_buf2name - forward message buffer to port name
1537 int tipc_forward_buf2name(u32 ref,
1538 struct tipc_name const *name,
1540 struct sk_buff *buf,
1542 struct tipc_portid const *orig,
1543 unsigned int importance)
1546 struct tipc_msg *msg;
1547 u32 destnode = domain;
1551 p_ptr = (struct port *)tipc_ref_deref(ref);
/* Connectionless send only: a connected port cannot forward-by-name */
1552 if (!p_ptr || p_ptr->publ.connected)
1555 msg = &p_ptr->publ.phdr;
1556 if (importance <= TIPC_CRITICAL_IMPORTANCE)
1557 msg_set_importance(msg, importance);
1558 msg_set_type(msg, TIPC_NAMED_MSG);
1559 msg_set_orignode(msg, orig->node);
1560 msg_set_origport(msg, orig->ref);
1561 msg_set_nametype(msg, name->type);
1562 msg_set_nameinst(msg, name->instance);
1563 msg_set_lookup_scope(msg, addr_scope(domain));
1564 msg_set_hdr_sz(msg, LONG_H_SIZE);
1565 msg_set_size(msg, LONG_H_SIZE + dsz);
/* Resolve the name to a concrete destination port/node */
1566 destport = tipc_nametbl_translate(name->type, name->instance, &destnode);
1567 msg_set_destnode(msg, destnode);
1568 msg_set_destport(msg, destport);
1569 msg_dbg(msg, "forw2name ==> ");
/* Prepend the header template to the data buffer */
1570 if (skb_cow(buf, LONG_H_SIZE))
1572 skb_push(buf, LONG_H_SIZE);
1573 skb_copy_to_linear_data(buf, msg, LONG_H_SIZE);
1574 msg_dbg(buf_msg(buf),"PREP:");
1575 if (likely(destport || destnode)) {
1577 if (destnode == tipc_own_addr)
1578 return tipc_port_recv_msg(buf);
1579 res = tipc_send_buf_fast(buf, destnode);
1580 if (likely(res != -ELINKCONG))
1582 if (port_unreliable(p_ptr))
/* Name lookup failed: return the message to sender with NO_NAME */
1586 return tipc_reject_msg(buf, TIPC_ERR_NO_NAME);
1590 * tipc_send_buf2name - send message buffer to port name
1593 int tipc_send_buf2name(u32 ref,
1594 struct tipc_name const *dest,
1596 struct sk_buff *buf,
1599 struct tipc_portid orig;
/* Thin wrapper: origin is this node, importance is the port default */
1602 orig.node = tipc_own_addr;
1603 return tipc_forward_buf2name(ref, dest, domain, buf, dsz, &orig,
1604 TIPC_PORT_IMPORTANCE);
1608 * tipc_forward2port - forward message sections to port identity
1611 int tipc_forward2port(u32 ref,
1612 struct tipc_portid const *dest,
1613 unsigned int num_sect,
1614 struct iovec const *msg_sect,
1615 struct tipc_portid const *orig,
1616 unsigned int importance)
1619 struct tipc_msg *msg;
1622 p_ptr = tipc_port_deref(ref);
/* Connectionless send only: a connected port cannot forward-by-identity */
1623 if (!p_ptr || p_ptr->publ.connected)
1626 msg = &p_ptr->publ.phdr;
1627 msg_set_type(msg, TIPC_DIRECT_MSG);
1628 msg_set_orignode(msg, orig->node);
1629 msg_set_origport(msg, orig->ref);
1630 msg_set_destnode(msg, dest->node);
1631 msg_set_destport(msg, dest->ref);
1632 msg_set_hdr_sz(msg, DIR_MSG_H_SIZE);
1633 if (importance <= TIPC_CRITICAL_IMPORTANCE)
1634 msg_set_importance(msg, importance);
/* Local destination delivers directly; remote goes via the link layer */
1636 if (dest->node == tipc_own_addr)
1637 return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
1638 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, dest->node);
1639 if (likely(res != -ELINKCONG))
1641 if (port_unreliable(p_ptr)) {
1642 /* Just calculate msg length and return */
1643 return msg_calc_data_size(msg_sect, num_sect);
1649 * tipc_send2port - send message sections to port identity
1652 int tipc_send2port(u32 ref,
1653 struct tipc_portid const *dest,
1654 unsigned int num_sect,
1655 struct iovec const *msg_sect)
1657 struct tipc_portid orig;
/* Thin wrapper: origin is this node, importance is the port default */
1660 orig.node = tipc_own_addr;
1661 return tipc_forward2port(ref, dest, num_sect, msg_sect, &orig,
1662 TIPC_PORT_IMPORTANCE);
1666 * tipc_forward_buf2port - forward message buffer to port identity
1668 int tipc_forward_buf2port(u32 ref,
1669 struct tipc_portid const *dest,
1670 struct sk_buff *buf,
1672 struct tipc_portid const *orig,
1673 unsigned int importance)
1676 struct tipc_msg *msg;
1679 p_ptr = (struct port *)tipc_ref_deref(ref);
/* Connectionless send only: a connected port cannot forward-by-identity */
1680 if (!p_ptr || p_ptr->publ.connected)
1683 msg = &p_ptr->publ.phdr;
1684 msg_set_type(msg, TIPC_DIRECT_MSG);
1685 msg_set_orignode(msg, orig->node);
1686 msg_set_origport(msg, orig->ref);
1687 msg_set_destnode(msg, dest->node);
1688 msg_set_destport(msg, dest->ref);
1689 msg_set_hdr_sz(msg, DIR_MSG_H_SIZE);
1690 if (importance <= TIPC_CRITICAL_IMPORTANCE)
1691 msg_set_importance(msg, importance);
1692 msg_set_size(msg, DIR_MSG_H_SIZE + dsz);
/* Prepend the header template to the data buffer */
1693 if (skb_cow(buf, DIR_MSG_H_SIZE))
1696 skb_push(buf, DIR_MSG_H_SIZE);
1697 skb_copy_to_linear_data(buf, msg, DIR_MSG_H_SIZE);
1698 msg_dbg(msg, "buf2port: ");
/* Local destination delivers directly; remote goes via the link layer */
1700 if (dest->node == tipc_own_addr)
1701 return tipc_port_recv_msg(buf);
1702 res = tipc_send_buf_fast(buf, dest->node);
1703 if (likely(res != -ELINKCONG))
1705 if (port_unreliable(p_ptr))
1711 * tipc_send_buf2port - send message buffer to port identity
1714 int tipc_send_buf2port(u32 ref,
1715 struct tipc_portid const *dest,
1716 struct sk_buff *buf,
1719 struct tipc_portid orig;
/* Thin wrapper: origin is this node, importance is the port default */
1722 orig.node = tipc_own_addr;
1723 return tipc_forward_buf2port(ref, dest, buf, dsz, &orig,
1724 TIPC_PORT_IMPORTANCE);