#include <linux/ceph/ceph_debug.h>

#include <linux/crc32c.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/dns_resolver.h>

#include <linux/ceph/libceph.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/export.h>
/*
 * Ceph uses the messenger to exchange ceph_msg messages with other
 * hosts in the system.  The messenger provides ordered and reliable
 * delivery.  We tolerate TCP disconnects by reconnecting (with
 * exponential backoff) in the case of a fault (disconnection, bad
 * crc, protocol error).  Acks allow sent messages to be discarded by
 * the sender.
 */

/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;

static struct lock_class_key socket_class;
static void queue_con(struct ceph_connection *con);
static void con_work(struct work_struct *);
static void ceph_fault(struct ceph_connection *con);
/*
 * nicely render a sockaddr as a string.
 */
#define MAX_ADDR_STR 20
#define MAX_ADDR_STR_LEN 60
static char addr_str[MAX_ADDR_STR][MAX_ADDR_STR_LEN];
static DEFINE_SPINLOCK(addr_str_lock);
static int last_addr_str;
const char *ceph_pr_addr(const struct sockaddr_storage *ss)
{
	int i;
	char *s;
	struct sockaddr_in *in4 = (void *)ss;
	struct sockaddr_in6 *in6 = (void *)ss;

	spin_lock(&addr_str_lock);
	i = last_addr_str++;
	if (last_addr_str == MAX_ADDR_STR)
		last_addr_str = 0;
	spin_unlock(&addr_str_lock);
	s = addr_str[i];

	switch (ss->ss_family) {
	case AF_INET:
		snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%u", &in4->sin_addr,
			 (unsigned int)ntohs(in4->sin_port));
		break;
	case AF_INET6:
		snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%u", &in6->sin6_addr,
			 (unsigned int)ntohs(in6->sin6_port));
		break;
	default:
		snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %d)",
			 (int)ss->ss_family);
	}
	return s;
}
EXPORT_SYMBOL(ceph_pr_addr);
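
/*
 * Usage sketch (illustrative, not part of the original file): the
 * returned string lives in one of the MAX_ADDR_STR static slots above
 * and may be overwritten by a later call, so consume it immediately:
 *
 *	pr_info("peer is %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
 *
 * Callers that need the text later should copy it out rather than hold
 * the returned pointer.
 */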

static void encode_my_addr(struct ceph_messenger *msgr)
{
	memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
	ceph_encode_addr(&msgr->my_enc_addr);
}

/*
 * work queue for all reading and writing to/from the socket.
 */
struct workqueue_struct *ceph_msgr_wq;

int ceph_msgr_init(void)
{
	/*
	 * The number of active work items is limited by the number of
	 * connections, so leave @max_active at default.
	 */
	ceph_msgr_wq = alloc_workqueue("ceph-msgr",
				       WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
	if (!ceph_msgr_wq) {
		pr_err("msgr_init failed to create workqueue\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ceph_msgr_init);

void ceph_msgr_exit(void)
{
	destroy_workqueue(ceph_msgr_wq);
}
EXPORT_SYMBOL(ceph_msgr_exit);

void ceph_msgr_flush(void)
{
	flush_workqueue(ceph_msgr_wq);
}
EXPORT_SYMBOL(ceph_msgr_flush);
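
/*
 * Lifecycle sketch (an assumption about typical callers, mirroring how
 * the ceph core drives these hooks at module init/exit):
 *
 *	if (ceph_msgr_init() < 0)
 *		return -ENOMEM;
 *	...
 *	ceph_msgr_flush();	(wait for queued con_work to drain)
 *	ceph_msgr_exit();
 *
 * All socket reads and writes for every connection funnel through the
 * single ceph-msgr workqueue created above.
 */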

/*
 * socket callback functions
 */

/* data available on socket, or listen socket received a connect */
static void ceph_data_ready(struct sock *sk, int count_unused)
{
	struct ceph_connection *con =
		(struct ceph_connection *)sk->sk_user_data;

	if (sk->sk_state != TCP_CLOSE_WAIT) {
		dout("ceph_data_ready on %p state = %lu, queueing work\n",
		     con, con->state);
		queue_con(con);
	}
}

/* socket has buffer space for writing */
static void ceph_write_space(struct sock *sk)
{
	struct ceph_connection *con =
		(struct ceph_connection *)sk->sk_user_data;

	/* only queue to workqueue if there is data we want to write. */
	if (test_bit(WRITE_PENDING, &con->state)) {
		dout("ceph_write_space %p queueing write work\n", con);
		queue_con(con);
	} else {
		dout("ceph_write_space %p nothing to write\n", con);
	}

	/* since we have our own write_space, clear the SOCK_NOSPACE flag */
	clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
}

/* socket's state has changed */
static void ceph_state_change(struct sock *sk)
{
	struct ceph_connection *con =
		(struct ceph_connection *)sk->sk_user_data;

	dout("ceph_state_change %p state = %lu sk_state = %u\n",
	     con, con->state, sk->sk_state);

	if (test_bit(CLOSED, &con->state))
		return;

	switch (sk->sk_state) {
	case TCP_CLOSE:
		dout("ceph_state_change TCP_CLOSE\n");
	case TCP_CLOSE_WAIT:
		dout("ceph_state_change TCP_CLOSE_WAIT\n");
		if (test_and_set_bit(SOCK_CLOSED, &con->state) == 0) {
			if (test_bit(CONNECTING, &con->state))
				con->error_msg = "connection failed";
			else
				con->error_msg = "socket closed";
			queue_con(con);
		}
		break;
	case TCP_ESTABLISHED:
		dout("ceph_state_change TCP_ESTABLISHED\n");
		queue_con(con);
		break;
	}
}

/*
 * set up socket callbacks
 */
static void set_sock_callbacks(struct socket *sock,
			       struct ceph_connection *con)
{
	struct sock *sk = sock->sk;
	sk->sk_user_data = (void *)con;
	sk->sk_data_ready = ceph_data_ready;
	sk->sk_write_space = ceph_write_space;
	sk->sk_state_change = ceph_state_change;
}

/*
 * initiate connection to a remote socket.
 */
static struct socket *ceph_tcp_connect(struct ceph_connection *con)
{
	struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
	struct socket *sock;
	int ret;

	BUG_ON(con->sock);
	ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM,
			       IPPROTO_TCP, &sock);
	if (ret)
		return ERR_PTR(ret);
	sock->sk->sk_allocation = GFP_NOFS;

#ifdef CONFIG_LOCKDEP
	lockdep_set_class(&sock->sk->sk_lock, &socket_class);
#endif

	set_sock_callbacks(sock, con);

	dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));

	ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
				 O_NONBLOCK);
	if (ret == -EINPROGRESS) {
		dout("connect %s EINPROGRESS sk_state = %u\n",
		     ceph_pr_addr(&con->peer_addr.in_addr),
		     sock->sk->sk_state);
	} else if (ret < 0) {
		pr_err("connect %s error %d\n",
		       ceph_pr_addr(&con->peer_addr.in_addr), ret);
		sock_release(sock);
		con->error_msg = "connect error";
		return ERR_PTR(ret);
	}
	return sock;
}

static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
{
	struct kvec iov = {buf, len};
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int r;

	r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
	if (r == -EAGAIN)
		r = 0;
	return r;
}

/*
 * write something.  @more is true if caller will be sending more data
 * shortly.
 */
static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
			    size_t kvlen, size_t len, int more)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int r;

	if (more)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */

	r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
	if (r == -EAGAIN)
		r = 0;
	return r;
}

static int __ceph_tcp_sendpage(struct socket *sock, struct page *page,
			       int offset, size_t size, bool more)
{
	int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
	int ret;

	ret = kernel_sendpage(sock, page, offset, size, flags);
	if (ret == -EAGAIN)
		ret = 0;
	return ret;
}

static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
			     int offset, size_t size, bool more)
{
	struct kvec iov;
	int ret;

	/* sendpage cannot properly handle pages with page_count == 0,
	 * we need to fall back to sendmsg if that's the case */
	if (page_count(page) >= 1)
		return __ceph_tcp_sendpage(sock, page, offset, size, more);

	iov.iov_base = kmap(page) + offset;
	iov.iov_len = size;
	ret = ceph_tcp_sendmsg(sock, &iov, 1, size, more);
	kunmap(page);

	return ret;
}
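
/*
 * Design note (sketch, inferred from the code above): kernel_sendpage()
 * transmits the page zero-copy by taking a reference on it, which is
 * only legal for pages with a nonzero page_count.  For anything else
 * (e.g. slab-backed pages), ceph_tcp_sendpage() pays for a kmap() and a
 * copy through ceph_tcp_sendmsg() instead, trading speed for safety.
 */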

/*
 * Shutdown/close the socket for the given connection.
 */
static int con_close_socket(struct ceph_connection *con)
{
	int rc;

	dout("con_close_socket on %p sock %p\n", con, con->sock);
	if (!con->sock)
		return 0;
	set_bit(SOCK_CLOSED, &con->state);
	rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
	sock_release(con->sock);
	con->sock = NULL;
	clear_bit(SOCK_CLOSED, &con->state);
	return rc;
}

/*
 * Reset a connection.  Discard all incoming and outgoing messages
 * and clear *_seq state.
 */
static void ceph_msg_remove(struct ceph_msg *msg)
{
	list_del_init(&msg->list_head);
	ceph_msg_put(msg);
}

static void ceph_msg_remove_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
							list_head);
		ceph_msg_remove(msg);
	}
}

static void reset_connection(struct ceph_connection *con)
{
	/* reset connection, out_queue, msg_ and connect_seq */
	/* discard existing out_queue and msg_seq */
	ceph_msg_remove_list(&con->out_queue);
	ceph_msg_remove_list(&con->out_sent);

	if (con->in_msg) {
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
	}

	con->connect_seq = 0;
	con->out_seq = 0;
	if (con->out_msg) {
		ceph_msg_put(con->out_msg);
		con->out_msg = NULL;
	}
	con->in_seq = 0;
	con->in_seq_acked = 0;
}

/*
 * mark a peer down.  drop any open connections.
 */
void ceph_con_close(struct ceph_connection *con)
{
	dout("con_close %p peer %s\n", con,
	     ceph_pr_addr(&con->peer_addr.in_addr));
	set_bit(CLOSED, &con->state);  /* in case there's queued work */
	clear_bit(STANDBY, &con->state);  /* avoid connect_seq bump */
	clear_bit(LOSSYTX, &con->state);  /* so we retry next connect */
	clear_bit(KEEPALIVE_PENDING, &con->state);
	clear_bit(WRITE_PENDING, &con->state);
	mutex_lock(&con->mutex);
	reset_connection(con);
	con->peer_global_seq = 0;
	cancel_delayed_work(&con->work);
	mutex_unlock(&con->mutex);
	queue_con(con);
}
EXPORT_SYMBOL(ceph_con_close);

/*
 * Reopen a closed connection, with a new peer address.
 */
void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr *addr)
{
	dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
	set_bit(OPENING, &con->state);
	clear_bit(CLOSED, &con->state);
	memcpy(&con->peer_addr, addr, sizeof(*addr));
	con->delay = 0;  /* reset backoff memory */
	queue_con(con);
}
EXPORT_SYMBOL(ceph_con_open);

/*
 * return true if this connection ever successfully opened
 */
bool ceph_con_opened(struct ceph_connection *con)
{
	return con->connect_seq > 0;
}

/*
 * generic get/put
 */
struct ceph_connection *ceph_con_get(struct ceph_connection *con)
{
	dout("con_get %p nref = %d -> %d\n", con,
	     atomic_read(&con->nref), atomic_read(&con->nref) + 1);
	if (atomic_inc_not_zero(&con->nref))
		return con;
	return NULL;
}

void ceph_con_put(struct ceph_connection *con)
{
	dout("con_put %p nref = %d -> %d\n", con,
	     atomic_read(&con->nref), atomic_read(&con->nref) - 1);
	BUG_ON(atomic_read(&con->nref) == 0);
	if (atomic_dec_and_test(&con->nref)) {
		BUG_ON(con->sock);
		kfree(con);
	}
}

/*
 * initialize a new connection.
 */
void ceph_con_init(struct ceph_messenger *msgr, struct ceph_connection *con)
{
	dout("con_init %p\n", con);
	memset(con, 0, sizeof(*con));
	atomic_set(&con->nref, 1);
	con->msgr = msgr;
	mutex_init(&con->mutex);
	INIT_LIST_HEAD(&con->out_queue);
	INIT_LIST_HEAD(&con->out_sent);
	INIT_DELAYED_WORK(&con->work, con_work);
}
EXPORT_SYMBOL(ceph_con_init);

/*
 * We maintain a global counter to order connection attempts.  Get
 * a unique seq greater than @gt.
 */
static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
{
	u32 ret;

	spin_lock(&msgr->global_seq_lock);
	if (msgr->global_seq < gt)
		msgr->global_seq = gt;
	ret = ++msgr->global_seq;
	spin_unlock(&msgr->global_seq_lock);
	return ret;
}
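
/*
 * Illustrative example (not from the original file): if our counter is
 * at 5 and a peer reports global_seq 9, then
 *
 *	u32 gseq = get_global_seq(msgr, 9);
 *
 * first raises msgr->global_seq to 9 and returns gseq == 10, so the next
 * connect attempt is ordered after everything either side has issued.
 * The RETRY_GLOBAL case in process_connect() relies on exactly this.
 */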

/*
 * Prepare footer for currently outgoing message, and finish things
 * off.  Assumes out_kvec* are already valid.. we just add on to the end.
 */
static void prepare_write_message_footer(struct ceph_connection *con, int v)
{
	struct ceph_msg *m = con->out_msg;

	dout("prepare_write_message_footer %p\n", con);
	con->out_kvec_is_msg = true;
	con->out_kvec[v].iov_base = &m->footer;
	con->out_kvec[v].iov_len = sizeof(m->footer);
	con->out_kvec_bytes += sizeof(m->footer);
	con->out_kvec_left++;
	con->out_more = m->more_to_follow;
	con->out_msg_done = true;
}

/*
 * Prepare headers for the next outgoing message.
 */
static void prepare_write_message(struct ceph_connection *con)
{
	struct ceph_msg *m;
	int v = 0;

	con->out_kvec_bytes = 0;
	con->out_kvec_is_msg = true;
	con->out_msg_done = false;

	/* Sneak an ack in there first?  If we can get it into the same
	 * TCP packet that's a good thing. */
	if (con->in_seq > con->in_seq_acked) {
		con->in_seq_acked = con->in_seq;
		con->out_kvec[v].iov_base = &tag_ack;
		con->out_kvec[v++].iov_len = 1;
		con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
		con->out_kvec[v].iov_base = &con->out_temp_ack;
		con->out_kvec[v++].iov_len = sizeof(con->out_temp_ack);
		con->out_kvec_bytes = 1 + sizeof(con->out_temp_ack);
	}

	m = list_first_entry(&con->out_queue,
			     struct ceph_msg, list_head);
	con->out_msg = m;

	/* put message on sent list */
	ceph_msg_get(m);
	list_move_tail(&m->list_head, &con->out_sent);

	/*
	 * only assign outgoing seq # if we haven't sent this message
	 * yet.  if it is requeued, resend with its original seq.
	 */
	if (m->needs_out_seq) {
		m->hdr.seq = cpu_to_le64(++con->out_seq);
		m->needs_out_seq = false;
	}

	dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n",
	     m, con->out_seq, le16_to_cpu(m->hdr.type),
	     le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
	     le32_to_cpu(m->hdr.data_len),
	     m->nr_pages);
	BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);

	/* tag + hdr + front + middle */
	con->out_kvec[v].iov_base = &tag_msg;
	con->out_kvec[v++].iov_len = 1;
	con->out_kvec[v].iov_base = &m->hdr;
	con->out_kvec[v++].iov_len = sizeof(m->hdr);
	con->out_kvec[v++] = m->front;
	if (m->middle)
		con->out_kvec[v++] = m->middle->vec;
	con->out_kvec_left = v;
	con->out_kvec_bytes += 1 + sizeof(m->hdr) + m->front.iov_len +
		(m->middle ? m->middle->vec.iov_len : 0);
	con->out_kvec_cur = con->out_kvec;

	/* fill in crc (except data pages), footer */
	con->out_msg->hdr.crc =
		cpu_to_le32(crc32c(0, (void *)&m->hdr,
				   sizeof(m->hdr) - sizeof(m->hdr.crc)));
	con->out_msg->footer.flags = CEPH_MSG_FOOTER_COMPLETE;
	con->out_msg->footer.front_crc =
		cpu_to_le32(crc32c(0, m->front.iov_base, m->front.iov_len));
	if (m->middle)
		con->out_msg->footer.middle_crc =
			cpu_to_le32(crc32c(0, m->middle->vec.iov_base,
					   m->middle->vec.iov_len));
	else
		con->out_msg->footer.middle_crc = 0;
	con->out_msg->footer.data_crc = 0;
	dout("prepare_write_message front_crc %u data_crc %u\n",
	     le32_to_cpu(con->out_msg->footer.front_crc),
	     le32_to_cpu(con->out_msg->footer.middle_crc));

	/* is there a data payload? */
	if (le32_to_cpu(m->hdr.data_len) > 0) {
		/* initialize page iterator */
		con->out_msg_pos.page = 0;
		if (m->pages)
			con->out_msg_pos.page_pos = m->page_alignment;
		else
			con->out_msg_pos.page_pos = 0;
		con->out_msg_pos.data_pos = 0;
		con->out_msg_pos.did_page_crc = 0;
		con->out_more = 1;  /* data + footer will follow */
	} else {
		/* no, queue up footer too and be done */
		prepare_write_message_footer(con, v);
	}

	set_bit(WRITE_PENDING, &con->state);
}

/*
 * Prepare an ack.
 */
static void prepare_write_ack(struct ceph_connection *con)
{
	dout("prepare_write_ack %p %llu -> %llu\n", con,
	     con->in_seq_acked, con->in_seq);
	con->in_seq_acked = con->in_seq;

	con->out_kvec[0].iov_base = &tag_ack;
	con->out_kvec[0].iov_len = 1;
	con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
	con->out_kvec[1].iov_base = &con->out_temp_ack;
	con->out_kvec[1].iov_len = sizeof(con->out_temp_ack);
	con->out_kvec_left = 2;
	con->out_kvec_bytes = 1 + sizeof(con->out_temp_ack);
	con->out_kvec_cur = con->out_kvec;
	con->out_more = 1;  /* more will follow.. eventually.. */
	set_bit(WRITE_PENDING, &con->state);
}

/*
 * Prepare to write keepalive byte.
 */
static void prepare_write_keepalive(struct ceph_connection *con)
{
	dout("prepare_write_keepalive %p\n", con);
	con->out_kvec[0].iov_base = &tag_keepalive;
	con->out_kvec[0].iov_len = 1;
	con->out_kvec_left = 1;
	con->out_kvec_bytes = 1;
	con->out_kvec_cur = con->out_kvec;
	set_bit(WRITE_PENDING, &con->state);
}
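
/*
 * Wire-format sketch, derived from the prepare_write_* helpers above:
 * each protocol unit leads with one tag byte.
 *
 *	[tag_keepalive]
 *	[tag_ack][le64 seq]
 *	[tag_msg][ceph_msg_header][front][middle][data pages][footer]
 *
 * A keepalive is the bare tag; an ack carries the acked seq; a message
 * carries its header, payload sections and a crc-bearing footer.
 */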

/*
 * Connection negotiation.
 */

static int prepare_connect_authorizer(struct ceph_connection *con)
{
	void *auth_buf;
	int auth_len = 0;
	int auth_protocol = 0;

	mutex_unlock(&con->mutex);
	if (con->ops->get_authorizer)
		con->ops->get_authorizer(con, &auth_buf, &auth_len,
					 &auth_protocol, &con->auth_reply_buf,
					 &con->auth_reply_buf_len,
					 con->auth_retry);
	mutex_lock(&con->mutex);

	if (test_bit(CLOSED, &con->state) ||
	    test_bit(OPENING, &con->state))
		return -EAGAIN;

	con->out_connect.authorizer_protocol = cpu_to_le32(auth_protocol);
	con->out_connect.authorizer_len = cpu_to_le32(auth_len);

	if (auth_len) {
		con->out_kvec[con->out_kvec_left].iov_base = auth_buf;
		con->out_kvec[con->out_kvec_left].iov_len = auth_len;
		con->out_kvec_left++;
		con->out_kvec_bytes += auth_len;
	}
	return 0;
}

/*
 * We connected to a peer and are saying hello.
 */
static void prepare_write_banner(struct ceph_messenger *msgr,
				 struct ceph_connection *con)
{
	int len = strlen(CEPH_BANNER);

	con->out_kvec[0].iov_base = CEPH_BANNER;
	con->out_kvec[0].iov_len = len;
	con->out_kvec[1].iov_base = &msgr->my_enc_addr;
	con->out_kvec[1].iov_len = sizeof(msgr->my_enc_addr);
	con->out_kvec_left = 2;
	con->out_kvec_bytes = len + sizeof(msgr->my_enc_addr);
	con->out_kvec_cur = con->out_kvec;
	con->out_more = 0;
	set_bit(WRITE_PENDING, &con->state);
}

static int prepare_write_connect(struct ceph_messenger *msgr,
				 struct ceph_connection *con,
				 int after_banner)
{
	unsigned global_seq = get_global_seq(con->msgr, 0);
	int proto;

	switch (con->peer_name.type) {
	case CEPH_ENTITY_TYPE_MON:
		proto = CEPH_MONC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_OSD:
		proto = CEPH_OSDC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_MDS:
		proto = CEPH_MDSC_PROTOCOL;
		break;
	default:
		BUG();
	}

	dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
	     con->connect_seq, global_seq, proto);

	con->out_connect.features = cpu_to_le64(msgr->supported_features);
	con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
	con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
	con->out_connect.global_seq = cpu_to_le32(global_seq);
	con->out_connect.protocol_version = cpu_to_le32(proto);
	con->out_connect.flags = 0;

	if (!after_banner) {
		con->out_kvec_left = 0;
		con->out_kvec_bytes = 0;
	}
	con->out_kvec[con->out_kvec_left].iov_base = &con->out_connect;
	con->out_kvec[con->out_kvec_left].iov_len = sizeof(con->out_connect);
	con->out_kvec_left++;
	con->out_kvec_bytes += sizeof(con->out_connect);
	con->out_kvec_cur = con->out_kvec;
	con->out_more = 0;
	set_bit(WRITE_PENDING, &con->state);

	return prepare_connect_authorizer(con);
}

/*
 * write as much of pending kvecs to the socket as we can.
 *  1 -> done
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_kvec(struct ceph_connection *con)
{
	int ret;

	dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
	while (con->out_kvec_bytes > 0) {
		ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
				       con->out_kvec_left, con->out_kvec_bytes,
				       con->out_more);
		if (ret <= 0)
			goto out;
		con->out_kvec_bytes -= ret;
		if (con->out_kvec_bytes == 0)
			break;            /* done */
		while (ret > 0) {
			if (ret >= con->out_kvec_cur->iov_len) {
				ret -= con->out_kvec_cur->iov_len;
				con->out_kvec_cur++;
				con->out_kvec_left--;
			} else {
				con->out_kvec_cur->iov_len -= ret;
				con->out_kvec_cur->iov_base += ret;
				ret = 0;
			}
		}
	}
	con->out_kvec_left = 0;
	con->out_kvec_is_msg = false;
	ret = 1;
out:
	dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
	     con->out_kvec_bytes, con->out_kvec_left, ret);
	return ret;  /* done! */
}
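
/*
 * Worked example (illustrative): say out_kvec holds entries of 1, 53 and
 * 100 bytes and the socket accepts 60.  The loop above consumes the 1-
 * and 53-byte entries whole (out_kvec_cur advances, out_kvec_left drops
 * by two) and trims 6 bytes off the 100-byte entry, so the next pass
 * resumes exactly where the socket stopped accepting data.
 */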

#ifdef CONFIG_BLOCK
static void init_bio_iter(struct bio *bio, struct bio **iter, int *seg)
{
	if (!bio) {
		*iter = NULL;
		*seg = 0;
		return;
	}
	*iter = bio;
	*seg = bio->bi_idx;
}

static void iter_bio_next(struct bio **bio_iter, int *seg)
{
	if (*bio_iter == NULL)
		return;

	BUG_ON(*seg >= (*bio_iter)->bi_vcnt);

	(*seg)++;
	if (*seg == (*bio_iter)->bi_vcnt)
		init_bio_iter((*bio_iter)->bi_next, bio_iter, seg);
}
#endif

/*
 * Write as much message data payload as we can.  If we finish, queue
 * up the footer.
 *  1 -> done, footer is now queued in out_kvec[].
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_msg_pages(struct ceph_connection *con)
{
	struct ceph_msg *msg = con->out_msg;
	unsigned data_len = le32_to_cpu(msg->hdr.data_len);
	size_t len;
	int crc = !con->msgr->nocrc;
	int ret;
	int total_max_write;
	int in_trail = 0;
	size_t trail_len = (msg->trail ? msg->trail->length : 0);

	dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n",
	     con, con->out_msg, con->out_msg_pos.page, con->out_msg->nr_pages,
	     con->out_msg_pos.page_pos);

#ifdef CONFIG_BLOCK
	if (msg->bio && !msg->bio_iter)
		init_bio_iter(msg->bio, &msg->bio_iter, &msg->bio_seg);
#endif

	while (data_len > con->out_msg_pos.data_pos) {
		struct page *page = NULL;
		void *kaddr = NULL;
		int max_write = PAGE_SIZE;
		int page_shift = 0;

		total_max_write = data_len - trail_len -
			con->out_msg_pos.data_pos;

		/*
		 * if we are calculating the data crc (the default), we need
		 * to map the page.  if our pages[] has been revoked, use the
		 * zero page.
		 */

		/* have we reached the trail part of the data? */
		if (con->out_msg_pos.data_pos >= data_len - trail_len) {
			in_trail = 1;

			total_max_write = data_len - con->out_msg_pos.data_pos;

			page = list_first_entry(&msg->trail->head,
						struct page, lru);
			if (crc)
				kaddr = kmap(page);
			max_write = PAGE_SIZE;
		} else if (msg->pages) {
			page = msg->pages[con->out_msg_pos.page];
			if (crc)
				kaddr = kmap(page);
		} else if (msg->pagelist) {
			page = list_first_entry(&msg->pagelist->head,
						struct page, lru);
			if (crc)
				kaddr = kmap(page);
#ifdef CONFIG_BLOCK
		} else if (msg->bio) {
			struct bio_vec *bv;

			bv = bio_iovec_idx(msg->bio_iter, msg->bio_seg);
			page = bv->bv_page;
			page_shift = bv->bv_offset;
			if (crc)
				kaddr = kmap(page) + page_shift;
			max_write = bv->bv_len;
#endif
		} else {
			page = con->msgr->zero_page;
			if (crc)
				kaddr = page_address(con->msgr->zero_page);
		}
		len = min_t(int, max_write - con->out_msg_pos.page_pos,
			    total_max_write);

		if (crc && !con->out_msg_pos.did_page_crc) {
			void *base = kaddr + con->out_msg_pos.page_pos;
			u32 tmpcrc = le32_to_cpu(con->out_msg->footer.data_crc);

			BUG_ON(kaddr == NULL);
			con->out_msg->footer.data_crc =
				cpu_to_le32(crc32c(tmpcrc, base, len));
			con->out_msg_pos.did_page_crc = 1;
		}
		ret = ceph_tcp_sendpage(con->sock, page,
					con->out_msg_pos.page_pos + page_shift,
					len, 1);
		if (crc &&
		    (msg->pages || msg->pagelist || msg->bio || in_trail))
			kunmap(page);

		if (ret <= 0)
			goto out;

		con->out_msg_pos.data_pos += ret;
		con->out_msg_pos.page_pos += ret;
		if (ret == len) {
			con->out_msg_pos.page_pos = 0;
			con->out_msg_pos.page++;
			con->out_msg_pos.did_page_crc = 0;
			if (in_trail)
				list_move_tail(&page->lru,
					       &msg->trail->head);
			else if (msg->pagelist)
				list_move_tail(&page->lru,
					       &msg->pagelist->head);
#ifdef CONFIG_BLOCK
			else if (msg->bio)
				iter_bio_next(&msg->bio_iter, &msg->bio_seg);
#endif
		}
	}

	dout("write_partial_msg_pages %p msg %p done\n", con, msg);

	/* prepare and queue up footer, too */
	if (!crc)
		con->out_msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
	con->out_kvec_bytes = 0;
	con->out_kvec_left = 0;
	con->out_kvec_cur = con->out_kvec;
	prepare_write_message_footer(con, 0);
	ret = 1;
out:
	return ret;
}

static int write_partial_skip(struct ceph_connection *con)
{
	int ret;

	while (con->out_skip > 0) {
		struct kvec iov = {
			.iov_base = page_address(con->msgr->zero_page),
			.iov_len = min(con->out_skip, (int)PAGE_CACHE_SIZE)
		};

		ret = ceph_tcp_sendmsg(con->sock, &iov, 1, iov.iov_len, 1);
		if (ret <= 0)
			goto out;
		con->out_skip -= ret;
	}
	ret = 1;
out:
	return ret;
}

/*
 * Prepare to read connection handshake, or an ack.
 */
static void prepare_read_banner(struct ceph_connection *con)
{
	dout("prepare_read_banner %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_connect(struct ceph_connection *con)
{
	dout("prepare_read_connect %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_ack(struct ceph_connection *con)
{
	dout("prepare_read_ack %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_tag(struct ceph_connection *con)
{
	dout("prepare_read_tag %p\n", con);
	con->in_base_pos = 0;
	con->in_tag = CEPH_MSGR_TAG_READY;
}

/*
 * Prepare to read a message.
 */
static int prepare_read_message(struct ceph_connection *con)
{
	dout("prepare_read_message %p\n", con);
	BUG_ON(con->in_msg != NULL);
	con->in_base_pos = 0;
	con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
	return 0;
}

static int read_partial(struct ceph_connection *con,
			int *to, int size, void *object)
{
	*to += size;
	while (con->in_base_pos < *to) {
		int left = *to - con->in_base_pos;
		int have = size - left;
		int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
		if (ret <= 0)
			return ret;
		con->in_base_pos += ret;
	}
	return 1;
}
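
/*
 * Usage sketch (illustrative): @to accumulates across calls, so several
 * back-to-back objects can share one cursor:
 *
 *	int to = 0;
 *	ret = read_partial(con, &to, sizeof(a), &a);	(bytes 0..sizeof(a))
 *	if (ret > 0)
 *		ret = read_partial(con, &to, sizeof(b), &b);
 *
 * con->in_base_pos tracks overall progress, so a short read simply
 * resumes mid-object on the next call; a and b are stand-in variables.
 */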

/*
 * Read all or part of the connect-side handshake on a new connection
 */
static int read_partial_banner(struct ceph_connection *con)
{
	int ret, to = 0;

	dout("read_partial_banner %p at %d\n", con, con->in_base_pos);

	/* peer's banner */
	ret = read_partial(con, &to, strlen(CEPH_BANNER), con->in_banner);
	if (ret <= 0)
		goto out;
	ret = read_partial(con, &to, sizeof(con->actual_peer_addr),
			   &con->actual_peer_addr);
	if (ret <= 0)
		goto out;
	ret = read_partial(con, &to, sizeof(con->peer_addr_for_me),
			   &con->peer_addr_for_me);
	if (ret <= 0)
		goto out;
out:
	return ret;
}

static int read_partial_connect(struct ceph_connection *con)
{
	int ret, to = 0;

	dout("read_partial_connect %p at %d\n", con, con->in_base_pos);

	ret = read_partial(con, &to, sizeof(con->in_reply), &con->in_reply);
	if (ret <= 0)
		goto out;
	ret = read_partial(con, &to, le32_to_cpu(con->in_reply.authorizer_len),
			   con->auth_reply_buf);
	if (ret <= 0)
		goto out;

	dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
	     con, (int)con->in_reply.tag,
	     le32_to_cpu(con->in_reply.connect_seq),
	     le32_to_cpu(con->in_reply.global_seq));
out:
	return ret;
}

/*
 * Verify the hello banner looks okay.
 */
static int verify_hello(struct ceph_connection *con)
{
	if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
		pr_err("connect to %s got bad banner\n",
		       ceph_pr_addr(&con->peer_addr.in_addr));
		con->error_msg = "protocol error, bad banner";
		return -1;
	}
	return 0;
}

static bool addr_is_blank(struct sockaddr_storage *ss)
{
	switch (ss->ss_family) {
	case AF_INET:
		return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0;
	case AF_INET6:
		return
		     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 &&
		     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 &&
		     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 &&
		     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0;
	}
	return false;
}

static int addr_port(struct sockaddr_storage *ss)
{
	switch (ss->ss_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)ss)->sin_port);
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
	}
	return 0;
}

static void addr_set_port(struct sockaddr_storage *ss, int p)
{
	switch (ss->ss_family) {
	case AF_INET:
		((struct sockaddr_in *)ss)->sin_port = htons(p);
		break;
	case AF_INET6:
		((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
		break;
	}
}

/*
 * Unlike other *_pton function semantics, zero indicates success.
 */
static int ceph_pton(const char *str, size_t len, struct sockaddr_storage *ss,
		     char delim, const char **ipend)
{
	struct sockaddr_in *in4 = (void *)ss;
	struct sockaddr_in6 *in6 = (void *)ss;

	memset(ss, 0, sizeof(*ss));

	if (in4_pton(str, len, (u8 *)&in4->sin_addr.s_addr, delim, ipend)) {
		ss->ss_family = AF_INET;
		return 0;
	}

	if (in6_pton(str, len, (u8 *)&in6->sin6_addr.s6_addr, delim, ipend)) {
		ss->ss_family = AF_INET6;
		return 0;
	}

	return -EINVAL;
}
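
/*
 * Example (illustrative): parsing a bare IPv4 address with no expected
 * trailing delimiter:
 *
 *	struct sockaddr_storage ss;
 *
 *	if (ceph_pton("10.0.0.1", 8, &ss, -1, NULL) == 0)
 *		...ss.ss_family is now AF_INET, port still zero...
 *
 * Note the inverted convention flagged above: 0 means success here,
 * unlike in4_pton()/in6_pton(), which return 1 on success.
 */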

/*
 * Extract hostname string and resolve using kernel DNS facility.
 */
#ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER
static int ceph_dns_resolve_name(const char *name, size_t namelen,
		struct sockaddr_storage *ss, char delim, const char **ipend)
{
	const char *end, *delim_p;
	char *colon_p, *ip_addr = NULL;
	int ip_len, ret;

	/*
	 * The end of the hostname occurs immediately preceding the delimiter or
	 * the port marker (':') where the delimiter takes precedence.
	 */
	delim_p = memchr(name, delim, namelen);
	colon_p = memchr(name, ':', namelen);

	if (delim_p && colon_p)
		end = delim_p < colon_p ? delim_p : colon_p;
	else if (!delim_p && colon_p)
		end = colon_p;
	else {
		end = delim_p;
		if (!end) /* case: hostname:/ */
			end = name + namelen;
	}

	if (end <= name)
		return -EINVAL;

	/* do dns_resolve upcall */
	ip_len = dns_query(NULL, name, end - name, NULL, &ip_addr, NULL);
	if (ip_len > 0)
		ret = ceph_pton(ip_addr, ip_len, ss, -1, NULL);
	else
		ret = -ESRCH;

	kfree(ip_addr);

	*ipend = end;

	pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name,
			ret, ret ? "failed" : ceph_pr_addr(ss));

	return ret;
}
#else
static inline int ceph_dns_resolve_name(const char *name, size_t namelen,
		struct sockaddr_storage *ss, char delim, const char **ipend)
{
	return -EINVAL;
}
#endif

/*
 * Parse a server name (IP or hostname). If a valid IP address is not found
 * then try to extract a hostname to resolve using userspace DNS upcall.
 */
static int ceph_parse_server_name(const char *name, size_t namelen,
		struct sockaddr_storage *ss, char delim, const char **ipend)
{
	int ret;

	ret = ceph_pton(name, namelen, ss, delim, ipend);
	if (ret)
		ret = ceph_dns_resolve_name(name, namelen, ss, delim, ipend);

	return ret;
}

/*
 * Parse an ip[:port] list into an addr array.  Use the default
 * monitor port if a port isn't specified.
 */
int ceph_parse_ips(const char *c, const char *end,
		   struct ceph_entity_addr *addr,
		   int max_count, int *count)
{
	int i, ret = -EINVAL;
	const char *p = c;

	dout("parse_ips on '%.*s'\n", (int)(end-c), c);
	for (i = 0; i < max_count; i++) {
		const char *ipend;
		struct sockaddr_storage *ss = &addr[i].in_addr;
		int port;
		char delim = ',';

		if (*p == '[') {
			delim = ']';
			p++;
		}

		ret = ceph_parse_server_name(p, end - p, ss, delim, &ipend);
		if (ret)
			goto bad;
		ret = -EINVAL;

		p = ipend;

		if (delim == ']') {
			if (*p != ']') {
				dout("missing matching ']'\n");
				goto bad;
			}
			p++;
		}

		/* port? */
		if (p < end && *p == ':') {
			port = 0;
			p++;
			while (p < end && *p >= '0' && *p <= '9') {
				port = (port * 10) + (*p - '0');
				p++;
			}
			if (port > 65535 || port == 0)
				goto bad;
		} else {
			port = CEPH_MON_PORT;
		}

		addr_set_port(ss, port);

		dout("parse_ips got %s\n", ceph_pr_addr(ss));

		if (p == end)
			break;
		if (*p != ',')
			goto bad;
		p++;
	}

	if (p != end)
		goto bad;

	if (count)
		*count = i + 1;
	return 0;

bad:
	pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c);
	return ret;
}
EXPORT_SYMBOL(ceph_parse_ips);
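
/*
 * Example (illustrative): a monitor list mixing IPv4, IPv6 and an
 * explicit port; entries without a port fall back to CEPH_MON_PORT:
 *
 *	struct ceph_entity_addr addrs[3];
 *	int naddr;
 *	const char *s = "1.2.3.4:6789,[::1],mon.example.com:6789";
 *
 *	if (ceph_parse_ips(s, s + strlen(s), addrs, 3, &naddr) == 0)
 *		...naddr == 3...
 *
 * The hostname entry only resolves when the kernel DNS upcall is
 * available (CONFIG_CEPH_LIB_USE_DNS_RESOLVER).
 */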

static int process_banner(struct ceph_connection *con)
{
	dout("process_banner on %p\n", con);

	if (verify_hello(con) < 0)
		return -1;

	ceph_decode_addr(&con->actual_peer_addr);
	ceph_decode_addr(&con->peer_addr_for_me);

	/*
	 * Make sure the other end is who we wanted.  note that the other
	 * end may not yet know their ip address, so if it's 0.0.0.0, give
	 * them the benefit of the doubt.
	 */
	if (memcmp(&con->peer_addr, &con->actual_peer_addr,
		   sizeof(con->peer_addr)) != 0 &&
	    !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
	      con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
		pr_warning("wrong peer, want %s/%d, got %s/%d\n",
			   ceph_pr_addr(&con->peer_addr.in_addr),
			   (int)le32_to_cpu(con->peer_addr.nonce),
			   ceph_pr_addr(&con->actual_peer_addr.in_addr),
			   (int)le32_to_cpu(con->actual_peer_addr.nonce));
		con->error_msg = "wrong peer at address";
		return -1;
	}

	/*
	 * did we learn our address?
	 */
	if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
		int port = addr_port(&con->msgr->inst.addr.in_addr);

		memcpy(&con->msgr->inst.addr.in_addr,
		       &con->peer_addr_for_me.in_addr,
		       sizeof(con->peer_addr_for_me.in_addr));
		addr_set_port(&con->msgr->inst.addr.in_addr, port);
		encode_my_addr(con->msgr);
		dout("process_banner learned my addr is %s\n",
		     ceph_pr_addr(&con->msgr->inst.addr.in_addr));
	}

	set_bit(NEGOTIATING, &con->state);
	prepare_read_connect(con);
	return 0;
}

static void fail_protocol(struct ceph_connection *con)
{
	reset_connection(con);
	set_bit(CLOSED, &con->state);  /* in case there's queued work */

	mutex_unlock(&con->mutex);
	if (con->ops->bad_proto)
		con->ops->bad_proto(con);
	mutex_lock(&con->mutex);
}

static int process_connect(struct ceph_connection *con)
{
	u64 sup_feat = con->msgr->supported_features;
	u64 req_feat = con->msgr->required_features;
	u64 server_feat = le64_to_cpu(con->in_reply.features);
	int ret;

	dout("process_connect on %p tag %d\n", con, (int)con->in_tag);

	if (con->auth_reply_buf) {
		/*
		 * Any connection that defines ->get_authorizer()
		 * should also define ->verify_authorizer_reply().
		 * See prepare_connect_authorizer().
		 */
		ret = con->ops->verify_authorizer_reply(con, 0);
		if (ret < 0) {
			con->error_msg = "bad authorize reply";
			return ret;
		}
	}

	switch (con->in_reply.tag) {
	case CEPH_MSGR_TAG_FEATURES:
		pr_err("%s%lld %s feature set mismatch,"
		       " my %llx < server's %llx, missing %llx\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr),
		       sup_feat, server_feat, server_feat & ~sup_feat);
		con->error_msg = "missing required protocol features";
		fail_protocol(con);
		return -1;

	case CEPH_MSGR_TAG_BADPROTOVER:
		pr_err("%s%lld %s protocol version mismatch,"
		       " my %d != server's %d\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr),
		       le32_to_cpu(con->out_connect.protocol_version),
		       le32_to_cpu(con->in_reply.protocol_version));
		con->error_msg = "protocol version mismatch";
		fail_protocol(con);
		return -1;

	case CEPH_MSGR_TAG_BADAUTHORIZER:
		con->auth_retry++;
		dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
		     con->auth_retry);
		if (con->auth_retry == 2) {
			con->error_msg = "connect authorization failure";
			return -1;
		}
		con->auth_retry = 1;
		ret = prepare_write_connect(con->msgr, con, 0);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_RESETSESSION:
		/*
		 * If we connected with a large connect_seq but the peer
		 * has no record of a session with us (no connection, or
		 * connect_seq == 0), they will send RESETSESSION to indicate
		 * that they must have reset their session, and may have
		 * dropped messages.
		 */
		dout("process_connect got RESET peer seq %u\n",
		     le32_to_cpu(con->in_connect.connect_seq));
		pr_err("%s%lld %s connection reset\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr));
		reset_connection(con);
		prepare_write_connect(con->msgr, con, 0);
		prepare_read_connect(con);

		/* Tell ceph about it. */
		mutex_unlock(&con->mutex);
		pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
		if (con->ops->peer_reset)
			con->ops->peer_reset(con);
		mutex_lock(&con->mutex);
		if (test_bit(CLOSED, &con->state) ||
		    test_bit(OPENING, &con->state))
			return -EAGAIN;
		break;

	case CEPH_MSGR_TAG_RETRY_SESSION:
		/*
		 * If we sent a smaller connect_seq than the peer has, try
		 * again with a larger value.
		 */
		dout("process_connect got RETRY my seq = %u, peer_seq = %u\n",
		     le32_to_cpu(con->out_connect.connect_seq),
		     le32_to_cpu(con->in_connect.connect_seq));
		con->connect_seq = le32_to_cpu(con->in_connect.connect_seq);
		prepare_write_connect(con->msgr, con, 0);
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_RETRY_GLOBAL:
		/*
		 * If we sent a smaller global_seq than the peer has, try
		 * again with a larger value.
		 */
		dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
		     con->peer_global_seq,
		     le32_to_cpu(con->in_connect.global_seq));
		get_global_seq(con->msgr,
			       le32_to_cpu(con->in_connect.global_seq));
		prepare_write_connect(con->msgr, con, 0);
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_READY:
		if (req_feat & ~server_feat) {
			pr_err("%s%lld %s protocol feature mismatch,"
			       " my required %llx > server's %llx, need %llx\n",
			       ENTITY_NAME(con->peer_name),
			       ceph_pr_addr(&con->peer_addr.in_addr),
			       req_feat, server_feat, req_feat & ~server_feat);
			con->error_msg = "missing required protocol features";
			fail_protocol(con);
			return -1;
		}
		clear_bit(CONNECTING, &con->state);
		con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
		con->connect_seq++;
		con->peer_features = server_feat;
		dout("process_connect got READY gseq %d cseq %d (%d)\n",
		     con->peer_global_seq,
		     le32_to_cpu(con->in_reply.connect_seq),
		     con->connect_seq);
		WARN_ON(con->connect_seq !=
			le32_to_cpu(con->in_reply.connect_seq));

		if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
			set_bit(LOSSYTX, &con->state);

		prepare_read_tag(con);
		break;

	case CEPH_MSGR_TAG_WAIT:
		/*
		 * If there is a connection race (we are opening
		 * connections to each other), one of us may just have
		 * to WAIT.  This shouldn't happen if we are the
		 * client.
		 */
		pr_err("process_connect got WAIT as client\n");
		con->error_msg = "protocol error, got WAIT as client";
		return -1;

	default:
		pr_err("connect protocol error, will retry\n");
		con->error_msg = "protocol error, garbage tag during connect";
		return -1;
	}
	return 0;
}

/*
 * read (part of) an ack
 */
static int read_partial_ack(struct ceph_connection *con)
{
	int to = 0;

	return read_partial(con, &to, sizeof(con->in_temp_ack),
			    &con->in_temp_ack);
}

/*
 * We can finally discard anything that's been acked.
 */
static void process_ack(struct ceph_connection *con)
{
	struct ceph_msg *m;
	u64 ack = le64_to_cpu(con->in_temp_ack);
	u64 seq;

	while (!list_empty(&con->out_sent)) {
		m = list_first_entry(&con->out_sent, struct ceph_msg,
				     list_head);
		seq = le64_to_cpu(m->hdr.seq);
		if (seq > ack)
			break;
		dout("got ack for seq %llu type %d at %p\n", seq,
		     le16_to_cpu(m->hdr.type), m);
		m->ack_stamp = jiffies;
		ceph_msg_remove(m);
	}
	prepare_read_tag(con);
}

static int read_partial_message_section(struct ceph_connection *con,
					struct kvec *section,
					unsigned int sec_len, u32 *crc)
{
	int ret, left;

	BUG_ON(!section);

	while (section->iov_len < sec_len) {
		BUG_ON(section->iov_base == NULL);
		left = sec_len - section->iov_len;
		ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base +
				       section->iov_len, left);
		if (ret <= 0)
			return ret;
		section->iov_len += ret;
		if (section->iov_len == sec_len)
			*crc = crc32c(0, section->iov_base,
				      section->iov_len);
	}

	return 1;
}

static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
				struct ceph_msg_header *hdr,
				int *skip);

static int read_partial_message_pages(struct ceph_connection *con,
				      struct page **pages,
				      unsigned data_len, int datacrc)
{
	void *p;
	int ret;
	int left;

	left = min((int)(data_len - con->in_msg_pos.data_pos),
		   (int)(PAGE_SIZE - con->in_msg_pos.page_pos));
	/* (page) data */
	BUG_ON(pages == NULL);
	p = kmap(pages[con->in_msg_pos.page]);
	ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
			       left);
	if (ret > 0 && datacrc)
		con->in_data_crc =
			crc32c(con->in_data_crc,
			       p + con->in_msg_pos.page_pos, ret);
	kunmap(pages[con->in_msg_pos.page]);
	if (ret <= 0)
		return ret;
	con->in_msg_pos.data_pos += ret;
	con->in_msg_pos.page_pos += ret;
	if (con->in_msg_pos.page_pos == PAGE_SIZE) {
		con->in_msg_pos.page_pos = 0;
		con->in_msg_pos.page++;
	}

	return ret;
}

#ifdef CONFIG_BLOCK
static int read_partial_message_bio(struct ceph_connection *con,
				    struct bio **bio_iter, int *bio_seg,
				    unsigned data_len, int datacrc)
{
	struct bio_vec *bv = bio_iovec_idx(*bio_iter, *bio_seg);
	void *p;
	int ret, left;

	left = min((int)(data_len - con->in_msg_pos.data_pos),
		   (int)(bv->bv_len - con->in_msg_pos.page_pos));

	p = kmap(bv->bv_page) + bv->bv_offset;

	ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
			       left);
	if (ret > 0 && datacrc)
		con->in_data_crc =
			crc32c(con->in_data_crc,
			       p + con->in_msg_pos.page_pos, ret);
	kunmap(bv->bv_page);
	if (ret <= 0)
		return ret;
	con->in_msg_pos.data_pos += ret;
	con->in_msg_pos.page_pos += ret;
	if (con->in_msg_pos.page_pos == bv->bv_len) {
		con->in_msg_pos.page_pos = 0;
		iter_bio_next(bio_iter, bio_seg);
	}

	return ret;
}
#endif

/*
 * read (part of) a message.
 */
static int read_partial_message(struct ceph_connection *con)
{
	struct ceph_msg *m = con->in_msg;
	int ret;
	int to, left;
	unsigned front_len, middle_len, data_len;
	int datacrc = !con->msgr->nocrc;
	int skip;
	u64 seq;

	dout("read_partial_message con %p msg %p\n", con, m);

	/* header */
	while (con->in_base_pos < sizeof(con->in_hdr)) {
		left = sizeof(con->in_hdr) - con->in_base_pos;
		ret = ceph_tcp_recvmsg(con->sock,
				       (char *)&con->in_hdr + con->in_base_pos,
				       left);
		if (ret <= 0)
			return ret;
		con->in_base_pos += ret;
		if (con->in_base_pos == sizeof(con->in_hdr)) {
			u32 crc = crc32c(0, (void *)&con->in_hdr,
				 sizeof(con->in_hdr) - sizeof(con->in_hdr.crc));
			if (crc != le32_to_cpu(con->in_hdr.crc)) {
				pr_err("read_partial_message bad hdr "
				       " crc %u != expected %u\n",
				       crc, con->in_hdr.crc);
				return -EBADMSG;
			}
		}
	}
	front_len = le32_to_cpu(con->in_hdr.front_len);
	if (front_len > CEPH_MSG_MAX_FRONT_LEN)
		return -EIO;
	middle_len = le32_to_cpu(con->in_hdr.middle_len);
	if (middle_len > CEPH_MSG_MAX_DATA_LEN)
		return -EIO;
	data_len = le32_to_cpu(con->in_hdr.data_len);
	if (data_len > CEPH_MSG_MAX_DATA_LEN)
		return -EIO;

	/* verify seq# */
	seq = le64_to_cpu(con->in_hdr.seq);
	if ((s64)seq - (s64)con->in_seq < 1) {
		pr_info("skipping %s%lld %s seq %lld expected %lld\n",
			ENTITY_NAME(con->peer_name),
			ceph_pr_addr(&con->peer_addr.in_addr),
			seq, con->in_seq + 1);
		con->in_base_pos = -front_len - middle_len - data_len -
			sizeof(m->footer);
		con->in_tag = CEPH_MSGR_TAG_READY;
		con->in_seq++;
		return 0;
	} else if ((s64)seq - (s64)con->in_seq > 1) {
		pr_err("read_partial_message bad seq %lld expected %lld\n",
		       seq, con->in_seq + 1);
		con->error_msg = "bad message sequence # for incoming message";
		return -EBADMSG;
	}

	/* allocate message? */
	if (!con->in_msg) {
		dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
		     con->in_hdr.front_len, con->in_hdr.data_len);
		skip = 0;
		con->in_msg = ceph_alloc_msg(con, &con->in_hdr, &skip);
		if (skip) {
			/* skip this message */
			dout("alloc_msg said skip message\n");
			BUG_ON(con->in_msg);
			con->in_base_pos = -front_len - middle_len - data_len -
				sizeof(m->footer);
			con->in_tag = CEPH_MSGR_TAG_READY;
			con->in_seq++;
			return 0;
		}
		if (!con->in_msg) {
			con->error_msg =
				"error allocating memory for incoming message";
			return -ENOMEM;
		}
		m = con->in_msg;
		m->front.iov_len = 0;    /* haven't read it yet */
		if (m->middle)
			m->middle->vec.iov_len = 0;

		con->in_msg_pos.page = 0;
		if (m->pages)
			con->in_msg_pos.page_pos = m->page_alignment;
		else
			con->in_msg_pos.page_pos = 0;
		con->in_msg_pos.data_pos = 0;
	}

	/* front */
	ret = read_partial_message_section(con, &m->front, front_len,
					   &con->in_front_crc);
	if (ret <= 0)
		return ret;

	/* middle */
	if (m->middle) {
		ret = read_partial_message_section(con, &m->middle->vec,
						   middle_len,
						   &con->in_middle_crc);
		if (ret <= 0)
			return ret;
	}
#ifdef CONFIG_BLOCK
	if (m->bio && !m->bio_iter)
		init_bio_iter(m->bio, &m->bio_iter, &m->bio_seg);
#endif

	/* (page) data */
	while (con->in_msg_pos.data_pos < data_len) {
		if (m->pages) {
			ret = read_partial_message_pages(con, m->pages,
							 data_len, datacrc);
			if (ret <= 0)
				return ret;
#ifdef CONFIG_BLOCK
		} else if (m->bio) {
			ret = read_partial_message_bio(con,
						       &m->bio_iter, &m->bio_seg,
						       data_len, datacrc);
			if (ret <= 0)
				return ret;
#endif
		} else {
			BUG_ON(1);
		}
	}

	/* footer */
	to = sizeof(m->hdr) + sizeof(m->footer);
	while (con->in_base_pos < to) {
		left = to - con->in_base_pos;
		ret = ceph_tcp_recvmsg(con->sock, (char *)&m->footer +
				       (con->in_base_pos - sizeof(m->hdr)),
				       left);
		if (ret <= 0)
			return ret;
		con->in_base_pos += ret;
	}
	dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
	     m, front_len, m->footer.front_crc, middle_len,
	     m->footer.middle_crc, data_len, m->footer.data_crc);

	/* crc ok? */
	if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
		pr_err("read_partial_message %p front crc %u != exp. %u\n",
		       m, con->in_front_crc, m->footer.front_crc);
		return -EBADMSG;
	}
	if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
		pr_err("read_partial_message %p middle crc %u != exp %u\n",
		       m, con->in_middle_crc, m->footer.middle_crc);
		return -EBADMSG;
	}
	if (datacrc &&
	    (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
	    con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
		pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
		       con->in_data_crc, le32_to_cpu(m->footer.data_crc));
		return -EBADMSG;
	}

	return 1; /* done! */
}

/*
 * Process message.  This happens in the worker thread.  The callback should
 * be careful not to do anything that waits on other incoming messages or it
 * may deadlock.
 */
static void process_message(struct ceph_connection *con)
{
	struct ceph_msg *msg;

	msg = con->in_msg;
	con->in_msg = NULL;

	/* if first message, set peer_name */
	if (con->peer_name.type == 0)
		con->peer_name = msg->hdr.src;

	con->in_seq++;
	mutex_unlock(&con->mutex);

	dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n",
	     msg, le64_to_cpu(msg->hdr.seq),
	     ENTITY_NAME(msg->hdr.src),
	     le16_to_cpu(msg->hdr.type),
	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
	     le32_to_cpu(msg->hdr.front_len),
	     le32_to_cpu(msg->hdr.data_len),
	     con->in_front_crc, con->in_middle_crc, con->in_data_crc);
	con->ops->dispatch(con, msg);

	mutex_lock(&con->mutex);
	prepare_read_tag(con);
}

/*
 * Write something to the socket.  Called in a worker thread when the
 * socket appears to be writeable and we have something ready to send.
 */
static int try_write(struct ceph_connection *con)
{
	struct ceph_messenger *msgr = con->msgr;
	int ret = 1;

	dout("try_write start %p state %lu nref %d\n", con, con->state,
	     atomic_read(&con->nref));

more:
	dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);

	/* open the socket first? */
	if (con->sock == NULL) {
		prepare_write_banner(msgr, con);
		prepare_write_connect(msgr, con, 1);
		prepare_read_banner(con);
		set_bit(CONNECTING, &con->state);
		clear_bit(NEGOTIATING, &con->state);

		BUG_ON(con->in_msg);
		con->in_tag = CEPH_MSGR_TAG_READY;
		dout("try_write initiating connect on %p new state %lu\n",
		     con, con->state);
		con->sock = ceph_tcp_connect(con);
		if (IS_ERR(con->sock)) {
			con->sock = NULL;
			con->error_msg = "connect error";
			ret = -1;
			goto out;
		}
	}

more_kvec:
	/* kvec data queued? */
	if (con->out_skip) {
		ret = write_partial_skip(con);
		if (ret <= 0)
			goto out;
	}
	if (con->out_kvec_left) {
		ret = write_partial_kvec(con);
		if (ret <= 0)
			goto out;
	}

	/* msg pages? */
	if (con->out_msg) {
		if (con->out_msg_done) {
			ceph_msg_put(con->out_msg);
			con->out_msg = NULL;   /* we're done with this one */
			goto do_next;
		}

		ret = write_partial_msg_pages(con);
		if (ret == 1)
			goto more_kvec;  /* we need to send the footer, too! */
		if (ret == 0)
			goto out;
		if (ret < 0) {
			dout("try_write write_partial_msg_pages err %d\n",
			     ret);
			goto out;
		}
	}

do_next:
	if (!test_bit(CONNECTING, &con->state)) {
		/* is anything else pending? */
		if (!list_empty(&con->out_queue)) {
			prepare_write_message(con);
			goto more;
		}
		if (con->in_seq > con->in_seq_acked) {
			prepare_write_ack(con);
			goto more;
		}
		if (test_and_clear_bit(KEEPALIVE_PENDING, &con->state)) {
			prepare_write_keepalive(con);
			goto more;
		}
	}

	/* Nothing to do! */
	clear_bit(WRITE_PENDING, &con->state);
	dout("try_write nothing else to write.\n");
	ret = 0;
out:
	dout("try_write done on %p ret %d\n", con, ret);
	return ret;
}

/*
 * Read what we can from the socket.
 */
static int try_read(struct ceph_connection *con)
{
	int ret = -1;

	if (!con->sock)
		return 0;

	if (test_bit(STANDBY, &con->state))
		return 0;

	dout("try_read start on %p\n", con);

more:
	dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
	     con->in_base_pos);

	/*
	 * process_connect and process_message drop and re-take
	 * con->mutex.  make sure we handle a racing close or reopen.
	 */
	if (test_bit(CLOSED, &con->state) ||
	    test_bit(OPENING, &con->state)) {
		ret = -EAGAIN;
		goto out;
	}

	if (test_bit(CONNECTING, &con->state)) {
		if (!test_bit(NEGOTIATING, &con->state)) {
			dout("try_read connecting\n");
			ret = read_partial_banner(con);
			if (ret <= 0)
				goto out;
			ret = process_banner(con);
			if (ret < 0)
				goto out;
		}
		ret = read_partial_connect(con);
		if (ret <= 0)
			goto out;
		ret = process_connect(con);
		if (ret < 0)
			goto out;
		goto more;
	}

	if (con->in_base_pos < 0) {
		/*
		 * skipping + discarding content.
		 *
		 * FIXME: there must be a better way to do this!
		 */
		static char buf[1024];
		int skip = min(1024, -con->in_base_pos);
		dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
		ret = ceph_tcp_recvmsg(con->sock, buf, skip);
		if (ret <= 0)
			goto out;
		con->in_base_pos += ret;
		if (con->in_base_pos)
			goto more;
	}
	if (con->in_tag == CEPH_MSGR_TAG_READY) {
		/*
		 * what's next?
		 */
		ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
		if (ret <= 0)
			goto out;
		dout("try_read got tag %d\n", (int)con->in_tag);
		switch (con->in_tag) {
		case CEPH_MSGR_TAG_MSG:
			prepare_read_message(con);
			break;
		case CEPH_MSGR_TAG_ACK:
			prepare_read_ack(con);
			break;
		case CEPH_MSGR_TAG_CLOSE:
			set_bit(CLOSED, &con->state);   /* fixme */
			goto out;
		default:
			goto bad_tag;
		}
	}
	if (con->in_tag == CEPH_MSGR_TAG_MSG) {
		ret = read_partial_message(con);
		if (ret <= 0) {
			switch (ret) {
			case -EBADMSG:
				con->error_msg = "bad crc";
				ret = -EIO;
				break;
			case -EIO:
				con->error_msg = "io error";
				break;
			}
			goto out;
		}
		if (con->in_tag == CEPH_MSGR_TAG_READY)
			goto more;
		process_message(con);
		goto more;
	}
	if (con->in_tag == CEPH_MSGR_TAG_ACK) {
		ret = read_partial_ack(con);
		if (ret <= 0)
			goto out;
		process_ack(con);
		goto more;
	}

out:
	dout("try_read done on %p ret %d\n", con, ret);
	return ret;

bad_tag:
	pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag);
	con->error_msg = "protocol error, garbage tag";
	ret = -1;
	goto out;
}

/*
 * Atomically queue work on a connection.  Bump @con reference to
 * avoid races with connection teardown.
 */
static void queue_con(struct ceph_connection *con)
{
	if (test_bit(DEAD, &con->state)) {
		dout("queue_con %p ignoring: DEAD\n",
		     con);
		return;
	}

	if (!con->ops->get(con)) {
		dout("queue_con %p ref count 0\n", con);
		return;
	}

	if (!queue_delayed_work(ceph_msgr_wq, &con->work, 0)) {
		dout("queue_con %p - already queued\n", con);
		con->ops->put(con);
	} else {
		dout("queue_con %p\n", con);
	}
}

/*
 * Do some work on a connection.  Drop a connection ref when we're done.
 */
static void con_work(struct work_struct *work)
{
	struct ceph_connection *con = container_of(work, struct ceph_connection,
						   work.work);
	int ret;

	mutex_lock(&con->mutex);
restart:
	if (test_and_clear_bit(BACKOFF, &con->state)) {
		dout("con_work %p backing off\n", con);
		if (queue_delayed_work(ceph_msgr_wq, &con->work,
				       round_jiffies_relative(con->delay))) {
			dout("con_work %p backoff %lu\n", con, con->delay);
			mutex_unlock(&con->mutex);
			return;
		} else {
			con->ops->put(con);
			dout("con_work %p FAILED to back off %lu\n", con,
			     con->delay);
		}
	}

	if (test_bit(STANDBY, &con->state)) {
		dout("con_work %p STANDBY\n", con);
		goto done;
	}
	if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */
		dout("con_work CLOSED\n");
		con_close_socket(con);
		goto done;
	}
	if (test_and_clear_bit(OPENING, &con->state)) {
		/* reopen w/ new peer */
		dout("con_work OPENING\n");
		con_close_socket(con);
	}

	if (test_and_clear_bit(SOCK_CLOSED, &con->state))
		goto fault;

	ret = try_read(con);
	if (ret == -EAGAIN)
		goto restart;
	if (ret < 0)
		goto fault;

	ret = try_write(con);
	if (ret == -EAGAIN)
		goto restart;
	if (ret < 0)
		goto fault;

done:
	mutex_unlock(&con->mutex);
done_unlocked:
	con->ops->put(con);
	return;

fault:
	mutex_unlock(&con->mutex);
	ceph_fault(con);     /* error/fault path */
	goto done_unlocked;
}

/*
 * Generic error/fault handler.  A retry mechanism is used with
 * exponential backoff.
 */
static void ceph_fault(struct ceph_connection *con)
{
	pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
	       ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
	dout("fault %p state %lu to peer %s\n",
	     con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));

	if (test_bit(LOSSYTX, &con->state)) {
		dout("fault on LOSSYTX channel\n");
		goto out;
	}

	mutex_lock(&con->mutex);
	if (test_bit(CLOSED, &con->state))
		goto out_unlock;

	con_close_socket(con);

	if (con->in_msg) {
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
	}

	/* Requeue anything that hasn't been acked */
	list_splice_init(&con->out_sent, &con->out_queue);

	/* If there are no messages queued or keepalive pending, place
	 * the connection in a STANDBY state */
	if (list_empty(&con->out_queue) &&
	    !test_bit(KEEPALIVE_PENDING, &con->state)) {
		dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
		clear_bit(WRITE_PENDING, &con->state);
		set_bit(STANDBY, &con->state);
	} else {
		/* retry after a delay. */
		if (con->delay == 0)
			con->delay = BASE_DELAY_INTERVAL;
		else if (con->delay < MAX_DELAY_INTERVAL)
			con->delay *= 2;
		con->ops->get(con);
		if (queue_delayed_work(ceph_msgr_wq, &con->work,
				       round_jiffies_relative(con->delay))) {
			dout("fault queued %p delay %lu\n", con, con->delay);
		} else {
			con->ops->put(con);
			dout("fault failed to queue %p delay %lu, backoff\n",
			     con, con->delay);
			/*
			 * In many cases we see a socket state change
			 * while con_work is running and end up
			 * queuing (non-delayed) work, such that we
			 * can't backoff with a delay.  Set a flag so
			 * that when con_work restarts we schedule the
			 * backoff.
			 */
			set_bit(BACKOFF, &con->state);
		}
	}

out_unlock:
	mutex_unlock(&con->mutex);
out:
	/*
	 * in case we faulted due to authentication, invalidate our
	 * current tickets so that we can get new ones.
	 */
	if (con->auth_retry && con->ops->invalidate_authorizer) {
		dout("calling invalidate_authorizer()\n");
		con->ops->invalidate_authorizer(con);
	}

	if (con->ops->fault)
		con->ops->fault(con);
}

/*
 * create a new messenger instance
 */
struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr,
					     u32 supported_features,
					     u32 required_features)
{
	struct ceph_messenger *msgr;

	msgr = kzalloc(sizeof(*msgr), GFP_KERNEL);
	if (msgr == NULL)
		return ERR_PTR(-ENOMEM);

	msgr->supported_features = supported_features;
	msgr->required_features = required_features;

	spin_lock_init(&msgr->global_seq_lock);

	/* the zero page is needed if a request is "canceled" while the message
	 * is being written over the socket */
	msgr->zero_page = __page_cache_alloc(GFP_KERNEL | __GFP_ZERO);
	if (!msgr->zero_page) {
		kfree(msgr);
		return ERR_PTR(-ENOMEM);
	}
	kmap(msgr->zero_page);

	if (myaddr)
		msgr->inst.addr = *myaddr;

	/* select a random nonce */
	msgr->inst.addr.type = 0;
	get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
	encode_my_addr(msgr);

	dout("messenger_create %p\n", msgr);
	return msgr;
}
EXPORT_SYMBOL(ceph_messenger_create);

void ceph_messenger_destroy(struct ceph_messenger *msgr)
{
	dout("destroy %p\n", msgr);
	kunmap(msgr->zero_page);
	__free_page(msgr->zero_page);
	kfree(msgr);
	dout("destroyed messenger %p\n", msgr);
}
EXPORT_SYMBOL(ceph_messenger_destroy);
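
/*
 * Lifecycle sketch (an assumption, mirroring how the mon/osd clients use
 * this API): one messenger per client, connections initialized against
 * it, destroyed only after its connections are closed:
 *
 *	msgr = ceph_messenger_create(NULL, supported, required);
 *	if (IS_ERR(msgr))
 *		return PTR_ERR(msgr);
 *	ceph_con_init(msgr, &con);
 *	ceph_con_open(&con, &peer_addr);
 *	...
 *	ceph_con_close(&con);
 *	ceph_messenger_destroy(msgr);
 *
 * Passing NULL for myaddr leaves the local address blank until
 * process_banner() learns it from the peer.  supported/required/peer_addr
 * are stand-ins for caller-provided values.
 */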

static void clear_standby(struct ceph_connection *con)
{
	/* come back from STANDBY? */
	if (test_and_clear_bit(STANDBY, &con->state)) {
		mutex_lock(&con->mutex);
		dout("clear_standby %p and ++connect_seq\n", con);
		con->connect_seq++;
		WARN_ON(test_bit(WRITE_PENDING, &con->state));
		WARN_ON(test_bit(KEEPALIVE_PENDING, &con->state));
		mutex_unlock(&con->mutex);
	}
}

/*
 * Queue up an outgoing message on the given connection.
 */
void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
{
	if (test_bit(CLOSED, &con->state)) {
		dout("con_send %p closed, dropping %p\n", con, msg);
		ceph_msg_put(msg);
		return;
	}

	/* set src+dst */
	msg->hdr.src = con->msgr->inst.name;

	BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));

	msg->needs_out_seq = true;

	/* queue */
	mutex_lock(&con->mutex);
	BUG_ON(!list_empty(&msg->list_head));
	list_add_tail(&msg->list_head, &con->out_queue);
	dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
	     ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
	     le32_to_cpu(msg->hdr.front_len),
	     le32_to_cpu(msg->hdr.middle_len),
	     le32_to_cpu(msg->hdr.data_len));
	mutex_unlock(&con->mutex);

	clear_standby(con);

	/* if there wasn't anything waiting to send before, queue
	 * new work */
	if (test_and_set_bit(WRITE_PENDING, &con->state) == 0)
		queue_con(con);
}
EXPORT_SYMBOL(ceph_con_send);

/*
 * Revoke a message that was previously queued for send
 */
void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg)
{
	mutex_lock(&con->mutex);
	if (!list_empty(&msg->list_head)) {
		dout("con_revoke %p msg %p - was on queue\n", con, msg);
		list_del_init(&msg->list_head);
		ceph_msg_put(msg);
		msg->hdr.seq = 0;
	}
	if (con->out_msg == msg) {
		dout("con_revoke %p msg %p - was sending\n", con, msg);
		con->out_msg = NULL;
		if (con->out_kvec_is_msg) {
			con->out_skip = con->out_kvec_bytes;
			con->out_kvec_is_msg = false;
		}
		ceph_msg_put(msg);
		msg->hdr.seq = 0;
	}
	mutex_unlock(&con->mutex);
}

/*
 * Revoke a message that we may be reading data into
 */
void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg)
{
	mutex_lock(&con->mutex);
	if (con->in_msg && con->in_msg == msg) {
		unsigned front_len = le32_to_cpu(con->in_hdr.front_len);
		unsigned middle_len = le32_to_cpu(con->in_hdr.middle_len);
		unsigned data_len = le32_to_cpu(con->in_hdr.data_len);

		/* skip rest of message */
		dout("con_revoke_pages %p msg %p revoked\n", con, msg);
		con->in_base_pos = con->in_base_pos -
				sizeof(struct ceph_msg_header) -
				front_len -
				middle_len -
				data_len -
				sizeof(struct ceph_msg_footer);
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
		con->in_tag = CEPH_MSGR_TAG_READY;
		con->in_seq++;
	} else {
		dout("con_revoke_pages %p msg %p pages %p no-op\n",
		     con, con->in_msg, msg);
	}
	mutex_unlock(&con->mutex);
}

/*
 * Queue a keepalive byte to ensure the tcp connection is alive.
 */
void ceph_con_keepalive(struct ceph_connection *con)
{
	dout("con_keepalive %p\n", con);
	clear_standby(con);
	if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 &&
	    test_and_set_bit(WRITE_PENDING, &con->state) == 0)
		queue_con(con);
}
EXPORT_SYMBOL(ceph_con_keepalive);
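
/*
 * Usage note (sketch): a client with its own liveness timer, e.g. the
 * mon client, periodically calls
 *
 *	ceph_con_keepalive(&monc->con);
 *
 * The two test_and_set_bit calls above make this idempotent: if a
 * keepalive is already pending or a write is already scheduled, no
 * duplicate work is queued.  monc is a stand-in for the caller's state.
 */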

/*
 * construct a new message with given type, size
 * the new msg has a ref count of 1.
 */
struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
			      bool can_fail)
{
	struct ceph_msg *m;

	m = kmalloc(sizeof(*m), flags);
	if (m == NULL)
		goto out;
	kref_init(&m->kref);
	INIT_LIST_HEAD(&m->list_head);

	m->hdr.tid = 0;
	m->hdr.type = cpu_to_le16(type);
	m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
	m->hdr.version = 0;
	m->hdr.front_len = cpu_to_le32(front_len);
	m->hdr.middle_len = 0;
	m->hdr.data_len = 0;
	m->hdr.data_off = 0;
	m->hdr.reserved = 0;
	m->footer.front_crc = 0;
	m->footer.middle_crc = 0;
	m->footer.data_crc = 0;
	m->footer.flags = 0;
	m->front_alloc_len = front_len;
	m->front_is_vmalloc = false;
	m->more_to_follow = false;
	m->pool = NULL;

	/* middle */
	m->middle = NULL;

	/* data */
	m->nr_pages = 0;
	m->page_alignment = 0;
	m->pages = NULL;
	m->pagelist = NULL;
	m->bio = NULL;
	m->bio_iter = NULL;
	m->bio_seg = 0;
	m->trail = NULL;

	/* front */
	if (front_len) {
		if (front_len > PAGE_CACHE_SIZE) {
			m->front.iov_base = __vmalloc(front_len, flags,
						      PAGE_KERNEL);
			m->front_is_vmalloc = true;
		} else {
			m->front.iov_base = kmalloc(front_len, flags);
		}
		if (m->front.iov_base == NULL) {
			dout("ceph_msg_new can't allocate %d bytes\n",
			     front_len);
			goto out2;
		}
	} else {
		m->front.iov_base = NULL;
	}
	m->front.iov_len = front_len;

	dout("ceph_msg_new %p front %d\n", m, front_len);
	return m;

out2:
	ceph_msg_put(m);
out:
	if (!can_fail) {
		pr_err("msg_new can't create type %d front %d\n", type,
		       front_len);
		WARN_ON(1);
	} else {
		dout("msg_new can't create type %d front %d\n", type,
		     front_len);
	}
	return NULL;
}
EXPORT_SYMBOL(ceph_msg_new);
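
/*
 * Example (illustrative): allocating a small message whose front section
 * the caller fills in; can_fail selects a quiet NULL return instead of a
 * WARN on allocation failure:
 *
 *	struct ceph_msg *m;
 *
 *	m = ceph_msg_new(CEPH_MSG_STATFS, sizeof(struct ceph_mon_statfs),
 *			 GFP_NOFS, true);
 *	if (!m)
 *		return -ENOMEM;
 *	...fill m->front.iov_base, set m->hdr fields...
 *	ceph_con_send(con, m);	(transfers our reference)
 */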

/*
 * Allocate "middle" portion of a message, if it is needed and wasn't
 * allocated by alloc_msg.  This allows us to read a small fixed-size
 * per-type header in the front and then gracefully fail (i.e.,
 * propagate the error to the caller based on info in the front) when
 * the middle is too large.
 */
static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
{
	int type = le16_to_cpu(msg->hdr.type);
	int middle_len = le32_to_cpu(msg->hdr.middle_len);

	dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
	     ceph_msg_type_name(type), middle_len);
	BUG_ON(!middle_len);
	BUG_ON(msg->middle);

	msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
	if (!msg->middle)
		return -ENOMEM;
	return 0;
}

/*
 * Generic message allocator, for incoming messages.
 */
static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
				struct ceph_msg_header *hdr,
				int *skip)
{
	int type = le16_to_cpu(hdr->type);
	int front_len = le32_to_cpu(hdr->front_len);
	int middle_len = le32_to_cpu(hdr->middle_len);
	struct ceph_msg *msg = NULL;
	int ret;

	if (con->ops->alloc_msg) {
		mutex_unlock(&con->mutex);
		msg = con->ops->alloc_msg(con, hdr, skip);
		mutex_lock(&con->mutex);
		if (!msg || *skip)
			return NULL;
	}
	if (!msg) {
		*skip = 0;
		msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
		if (!msg) {
			pr_err("unable to allocate msg type %d len %d\n",
			       type, front_len);
			return NULL;
		}
		msg->page_alignment = le16_to_cpu(hdr->data_off);
	}
	memcpy(&msg->hdr, &con->in_hdr, sizeof(con->in_hdr));

	if (middle_len && !msg->middle) {
		ret = ceph_alloc_middle(con, msg);
		if (ret < 0) {
			ceph_msg_put(msg);
			return NULL;
		}
	}

	return msg;
}

/*
 * Free a generically kmalloc'd message.
 */
void ceph_msg_kfree(struct ceph_msg *m)
{
	dout("msg_kfree %p\n", m);
	if (m->front_is_vmalloc)
		vfree(m->front.iov_base);
	else
		kfree(m->front.iov_base);
	kfree(m);
}

/*
 * Drop a msg ref.  Destroy as needed.
 */
void ceph_msg_last_put(struct kref *kref)
{
	struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);

	dout("ceph_msg_put last one on %p\n", m);
	WARN_ON(!list_empty(&m->list_head));

	/* drop middle, data, if any */
	if (m->middle) {
		ceph_buffer_put(m->middle);
		m->middle = NULL;
	}
	m->nr_pages = 0;
	m->pages = NULL;

	if (m->pagelist) {
		ceph_pagelist_release(m->pagelist);
		kfree(m->pagelist);
		m->pagelist = NULL;
	}

	m->trail = NULL;

	if (m->pool)
		ceph_msgpool_put(m->pool, m);
	else
		ceph_msg_kfree(m);
}
EXPORT_SYMBOL(ceph_msg_last_put);

void ceph_msg_dump(struct ceph_msg *msg)
{
	pr_debug("msg_dump %p (front_alloc_len %d nr_pages %d)\n", msg,
		 msg->front_alloc_len, msg->nr_pages);
	print_hex_dump(KERN_DEBUG, "header: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       &msg->hdr, sizeof(msg->hdr), true);
	print_hex_dump(KERN_DEBUG, " front: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       msg->front.iov_base, msg->front.iov_len, true);
	if (msg->middle)
		print_hex_dump(KERN_DEBUG, "middle: ",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       msg->middle->vec.iov_base,
			       msg->middle->vec.iov_len, true);
	print_hex_dump(KERN_DEBUG, "footer: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       &msg->footer, sizeof(msg->footer), true);
}
EXPORT_SYMBOL(ceph_msg_dump);