#include "ceph_debug.h"

#include <linux/types.h>
#include <linux/random.h>
#include <linux/sched.h>

#include "mon_client.h"
#include "super.h"
#include "decode.h"
/*
 * Interact with the Ceph monitor cluster.  Handle requests for new map
 * versions, and periodically resend them as needed.  Also implement
 * statfs() and umount().
 *
 * A small cluster of Ceph "monitors" is responsible for managing critical
 * cluster configuration and state information.  An odd number (e.g., 3, 5)
 * of cmon daemons uses a modified version of the Paxos part-time parliament
 * algorithm to manage the MDS map (mds cluster membership), the OSD map, and
 * the list of clients that have mounted the file system.
 *
 * We maintain an open, active session with a monitor at all times in order to
 * receive timely MDSMap updates.  We periodically send a keepalive byte on
 * the TCP socket to ensure we detect a failure.  If the connection does break,
 * we randomly hunt for a new monitor.  Once the connection is reestablished,
 * we resend any outstanding requests.
 */
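/*
 * A rough sketch of how the rest of the client is expected to drive this
 * file (illustrative only; the exact call sites are assumed to be in
 * super.c and the mds/osd clients, and error handling is omitted):
 *
 *	ceph_monc_init(&client->monc, client);
 *	ceph_monc_request_mount(&client->monc);        (then wait for mount ack)
 *	ceph_monc_do_statfs(&client->monc, &st);       (synchronous statfs)
 *	ceph_monc_got_mdsmap(&client->monc, epoch);    (record maps we received)
 *	ceph_monc_request_next_osdmap(&client->monc);
 *	ceph_monc_stop(&client->monc);
 */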
static const struct ceph_connection_operations mon_con_ops;
/*
 * Decode a monmap blob (e.g., during mount).
 */
struct ceph_monmap *ceph_monmap_decode(void *p, void *end)
{
	struct ceph_monmap *m = NULL;
	int i, err = -EINVAL;
	struct ceph_fsid fsid;
	u32 epoch, num_mon;
	u16 version;

	dout("monmap_decode %p %p len %d\n", p, end, (int)(end-p));

	ceph_decode_16_safe(&p, end, version, bad);
	ceph_decode_need(&p, end, sizeof(fsid) + 2*sizeof(u32), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	epoch = ceph_decode_32(&p);

	num_mon = ceph_decode_32(&p);
	ceph_decode_need(&p, end, num_mon*sizeof(m->mon_inst[0]), bad);
	if (num_mon >= CEPH_MAX_MON)
		goto bad;
	m = kmalloc(sizeof(*m) + sizeof(m->mon_inst[0])*num_mon, GFP_NOFS);
	if (m == NULL)
		return ERR_PTR(-ENOMEM);
	m->fsid = fsid;
	m->epoch = epoch;
	m->num_mon = num_mon;
	ceph_decode_copy(&p, m->mon_inst, num_mon*sizeof(m->mon_inst[0]));

	dout("monmap_decode epoch %d, num_mon %d\n", m->epoch, m->num_mon);
	for (i = 0; i < m->num_mon; i++)
		dout("monmap_decode mon%d is %s\n", i,
		     pr_addr(&m->mon_inst[i].addr.in_addr));
	return m;

bad:
	dout("monmap_decode failed with %d\n", err);
	kfree(m);
	return ERR_PTR(err);
}
/*
 * Return true if *addr is included in the monmap.
 */
int ceph_monmap_contains(struct ceph_monmap *m, struct ceph_entity_addr *addr)
{
	int i;

	for (i = 0; i < m->num_mon; i++)
		if (ceph_entity_addr_equal(addr, &m->mon_inst[i].addr))
			return 1;
	return 0;
}
/*
 * Close the monitor session, if any.
 */
static void __close_session(struct ceph_mon_client *monc)
{
	if (monc->con) {
		dout("__close_session closing mon%d\n", monc->cur_mon);
		ceph_con_close(monc->con);
		monc->cur_mon = -1;
	}
}
/*
 * Open a session with a (new) monitor.
 */
static int __open_session(struct ceph_mon_client *monc)
{
	u8 r;

	if (monc->cur_mon < 0) {
		get_random_bytes(&r, 1);
		monc->cur_mon = r % monc->monmap->num_mon;
		dout("open_session num=%d r=%d -> mon%d\n",
		     monc->monmap->num_mon, r, monc->cur_mon);
		monc->sub_sent = 0;
		monc->sub_renew_after = jiffies;  /* i.e., expired */
		monc->want_next_osdmap = !!monc->want_next_osdmap;

		dout("open_session mon%d opening\n", monc->cur_mon);
		monc->con->peer_name.type = CEPH_ENTITY_TYPE_MON;
		monc->con->peer_name.num = cpu_to_le64(monc->cur_mon);
		ceph_con_open(monc->con,
			      &monc->monmap->mon_inst[monc->cur_mon].addr);
	} else {
		dout("open_session mon%d already open\n", monc->cur_mon);
	}
	return 0;
}
static bool __sub_expired(struct ceph_mon_client *monc)
{
	return time_after_eq(jiffies, monc->sub_renew_after);
}
/*
 * Reschedule the delayed work timer.
 */
static void __schedule_delayed(struct ceph_mon_client *monc)
{
	unsigned delay;

	if (monc->cur_mon < 0 || monc->want_mount || __sub_expired(monc))
		delay = 10 * HZ;
	else
		delay = 20 * HZ;
	dout("__schedule_delayed after %u\n", delay);
	schedule_delayed_work(&monc->delayed_work, delay);
}
/*
 * Send a subscribe request for the mdsmap and/or osdmap.
 */
static void __send_subscribe(struct ceph_mon_client *monc)
{
	dout("__send_subscribe sub_sent=%u exp=%u want_osd=%d\n",
	     (unsigned)monc->sub_sent, __sub_expired(monc),
	     monc->want_next_osdmap);
	if ((__sub_expired(monc) && !monc->sub_sent) ||
	    monc->want_next_osdmap == 1) {
		struct ceph_msg *msg;
		struct ceph_mon_subscribe_item *i;
		void *p, *end;

		msg = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 64, 0, 0, NULL);
		if (IS_ERR(msg))
			return;

		p = msg->front.iov_base;
		end = p + msg->front.iov_len;

		dout("__send_subscribe to 'mdsmap' %u+\n",
		     (unsigned)monc->have_mdsmap);
		if (monc->want_next_osdmap) {
			dout("__send_subscribe to 'osdmap' %u\n",
			     (unsigned)monc->have_osdmap);
			ceph_encode_32(&p, 2);
			ceph_encode_string(&p, end, "osdmap", 6);
			i = p;
			i->have = cpu_to_le64(monc->have_osdmap);
			i->onetime = 1;
			p += sizeof(*i);
			monc->want_next_osdmap = 2;  /* requested */
		} else {
			ceph_encode_32(&p, 1);
		}
		ceph_encode_string(&p, end, "mdsmap", 6);
		i = p;
		i->have = cpu_to_le64(monc->have_mdsmap);
		i->onetime = 0;
		p += sizeof(*i);

		msg->front.iov_len = p - msg->front.iov_base;
		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
		ceph_con_send(monc->con, msg);

		monc->sub_sent = jiffies | 1;  /* never 0 */
	}
}
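/*
 * Handle a subscribe ack.  The monitor grants the subscription for
 * h->duration seconds; we arm sub_renew_after at roughly half of that
 * interval so the renewal goes out well before the subscription lapses.
 */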
static void handle_subscribe_ack(struct ceph_mon_client *monc,
				 struct ceph_msg *msg)
{
	unsigned seconds;
	struct ceph_mon_subscribe_ack *h = msg->front.iov_base;

	if (msg->front.iov_len < sizeof(*h))
		goto bad;
	seconds = le32_to_cpu(h->duration);

	mutex_lock(&monc->mutex);
	if (monc->hunting) {
		pr_info("mon%d %s session established\n",
			monc->cur_mon, pr_addr(&monc->con->peer_addr.in_addr));
		monc->hunting = false;
	}
	dout("handle_subscribe_ack after %d seconds\n", seconds);
	monc->sub_renew_after = monc->sub_sent + (seconds >> 1)*HZ - 1;
	monc->sub_sent = 0;
	mutex_unlock(&monc->mutex);
	return;

bad:
	pr_err("got corrupt subscribe-ack msg\n");
}
/*
 * Keep track of which maps we have.
 */
int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 got)
{
	mutex_lock(&monc->mutex);
	monc->have_mdsmap = got;
	mutex_unlock(&monc->mutex);
	return 0;
}

int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 got)
{
	mutex_lock(&monc->mutex);
	monc->have_osdmap = got;
	monc->want_next_osdmap = 0;
	mutex_unlock(&monc->mutex);
	return 0;
}
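/*
 * want_next_osdmap acts as a small state machine (values as used below):
 * 0 = no new osdmap needed, 1 = we want the next map and still need to
 * subscribe for it, 2 = the subscribe request has been sent and we are
 * waiting for the map to arrive.
 */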
/*
 * Register interest in the next osdmap.
 */
void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc)
{
	dout("request_next_osdmap have %u\n", monc->have_osdmap);
	mutex_lock(&monc->mutex);
	if (!monc->want_next_osdmap)
		monc->want_next_osdmap = 1;
	if (monc->want_next_osdmap < 2)
		__send_subscribe(monc);
	mutex_unlock(&monc->mutex);
}
static void __request_mount(struct ceph_mon_client *monc)
{
	struct ceph_msg *msg;
	struct ceph_client_mount *h;
	int err;

	dout("__request_mount\n");
	err = __open_session(monc);
	if (err)
		return;
	msg = ceph_msg_new(CEPH_MSG_CLIENT_MOUNT, sizeof(*h), 0, 0, NULL);
	if (IS_ERR(msg))
		return;
	h = msg->front.iov_base;
	h->monhdr.have_version = 0;
	h->monhdr.session_mon = cpu_to_le16(-1);
	h->monhdr.session_mon_tid = 0;
	ceph_con_send(monc->con, msg);
}
int ceph_monc_request_mount(struct ceph_mon_client *monc)
{
	if (!monc->con) {
		monc->con = kmalloc(sizeof(*monc->con), GFP_KERNEL);
		if (!monc->con)
			return -ENOMEM;
		ceph_con_init(monc->client->msgr, monc->con);
		monc->con->private = monc;
		monc->con->ops = &mon_con_ops;
	}

	mutex_lock(&monc->mutex);
	__request_mount(monc);
	__schedule_delayed(monc);
	mutex_unlock(&monc->mutex);
	return 0;
}
/*
 * The monitor responds with a mount ack that indicates mount success.  The
 * included client ticket allows the client to talk to MDSs and OSDs.
 */
static void handle_mount_ack(struct ceph_mon_client *monc, struct ceph_msg *msg)
{
	struct ceph_client *client = monc->client;
	struct ceph_monmap *monmap = NULL, *old = monc->monmap;
	void *p, *end;
	s32 result;
	u32 len;
	s64 cnum;
	int err = -EINVAL;

	if (client->whoami >= 0) {
		dout("handle_mount_ack - already mounted\n");
		return;
	}

	mutex_lock(&monc->mutex);

	dout("handle_mount_ack\n");
	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	ceph_decode_64_safe(&p, end, cnum, bad);
	ceph_decode_32_safe(&p, end, result, bad);
	ceph_decode_32_safe(&p, end, len, bad);
	if (result) {
		pr_err("mount denied: %.*s (%d)\n", len, (char *)p,
		       result);
		err = result;
		goto out;
	}
	p += len;

	ceph_decode_32_safe(&p, end, len, bad);
	ceph_decode_need(&p, end, len, bad);
	monmap = ceph_monmap_decode(p, p + len);
	if (IS_ERR(monmap)) {
		pr_err("problem decoding monmap, %d\n",
		       (int)PTR_ERR(monmap));
		err = -EINVAL;
		goto out;
	}
	p += len;

	client->monc.monmap = monmap;
	kfree(old);

	client->signed_ticket = NULL;
	client->signed_ticket_len = 0;
	monc->want_mount = false;

	client->whoami = cnum;
	client->msgr->inst.name.type = CEPH_ENTITY_TYPE_CLIENT;
	client->msgr->inst.name.num = cpu_to_le64(cnum);
	pr_info("client%lld fsid " FSID_FORMAT "\n",
		client->whoami, PR_FSID(&client->monc.monmap->fsid));

	ceph_debugfs_client_init(client);
	__send_subscribe(monc);
	err = 0;
	goto out;

bad:
	pr_err("error decoding mount_ack message\n");
out:
	client->mount_err = err;
	mutex_unlock(&monc->mutex);
	wake_up(&client->mount_wq);
}
static void handle_statfs_reply(struct ceph_mon_client *monc,
				struct ceph_msg *msg)
{
	struct ceph_mon_statfs_request *req;
	struct ceph_mon_statfs_reply *reply = msg->front.iov_base;
	u64 tid;

	if (msg->front.iov_len != sizeof(*reply))
		goto bad;
	tid = le64_to_cpu(reply->tid);
	dout("handle_statfs_reply %p tid %llu\n", msg, tid);

	mutex_lock(&monc->mutex);
	req = radix_tree_lookup(&monc->statfs_request_tree, tid);
	if (req) {
		*req->buf = reply->st;
		req->result = 0;
	}
	mutex_unlock(&monc->mutex);
	if (req)
		complete(&req->completion);
	return;

bad:
	pr_err("corrupt statfs reply, no tid\n");
}
/*
 * (Re)send a statfs request.
 */
static int send_statfs(struct ceph_mon_client *monc,
		       struct ceph_mon_statfs_request *req)
{
	struct ceph_msg *msg;
	struct ceph_mon_statfs *h;
	int err;

	dout("send_statfs tid %llu\n", req->tid);
	err = __open_session(monc);
	if (err)
		return err;
	msg = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), 0, 0, NULL);
	if (IS_ERR(msg))
		return PTR_ERR(msg);
	h = msg->front.iov_base;
	h->monhdr.have_version = 0;
	h->monhdr.session_mon = cpu_to_le16(-1);
	h->monhdr.session_mon_tid = 0;
	h->fsid = monc->monmap->fsid;
	h->tid = cpu_to_le64(req->tid);
	ceph_con_send(monc->con, msg);
	return 0;
}
/*
 * Do a synchronous statfs().
 */
int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf)
{
	struct ceph_mon_statfs_request req;
	int err;

	req.buf = buf;
	init_completion(&req.completion);

	/* allocate memory for reply */
	err = ceph_msgpool_resv(&monc->msgpool_statfs_reply, 1);
	if (err)
		return err;

	/* register request */
	mutex_lock(&monc->mutex);
	req.tid = ++monc->last_tid;
	req.last_attempt = jiffies;
	req.delay = BASE_DELAY_INTERVAL;
	if (radix_tree_insert(&monc->statfs_request_tree, req.tid, &req) < 0) {
		mutex_unlock(&monc->mutex);
		pr_err("ENOMEM in do_statfs\n");
		return -ENOMEM;
	}
	monc->num_statfs_requests++;
	mutex_unlock(&monc->mutex);

	/* send request and wait */
	err = send_statfs(monc, &req);
	if (!err)
		err = wait_for_completion_interruptible(&req.completion);

	mutex_lock(&monc->mutex);
	radix_tree_delete(&monc->statfs_request_tree, req.tid);
	monc->num_statfs_requests--;
	ceph_msgpool_resv(&monc->msgpool_statfs_reply, -1);
	mutex_unlock(&monc->mutex);

	if (!err)
		err = req.result;
	return err;
}
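/*
 * Example of the intended use of ceph_monc_do_statfs() (a minimal sketch,
 * not taken from this file; the real caller is assumed to be the statfs
 * hook in super.c):
 *
 *	struct ceph_statfs st;
 *	int err = ceph_monc_do_statfs(&client->monc, &st);
 *	if (err == 0)
 *		... copy the returned counters into the VFS kstatfs ...
 */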
/*
 * Resend pending statfs requests.
 */
static void __resend_statfs(struct ceph_mon_client *monc)
{
	u64 next_tid = 0;
	int got;
	struct ceph_mon_statfs_request *req;

	while (1) {
		got = radix_tree_gang_lookup(&monc->statfs_request_tree,
					     (void **)&req, next_tid, 1);
		if (!got)
			break;
		next_tid = req->tid + 1;
		send_statfs(monc, req);
	}
}
/*
 * Delayed work.  If we haven't mounted yet, retry.  Otherwise,
 * renew/retry the subscription as needed (in case it is timing out, or we
 * got an ENOMEM).  And keep the monitor connection alive.
 */
static void delayed_work(struct work_struct *work)
{
	struct ceph_mon_client *monc =
		container_of(work, struct ceph_mon_client, delayed_work.work);

	dout("monc delayed_work\n");
	mutex_lock(&monc->mutex);
	if (monc->want_mount) {
		__request_mount(monc);
	} else {
		if (__sub_expired(monc)) {
			__close_session(monc);
			__open_session(monc);  /* continue hunting */
		} else {
			ceph_con_keepalive(monc->con);
		}
		__send_subscribe(monc);
	}
	__schedule_delayed(monc);
	mutex_unlock(&monc->mutex);
}
/*
 * On startup, we build a temporary monmap populated with the IPs
 * provided by mount(2).
 */
static int build_initial_monmap(struct ceph_mon_client *monc)
{
	struct ceph_mount_args *args = monc->client->mount_args;
	struct ceph_entity_addr *mon_addr = args->mon_addr;
	int num_mon = args->num_mon;
	int i;

	/* build initial monmap */
	monc->monmap = kzalloc(sizeof(*monc->monmap) +
			       num_mon*sizeof(monc->monmap->mon_inst[0]),
			       GFP_KERNEL);
	if (!monc->monmap)
		return -ENOMEM;
	for (i = 0; i < num_mon; i++) {
		monc->monmap->mon_inst[i].addr = mon_addr[i];
		monc->monmap->mon_inst[i].addr.erank = 0;
		monc->monmap->mon_inst[i].addr.nonce = 0;
		monc->monmap->mon_inst[i].name.type = CEPH_ENTITY_TYPE_MON;
		monc->monmap->mon_inst[i].name.num = cpu_to_le64(i);
	}
	monc->monmap->num_mon = num_mon;

	/* release addr memory */
	kfree(args->mon_addr);
	args->mon_addr = NULL;
	args->num_mon = 0;
	return 0;
}
int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
{
	int err = 0;

	dout("init\n");
	memset(monc, 0, sizeof(*monc));
	monc->client = cl;
	mutex_init(&monc->mutex);

	err = build_initial_monmap(monc);
	if (err)
		goto out;

	/* msg pools */
	err = ceph_msgpool_init(&monc->msgpool_mount_ack, 4096, 1, false);
	if (err < 0)
		goto out;
	err = ceph_msgpool_init(&monc->msgpool_subscribe_ack,
				sizeof(struct ceph_mon_subscribe_ack), 1, false);
	if (err < 0)
		goto out;
	err = ceph_msgpool_init(&monc->msgpool_statfs_reply,
				sizeof(struct ceph_mon_statfs_reply), 0, false);
	if (err < 0)
		goto out;

	monc->cur_mon = -1;
	monc->hunting = false;  /* not really */
	monc->sub_renew_after = jiffies;

	INIT_DELAYED_WORK(&monc->delayed_work, delayed_work);
	INIT_RADIX_TREE(&monc->statfs_request_tree, GFP_NOFS);
	monc->num_statfs_requests = 0;

	monc->have_mdsmap = 0;
	monc->have_osdmap = 0;
	monc->want_next_osdmap = 1;
	monc->want_mount = true;
out:
	return err;
}
void ceph_monc_stop(struct ceph_mon_client *monc)
{
	dout("stop\n");
	cancel_delayed_work_sync(&monc->delayed_work);

	mutex_lock(&monc->mutex);
	__close_session(monc);
	if (monc->con) {
		monc->con->private = NULL;
		monc->con->ops->put(monc->con);
		monc->con = NULL;
	}
	mutex_unlock(&monc->mutex);

	ceph_msgpool_destroy(&monc->msgpool_mount_ack);
	ceph_msgpool_destroy(&monc->msgpool_subscribe_ack);
	ceph_msgpool_destroy(&monc->msgpool_statfs_reply);

	kfree(monc->monmap);
}
/*
 * Handle an incoming message.
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_mon_client *monc = con->private;
	int type = le16_to_cpu(msg->hdr.type);

	if (!monc)
		return;

	switch (type) {
	case CEPH_MSG_CLIENT_MOUNT_ACK:
		handle_mount_ack(monc, msg);
		break;
	case CEPH_MSG_MON_SUBSCRIBE_ACK:
		handle_subscribe_ack(monc, msg);
		break;
	case CEPH_MSG_STATFS_REPLY:
		handle_statfs_reply(monc, msg);
		break;
	case CEPH_MSG_MDS_MAP:
		ceph_mdsc_handle_map(&monc->client->mdsc, msg);
		break;
	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(&monc->client->osdc, msg);
		break;
	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
	ceph_msg_put(msg);
}
/*
 * Allocate memory for an incoming message.
 */
static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
				      struct ceph_msg_header *hdr)
{
	struct ceph_mon_client *monc = con->private;
	int type = le16_to_cpu(hdr->type);
	int front = le32_to_cpu(hdr->front_len);

	switch (type) {
	case CEPH_MSG_CLIENT_MOUNT_ACK:
		return ceph_msgpool_get(&monc->msgpool_mount_ack, front);
	case CEPH_MSG_MON_SUBSCRIBE_ACK:
		return ceph_msgpool_get(&monc->msgpool_subscribe_ack, front);
	case CEPH_MSG_STATFS_REPLY:
		return ceph_msgpool_get(&monc->msgpool_statfs_reply, front);
	}
	return ceph_alloc_msg(con, hdr);
}
/*
 * If the monitor connection resets, pick a new monitor and resubmit
 * any pending requests.
 */
static void mon_fault(struct ceph_connection *con)
{
	struct ceph_mon_client *monc = con->private;

	if (!monc)
		return;

	dout("mon_fault\n");
	mutex_lock(&monc->mutex);
	if (!monc->con)
		goto out;

	if (monc->con && !monc->hunting)
		pr_info("mon%d %s session lost, "
			"hunting for new mon\n", monc->cur_mon,
			pr_addr(&monc->con->peer_addr.in_addr));

	__close_session(monc);
	if (!monc->hunting) {
		/* start hunting */
		monc->hunting = true;
		if (__open_session(monc) == 0) {
			__send_subscribe(monc);
			__resend_statfs(monc);
		}
	} else {
		/* already hunting, let's wait a bit */
		__schedule_delayed(monc);
	}
out:
	mutex_unlock(&monc->mutex);
}
static const struct ceph_connection_operations mon_con_ops = {
	.get = ceph_con_get,
	.put = ceph_con_put,
	.dispatch = dispatch,
	.fault = mon_fault,
	.alloc_msg = mon_alloc_msg,
	.alloc_middle = ceph_alloc_middle,
};