ceph: skip mds sync on forced unmount
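
On a forced unmount the MDS sessions may already be unusable, so letting
ceph_mdsc_sync() wait for unsafe requests and cap flushes can block
forever.  The fix (the ceph_mdsc_sync() hunk near the end of this diff)
is an early return once the client has entered the SHUTDOWN state; a
minimal sketch of the guard:

    void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
    {
            /* forced unmount: don't wait on sessions that may be gone */
            if (mdsc->client->mount_state == CEPH_MOUNT_SHUTDOWN)
                    return;
            /* ... normal path: flush dirty caps, then wait for unsafe
             * requests and cap flushes to complete ... */
    }
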
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 2b19da3..0d451a8 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1,6 +1,7 @@
 #include "ceph_debug.h"
 
 #include <linux/wait.h>
+#include <linux/slab.h>
 #include <linux/sched.h>
 
 #include "mds_client.h"
@@ -8,6 +9,8 @@
 #include "super.h"
 #include "messenger.h"
 #include "decode.h"
+#include "auth.h"
+#include "pagelist.h"
 
 /*
  * A cluster of MDS (metadata server) daemons is responsible for
@@ -253,6 +256,7 @@ static const char *session_state_name(int s)
        case CEPH_MDS_SESSION_OPEN: return "open";
        case CEPH_MDS_SESSION_HUNG: return "hung";
        case CEPH_MDS_SESSION_CLOSING: return "closing";
+       case CEPH_MDS_SESSION_RESTARTING: return "restarting";
        case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
        default: return "???";
        }
@@ -275,7 +279,9 @@ void ceph_put_mds_session(struct ceph_mds_session *s)
        dout("mdsc put_session %p %d -> %d\n", s,
             atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
        if (atomic_dec_and_test(&s->s_ref)) {
-               ceph_con_shutdown(&s->s_con);
+               if (s->s_authorizer)
+                       s->s_mdsc->client->monc.auth->ops->destroy_authorizer(
+                               s->s_mdsc->client->monc.auth, s->s_authorizer);
                kfree(s);
        }
 }
@@ -304,6 +310,15 @@ static bool __have_session(struct ceph_mds_client *mdsc, int mds)
        return mdsc->sessions[mds];
 }
 
+static int __verify_registered_session(struct ceph_mds_client *mdsc,
+                                      struct ceph_mds_session *s)
+{
+       if (s->s_mds >= mdsc->max_sessions ||
+           mdsc->sessions[s->s_mds] != s)
+               return -ENOENT;
+       return 0;
+}
+
 /*
  * create+register a new session for given mds.
  * called under mdsc->mutex.
@@ -314,6 +329,8 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
        struct ceph_mds_session *s;
 
        s = kzalloc(sizeof(*s), GFP_NOFS);
+       if (!s)
+               return ERR_PTR(-ENOMEM);
        s->s_mdsc = mdsc;
        s->s_mds = mds;
        s->s_state = CEPH_MDS_SESSION_NEW;
@@ -326,7 +343,6 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
        s->s_con.ops = &mds_con_ops;
        s->s_con.peer_name.type = CEPH_ENTITY_TYPE_MDS;
        s->s_con.peer_name.num = cpu_to_le64(mds);
-       ceph_con_open(&s->s_con, ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
 
        spin_lock_init(&s->s_cap_lock);
        s->s_cap_gen = 0;
@@ -335,10 +351,12 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
        s->s_renew_seq = 0;
        INIT_LIST_HEAD(&s->s_caps);
        s->s_nr_caps = 0;
+       s->s_trim_caps = 0;
        atomic_set(&s->s_ref, 1);
        INIT_LIST_HEAD(&s->s_waiting);
        INIT_LIST_HEAD(&s->s_unsafe);
        s->s_num_cap_releases = 0;
+       s->s_cap_iterator = NULL;
        INIT_LIST_HEAD(&s->s_cap_releases);
        INIT_LIST_HEAD(&s->s_cap_releases_done);
        INIT_LIST_HEAD(&s->s_cap_flushing);
@@ -352,7 +370,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
                dout("register_session realloc to %d\n", newmax);
                sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
                if (sa == NULL)
-                       return ERR_PTR(-ENOMEM);
+                       goto fail_realloc;
                if (mdsc->sessions) {
                        memcpy(sa, mdsc->sessions,
                               mdsc->max_sessions * sizeof(void *));
@@ -363,17 +381,27 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
        }
        mdsc->sessions[mds] = s;
        atomic_inc(&s->s_ref);  /* one ref to sessions[], one to caller */
+
+       ceph_con_open(&s->s_con, ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
+
        return s;
+
+fail_realloc:
+       kfree(s);
+       return ERR_PTR(-ENOMEM);
 }
 
 /*
  * called under mdsc->mutex
  */
-static void unregister_session(struct ceph_mds_client *mdsc, int mds)
+static void __unregister_session(struct ceph_mds_client *mdsc,
+                              struct ceph_mds_session *s)
 {
-       dout("unregister_session mds%d %p\n", mds, mdsc->sessions[mds]);
-       ceph_put_mds_session(mdsc->sessions[mds]);
-       mdsc->sessions[mds] = NULL;
+       dout("__unregister_session mds%d %p\n", s->s_mds, s);
+       BUG_ON(mdsc->sessions[s->s_mds] != s);
+       mdsc->sessions[s->s_mds] = NULL;
+       ceph_con_close(&s->s_con);
+       ceph_put_mds_session(s);
 }
 
 /*
@@ -389,41 +417,40 @@ static void put_request_session(struct ceph_mds_request *req)
        }
 }
 
-void ceph_mdsc_put_request(struct ceph_mds_request *req)
+void ceph_mdsc_release_request(struct kref *kref)
 {
-       dout("mdsc put_request %p %d -> %d\n", req,
-            atomic_read(&req->r_ref), atomic_read(&req->r_ref)-1);
-       if (atomic_dec_and_test(&req->r_ref)) {
-               if (req->r_request)
-                       ceph_msg_put(req->r_request);
-               if (req->r_reply) {
-                       ceph_msg_put(req->r_reply);
-                       destroy_reply_info(&req->r_reply_info);
-               }
-               if (req->r_inode) {
-                       ceph_put_cap_refs(ceph_inode(req->r_inode),
-                                         CEPH_CAP_PIN);
-                       iput(req->r_inode);
-               }
-               if (req->r_locked_dir)
-                       ceph_put_cap_refs(ceph_inode(req->r_locked_dir),
-                                         CEPH_CAP_PIN);
-               if (req->r_target_inode)
-                       iput(req->r_target_inode);
-               if (req->r_dentry)
-                       dput(req->r_dentry);
-               if (req->r_old_dentry) {
-                       ceph_put_cap_refs(
-                            ceph_inode(req->r_old_dentry->d_parent->d_inode),
-                            CEPH_CAP_PIN);
-                       dput(req->r_old_dentry);
-               }
-               kfree(req->r_path1);
-               kfree(req->r_path2);
-               put_request_session(req);
-               ceph_unreserve_caps(&req->r_caps_reservation);
-               kfree(req);
+       struct ceph_mds_request *req = container_of(kref,
+                                                   struct ceph_mds_request,
+                                                   r_kref);
+       if (req->r_request)
+               ceph_msg_put(req->r_request);
+       if (req->r_reply) {
+               ceph_msg_put(req->r_reply);
+               destroy_reply_info(&req->r_reply_info);
        }
+       if (req->r_inode) {
+               ceph_put_cap_refs(ceph_inode(req->r_inode),
+                                 CEPH_CAP_PIN);
+               iput(req->r_inode);
+       }
+       if (req->r_locked_dir)
+               ceph_put_cap_refs(ceph_inode(req->r_locked_dir),
+                                 CEPH_CAP_PIN);
+       if (req->r_target_inode)
+               iput(req->r_target_inode);
+       if (req->r_dentry)
+               dput(req->r_dentry);
+       if (req->r_old_dentry) {
+               ceph_put_cap_refs(
+                       ceph_inode(req->r_old_dentry->d_parent->d_inode),
+                       CEPH_CAP_PIN);
+               dput(req->r_old_dentry);
+       }
+       kfree(req->r_path1);
+       kfree(req->r_path2);
+       put_request_session(req);
+       ceph_unreserve_caps(&req->r_caps_reservation);
+       kfree(req);
 }
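
The request refcount moves from a hand-rolled atomic_t to a kref, with
ceph_mdsc_release_request() above as the release callback.  Presumably
the put side (declared in mds_client.h, not shown in this diff) becomes
a thin kref_put() wrapper:

    /* assumption: matching helper in mds_client.h, not part of this diff */
    static inline void ceph_mdsc_put_request(struct ceph_mds_request *req)
    {
            kref_put(&req->r_kref, ceph_mdsc_release_request);
    }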
 
 /*
@@ -435,10 +462,42 @@ static struct ceph_mds_request *__lookup_request(struct ceph_mds_client *mdsc,
                                             u64 tid)
 {
        struct ceph_mds_request *req;
-       req = radix_tree_lookup(&mdsc->request_tree, tid);
-       if (req)
-               ceph_mdsc_get_request(req);
-       return req;
+       struct rb_node *n = mdsc->request_tree.rb_node;
+
+       while (n) {
+               req = rb_entry(n, struct ceph_mds_request, r_node);
+               if (tid < req->r_tid)
+                       n = n->rb_left;
+               else if (tid > req->r_tid)
+                       n = n->rb_right;
+               else {
+                       ceph_mdsc_get_request(req);
+                       return req;
+               }
+       }
+       return NULL;
+}
+
+static void __insert_request(struct ceph_mds_client *mdsc,
+                            struct ceph_mds_request *new)
+{
+       struct rb_node **p = &mdsc->request_tree.rb_node;
+       struct rb_node *parent = NULL;
+       struct ceph_mds_request *req = NULL;
+
+       while (*p) {
+               parent = *p;
+               req = rb_entry(parent, struct ceph_mds_request, r_node);
+               if (new->r_tid < req->r_tid)
+                       p = &(*p)->rb_left;
+               else if (new->r_tid > req->r_tid)
+                       p = &(*p)->rb_right;
+               else
+                       BUG();
+       }
+
+       rb_link_node(&new->r_node, parent, p);
+       rb_insert_color(&new->r_node, &mdsc->request_tree);
 }
 
 /*
@@ -456,7 +515,7 @@ static void __register_request(struct ceph_mds_client *mdsc,
                ceph_reserve_caps(&req->r_caps_reservation, req->r_num_caps);
        dout("__register_request %p tid %lld\n", req, req->r_tid);
        ceph_mdsc_get_request(req);
-       radix_tree_insert(&mdsc->request_tree, req->r_tid, (void *)req);
+       __insert_request(mdsc, req);
 
        if (dir) {
                struct ceph_inode_info *ci = ceph_inode(dir);
@@ -472,8 +531,8 @@ static void __unregister_request(struct ceph_mds_client *mdsc,
                                 struct ceph_mds_request *req)
 {
        dout("__unregister_request %p tid %lld\n", req, req->r_tid);
-       radix_tree_delete(&mdsc->request_tree, req->r_tid);
-       ceph_mdsc_put_request(req);
+       rb_erase(&req->r_node, &mdsc->request_tree);
+       RB_CLEAR_NODE(&req->r_node);
 
        if (req->r_unsafe_dir) {
                struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
@@ -482,6 +541,8 @@ static void __unregister_request(struct ceph_mds_client *mdsc,
                list_del_init(&req->r_unsafe_dir_item);
                spin_unlock(&ci->i_unsafe_lock);
        }
+
+       ceph_mdsc_put_request(req);
 }
 
 /*
@@ -604,10 +665,10 @@ static struct ceph_msg *create_session_msg(u32 op, u64 seq)
        struct ceph_msg *msg;
        struct ceph_mds_session_head *h;
 
-       msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), 0, 0, NULL);
-       if (IS_ERR(msg)) {
+       msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h));
+       if (!msg) {
                pr_err("create_session_msg ENOMEM creating msg\n");
-               return ERR_PTR(PTR_ERR(msg));
+               return NULL;
        }
        h = msg->front.iov_base;
        h->op = cpu_to_le32(op);
@@ -626,7 +687,6 @@ static int __open_session(struct ceph_mds_client *mdsc,
        struct ceph_msg *msg;
        int mstate;
        int mds = session->s_mds;
-       int err = 0;
 
        /* wait for mds to go active? */
        mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
@@ -637,13 +697,9 @@ static int __open_session(struct ceph_mds_client *mdsc,
 
        /* send connect message */
        msg = create_session_msg(CEPH_SESSION_REQUEST_OPEN, session->s_seq);
-       if (IS_ERR(msg)) {
-               err = PTR_ERR(msg);
-               goto out;
-       }
+       if (!msg)
+               return -ENOMEM;
        ceph_con_send(&session->s_con, msg);
-
-out:
        return 0;
 }
 
@@ -675,34 +731,71 @@ static void cleanup_cap_releases(struct ceph_mds_session *session)
 }
 
 /*
- * Helper to safely iterate over all caps associated with a session.
+ * Helper to safely iterate over all caps associated with a session, with
+ * special care taken to handle a racing __ceph_remove_cap().
  *
- * caller must hold session s_mutex
+ * Caller must hold session s_mutex.
  */
 static int iterate_session_caps(struct ceph_mds_session *session,
                                 int (*cb)(struct inode *, struct ceph_cap *,
                                            void *), void *arg)
 {
-       struct ceph_cap *cap, *ncap;
-       struct inode *inode;
+       struct list_head *p;
+       struct ceph_cap *cap;
+       struct inode *inode, *last_inode = NULL;
+       struct ceph_cap *old_cap = NULL;
        int ret;
 
        dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
        spin_lock(&session->s_cap_lock);
-       list_for_each_entry_safe(cap, ncap, &session->s_caps, session_caps) {
+       p = session->s_caps.next;
+       while (p != &session->s_caps) {
+               cap = list_entry(p, struct ceph_cap, session_caps);
                inode = igrab(&cap->ci->vfs_inode);
-               if (!inode)
+               if (!inode) {
+                       p = p->next;
                        continue;
+               }
+               session->s_cap_iterator = cap;
                spin_unlock(&session->s_cap_lock);
+
+               if (last_inode) {
+                       iput(last_inode);
+                       last_inode = NULL;
+               }
+               if (old_cap) {
+                       ceph_put_cap(old_cap);
+                       old_cap = NULL;
+               }
+
                ret = cb(inode, cap, arg);
-               iput(inode);
-               if (ret < 0)
-                       return ret;
+               last_inode = inode;
+
                spin_lock(&session->s_cap_lock);
+               p = p->next;
+               if (cap->ci == NULL) {
+                       dout("iterate_session_caps  finishing cap %p removal\n",
+                            cap);
+                       BUG_ON(cap->session != session);
+                       list_del_init(&cap->session_caps);
+                       session->s_nr_caps--;
+                       cap->session = NULL;
+                       old_cap = cap;  /* put_cap it w/o locks held */
+               }
+               if (ret < 0)
+                       goto out;
        }
+       ret = 0;
+out:
+       session->s_cap_iterator = NULL;
        spin_unlock(&session->s_cap_lock);
 
-       return 0;
+       if (last_inode)
+               iput(last_inode);
+       if (old_cap)
+               ceph_put_cap(old_cap);
+
+       return ret;
 }
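
iterate_session_caps() above drops s_cap_lock around the callback and
pins its position in session->s_cap_iterator.  The convention implied by
the "finishing cap %p removal" branch is that a racing remover, seeing
the cap pinned, detaches it from the inode but leaves the session-list
unlinking to the iterator.  A sketch of the hand-off; the remover side
lives in fs/ceph/caps.c and is an assumption here:

    /*
     * remover, under s_cap_lock (assumed; real code in __ceph_remove_cap):
     *     cap->ci = NULL;                        // detach from the inode
     *     if (session->s_cap_iterator != cap) {  // not pinned: unlink now
     *             list_del_init(&cap->session_caps);
     *             session->s_nr_caps--;
     *             cap->session = NULL;
     *     }                                      // else iterator finishes it
     *
     * iterator, after the callback, under s_cap_lock (shown above):
     *     if (cap->ci == NULL) {                 // remover deferred to us
     *             list_del_init(&cap->session_caps);
     *             session->s_nr_caps--;
     *             cap->session = NULL;
     *             old_cap = cap;                 // ceph_put_cap() unlocked
     *     }
     */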
 
 static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
@@ -735,24 +828,24 @@ static void remove_session_caps(struct ceph_mds_session *session)
 static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
                              void *arg)
 {
-       struct ceph_mds_session *session = arg;
+       struct ceph_inode_info *ci = ceph_inode(inode);
 
-       spin_lock(&inode->i_lock);
-       if (cap->gen != session->s_cap_gen) {
-               pr_err("failed reconnect %p %llx.%llx cap %p "
-                      "(gen %d < session %d)\n", inode, ceph_vinop(inode),
-                      cap, cap->gen, session->s_cap_gen);
-               __ceph_remove_cap(cap, NULL);
+       wake_up(&ci->i_cap_wq);
+       if (arg) {
+               spin_lock(&inode->i_lock);
+               ci->i_wanted_max_size = 0;
+               ci->i_requested_max_size = 0;
+               spin_unlock(&inode->i_lock);
        }
-       wake_up(&ceph_inode(inode)->i_cap_wq);
-       spin_unlock(&inode->i_lock);
        return 0;
 }
 
-static void wake_up_session_caps(struct ceph_mds_session *session)
+static void wake_up_session_caps(struct ceph_mds_session *session,
+                                int reconnect)
 {
        dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
-       iterate_session_caps(session, wake_up_session_cb, session);
+       iterate_session_caps(session, wake_up_session_cb,
+                            (void *)(unsigned long)reconnect);
 }
 
 /*
@@ -770,6 +863,7 @@ static int send_renew_caps(struct ceph_mds_client *mdsc,
        if (time_after_eq(jiffies, session->s_cap_ttl) &&
            time_after_eq(session->s_cap_ttl, session->s_renew_requested))
                pr_info("mds%d caps stale\n", session->s_mds);
+       session->s_renew_requested = jiffies;
 
        /* do not try to renew caps until a recovering mds has reconnected
         * with its clients. */
@@ -782,17 +876,18 @@ static int send_renew_caps(struct ceph_mds_client *mdsc,
 
        dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
                ceph_mds_state_name(state));
-       session->s_renew_requested = jiffies;
        msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
                                 ++session->s_renew_seq);
-       if (IS_ERR(msg))
-               return PTR_ERR(msg);
+       if (!msg)
+               return -ENOMEM;
        ceph_con_send(&session->s_con, msg);
        return 0;
 }
 
 /*
  * Note new cap ttl, and any transition from stale -> not stale (fresh?).
+ *
+ * Called under session->s_mutex
  */
 static void renewed_caps(struct ceph_mds_client *mdsc,
                         struct ceph_mds_session *session, int is_renew)
@@ -821,7 +916,7 @@ static void renewed_caps(struct ceph_mds_client *mdsc,
        spin_unlock(&session->s_cap_lock);
 
        if (wake)
-               wake_up_session_caps(session);
+               wake_up_session_caps(session, 0);
 }
 
 /*
@@ -831,17 +926,15 @@ static int request_close_session(struct ceph_mds_client *mdsc,
                                 struct ceph_mds_session *session)
 {
        struct ceph_msg *msg;
-       int err = 0;
 
        dout("request_close_session mds%d state %s seq %lld\n",
             session->s_mds, session_state_name(session->s_state),
             session->s_seq);
        msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
-       if (IS_ERR(msg))
-               err = PTR_ERR(msg);
-       else
-               ceph_con_send(&session->s_con, msg);
-       return err;
+       if (!msg)
+               return -ENOMEM;
+       ceph_con_send(&session->s_con, msg);
+       return 0;
 }
 
 /*
@@ -891,7 +984,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
        session->s_trim_caps--;
        if (oissued) {
                /* we aren't the only cap.. just remove us */
-               __ceph_remove_cap(cap, NULL);
+               __ceph_remove_cap(cap);
        } else {
                /* try to drop referring dentries */
                spin_unlock(&inode->i_lock);
@@ -923,6 +1016,7 @@ static int trim_caps(struct ceph_mds_client *mdsc,
                dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
                     session->s_mds, session->s_nr_caps, max_caps,
                        trim_caps - session->s_trim_caps);
+               session->s_trim_caps = 0;
        }
        return 0;
 }
@@ -943,7 +1037,7 @@ static int add_cap_releases(struct ceph_mds_client *mdsc,
        int err = -ENOMEM;
 
        if (extra < 0)
-               extra = mdsc->client->mount_args.cap_release_safety;
+               extra = mdsc->client->mount_args->cap_release_safety;
 
        spin_lock(&session->s_cap_lock);
 
@@ -957,8 +1051,7 @@ static int add_cap_releases(struct ceph_mds_client *mdsc,
 
        while (session->s_num_cap_releases < session->s_nr_caps + extra) {
                spin_unlock(&session->s_cap_lock);
-               msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, PAGE_CACHE_SIZE,
-                                  0, 0, NULL);
+               msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, PAGE_CACHE_SIZE);
                if (!msg)
                        goto out_unlocked;
                dout("add_cap_releases %p msg %p now %d\n", session, msg,
@@ -1080,11 +1173,12 @@ ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
        if (!req)
                return ERR_PTR(-ENOMEM);
 
+       mutex_init(&req->r_fill_mutex);
        req->r_started = jiffies;
        req->r_resend_mds = -1;
        INIT_LIST_HEAD(&req->r_unsafe_dir_item);
        req->r_fmode = -1;
-       atomic_set(&req->r_ref, 1);  /* one for request_tree, one for caller */
+       kref_init(&req->r_kref);
        INIT_LIST_HEAD(&req->r_wait);
        init_completion(&req->r_completion);
        init_completion(&req->r_safe_completion);
@@ -1096,17 +1190,25 @@ ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
 }
 
 /*
- * return oldest (lowest) tid in request tree, 0 if none.
+ * return the oldest (lowest-tid) request in the request tree; NULL/0 if none.
  *
  * called under mdsc->mutex.
  */
+static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
+{
+       if (RB_EMPTY_ROOT(&mdsc->request_tree))
+               return NULL;
+       return rb_entry(rb_first(&mdsc->request_tree),
+                       struct ceph_mds_request, r_node);
+}
+
 static u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
 {
-       struct ceph_mds_request *first;
-       if (radix_tree_gang_lookup(&mdsc->request_tree,
-                                  (void **)&first, 0, 1) <= 0)
-               return 0;
-       return first->r_tid;
+       struct ceph_mds_request *req = __get_oldest_req(mdsc);
+
+       if (req)
+               return req->r_tid;
+       return 0;
 }
 
 /*
@@ -1158,7 +1260,7 @@ retry:
                struct inode *inode = temp->d_inode;
 
                if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
-                       dout("build_path_dentry path+%d: %p SNAPDIR\n",
+                       dout("build_path path+%d: %p SNAPDIR\n",
                             pos, temp);
                } else if (stop_on_nosnap && inode &&
                           ceph_snap(inode) == CEPH_NOSNAP) {
@@ -1169,20 +1271,18 @@ retry:
                                break;
                        strncpy(path + pos, temp->d_name.name,
                                temp->d_name.len);
-                       dout("build_path_dentry path+%d: %p '%.*s'\n",
-                            pos, temp, temp->d_name.len, path + pos);
                }
                if (pos)
                        path[--pos] = '/';
                temp = temp->d_parent;
                if (temp == NULL) {
-                       pr_err("build_path_dentry corrupt dentry\n");
+                       pr_err("build_path corrupt dentry\n");
                        kfree(path);
                        return ERR_PTR(-EINVAL);
                }
        }
        if (pos != 0) {
-               pr_err("build_path_dentry did not end path lookup where "
+               pr_err("build_path did not end path lookup where "
                       "expected, namelen is %d, pos is %d\n", len, pos);
                /* presumably this is only possible if racing with a
                   rename of one of the parent directories (we can not
@@ -1194,7 +1294,7 @@ retry:
 
        *base = ceph_ino(temp->d_inode);
        *plen = len;
-       dout("build_path_dentry on %p %d built %llx '%.*s'\n",
+       dout("build_path on %p %d built %llx '%.*s'\n",
             dentry, atomic_read(&dentry->d_count), *base, len, path);
        return path;
 }
@@ -1306,7 +1406,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
        }
 
        len = sizeof(*head) +
-               pathlen1 + pathlen2 + 2*(sizeof(u32) + sizeof(u64));
+               pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64));
 
        /* calculate (max) length for cap releases */
        len += sizeof(struct ceph_mds_request_release) *
@@ -1317,9 +1417,13 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
        if (req->r_old_dentry_drop)
                len += req->r_old_dentry->d_name.len;
 
-       msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, 0, 0, NULL);
-       if (IS_ERR(msg))
+       msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len);
+       if (!msg) {
+               msg = ERR_PTR(-ENOMEM);
                goto out_free2;
+       }
+
+       msg->hdr.tid = cpu_to_le64(req->r_tid);
 
        head = msg->front.iov_base;
        p = msg->front.iov_base + sizeof(*head);
@@ -1406,14 +1510,13 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc,
        }
        msg = create_request_message(mdsc, req, mds);
        if (IS_ERR(msg)) {
-               req->r_reply = ERR_PTR(PTR_ERR(msg));
+               req->r_err = PTR_ERR(msg);
                complete_request(mdsc, req);
-               return -PTR_ERR(msg);
+               return PTR_ERR(msg);
        }
        req->r_request = msg;
 
        rhead = msg->front.iov_base;
-       rhead->tid = cpu_to_le64(req->r_tid);
        rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
        if (req->r_got_unsafe)
                flags |= CEPH_MDS_FLAG_REPLAY;
@@ -1442,7 +1545,7 @@ static int __do_request(struct ceph_mds_client *mdsc,
        int mds = -1;
        int err = -EAGAIN;
 
-       if (req->r_reply)
+       if (req->r_err || req->r_got_result)
                goto out;
 
        if (req->r_timeout &&
@@ -1462,8 +1565,13 @@ static int __do_request(struct ceph_mds_client *mdsc,
 
        /* get, open session */
        session = __ceph_lookup_mds_session(mdsc, mds);
-       if (!session)
+       if (!session) {
                session = register_session(mdsc, mds);
+               if (IS_ERR(session)) {
+                       err = PTR_ERR(session);
+                       goto finish;
+               }
+       }
        dout("do_request mds%d session %p state %s\n", mds, session,
             session_state_name(session->s_state));
        if (session->s_state != CEPH_MDS_SESSION_OPEN &&
@@ -1494,7 +1602,7 @@ out:
        return err;
 
 finish:
-       req->r_reply = ERR_PTR(err);
+       req->r_err = err;
        complete_request(mdsc, req);
        goto out;
 }
@@ -1520,26 +1628,19 @@ static void __wake_requests(struct ceph_mds_client *mdsc,
  */
 static void kick_requests(struct ceph_mds_client *mdsc, int mds, int all)
 {
-       struct ceph_mds_request *reqs[10];
-       u64 nexttid = 0;
-       int i, got;
+       struct ceph_mds_request *req;
+       struct rb_node *p;
 
        dout("kick_requests mds%d\n", mds);
-       while (nexttid <= mdsc->last_tid) {
-               got = radix_tree_gang_lookup(&mdsc->request_tree,
-                                            (void **)&reqs, nexttid, 10);
-               if (got == 0)
-                       break;
-               nexttid = reqs[got-1]->r_tid + 1;
-               for (i = 0; i < got; i++) {
-                       if (reqs[i]->r_got_unsafe)
-                               continue;
-                       if (reqs[i]->r_session &&
-                           reqs[i]->r_session->s_mds == mds) {
-                               dout(" kicking tid %llu\n", reqs[i]->r_tid);
-                               put_request_session(reqs[i]);
-                               __do_request(mdsc, reqs[i]);
-                       }
+       for (p = rb_first(&mdsc->request_tree); p; p = rb_next(p)) {
+               req = rb_entry(p, struct ceph_mds_request, r_node);
+               if (req->r_got_unsafe)
+                       continue;
+               if (req->r_session &&
+                   req->r_session->s_mds == mds) {
+                       dout(" kicking tid %llu\n", req->r_tid);
+                       put_request_session(req);
+                       __do_request(mdsc, req);
                }
        }
 }
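
With pending requests now kept in a tid-sorted rbtree instead of a radix
tree, kick_requests() above is a plain in-order walk: rb_first() and
rb_next() visit requests in ascending tid order, and the leftmost node
is always the oldest tid (which is what __get_oldest_req() and
__get_oldest_tid() earlier in this diff rely on).  The walk pattern,
assuming mdsc->mutex is held:

    /* sketch: in-order walk, oldest tid first; assumes mdsc->mutex held
     * so nodes cannot be erased underneath us */
    struct rb_node *p;
    struct ceph_mds_request *req;

    for (p = rb_first(&mdsc->request_tree); p; p = rb_next(p)) {
            req = rb_entry(p, struct ceph_mds_request, r_node);
            dout(" tid %llu\n", req->r_tid);
    }
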
@@ -1581,38 +1682,66 @@ int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
        __register_request(mdsc, req, dir);
        __do_request(mdsc, req);
 
+       if (req->r_err) {
+               err = req->r_err;
+               __unregister_request(mdsc, req);
+               dout("do_request early error %d\n", err);
+               goto out;
+       }
+
        /* wait */
-       if (!req->r_reply) {
-               mutex_unlock(&mdsc->mutex);
-               if (req->r_timeout) {
-                       err = wait_for_completion_timeout(&req->r_completion,
-                                                         req->r_timeout);
-                       if (err > 0)
-                               err = 0;
-                       else if (err == 0)
-                               req->r_reply = ERR_PTR(-EIO);
-               } else {
-                       wait_for_completion(&req->r_completion);
-               }
-               mutex_lock(&mdsc->mutex);
+       mutex_unlock(&mdsc->mutex);
+       dout("do_request waiting\n");
+       if (req->r_timeout) {
+               err = (long)wait_for_completion_interruptible_timeout(
+                       &req->r_completion, req->r_timeout);
+               if (err == 0)
+                       err = -EIO;
+       } else {
+               err = wait_for_completion_interruptible(&req->r_completion);
        }
+       dout("do_request waited, got %d\n", err);
+       mutex_lock(&mdsc->mutex);
 
-       if (IS_ERR(req->r_reply)) {
-               err = PTR_ERR(req->r_reply);
-               req->r_reply = NULL;
+       /* only abort if we didn't race with a real reply */
+       if (req->r_got_result) {
+               err = le32_to_cpu(req->r_reply_info.head->result);
+       } else if (err < 0) {
+               dout("aborted request %lld with %d\n", req->r_tid, err);
 
-               /* clean up */
-               __unregister_request(mdsc, req);
-               if (!list_empty(&req->r_unsafe_item))
-                       list_del_init(&req->r_unsafe_item);
-               complete(&req->r_safe_completion);
-       } else if (req->r_err) {
-               err = req->r_err;
+               /*
+                * ensure we aren't running concurrently with
+                * ceph_fill_trace or ceph_readdir_prepopulate, which
+                * rely on locks (dir mutex) held by our caller.
+                */
+               mutex_lock(&req->r_fill_mutex);
+               req->r_err = err;
+               req->r_aborted = true;
+               mutex_unlock(&req->r_fill_mutex);
+
+               if (req->r_locked_dir &&
+                   (req->r_op & CEPH_MDS_OP_WRITE)) {
+                       struct ceph_inode_info *ci =
+                               ceph_inode(req->r_locked_dir);
+
+                       dout("aborted, clearing I_COMPLETE on %p, leases\n",
+                            req->r_locked_dir);
+                       spin_lock(&req->r_locked_dir->i_lock);
+                       ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
+                       ci->i_release_count++;
+                       spin_unlock(&req->r_locked_dir->i_lock);
+
+                       if (req->r_dentry)
+                               ceph_invalidate_dentry_lease(req->r_dentry);
+                       if (req->r_old_dentry)
+                               ceph_invalidate_dentry_lease(req->r_old_dentry);
+               }
        } else {
-               err = le32_to_cpu(req->r_reply_info.head->result);
+               err = req->r_err;
        }
-       mutex_unlock(&mdsc->mutex);
 
+out:
+       mutex_unlock(&mdsc->mutex);
        dout("do_request %p done, result %d\n", req, err);
        return err;
 }
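
The wait above is now interruptible, so a signalled caller can abort a
request that the MDS may still answer.  The aborter and handle_reply()
(later in this diff) serialize on r_fill_mutex and use
r_aborted/r_got_result to decide who wins; roughly:

    /*
     * abort vs. reply ordering (both sides appear in this diff):
     *
     *   aborter (above):                 handle_reply() (below):
     *     mutex_lock(&r_fill_mutex);       mutex_lock(&r_fill_mutex);
     *     req->r_err = err;                ceph_fill_trace(...);
     *     req->r_aborted = true;           mutex_unlock(&r_fill_mutex);
     *     mutex_unlock(&r_fill_mutex);     if (!req->r_aborted)
     *                                              req->r_got_result = true;
     *                                      else
     *                                              drop the reply message
     */
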
@@ -1632,17 +1761,16 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
        struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
        u64 tid;
        int err, result;
-       int mds;
+       int mds = session->s_mds;
 
-       if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS)
-               return;
        if (msg->front.iov_len < sizeof(*head)) {
                pr_err("mdsc_handle_reply got corrupt (short) reply\n");
+               ceph_msg_dump(msg);
                return;
        }
 
        /* get request, session */
-       tid = le64_to_cpu(head->tid);
+       tid = le64_to_cpu(msg->hdr.tid);
        mutex_lock(&mdsc->mutex);
        req = __lookup_request(mdsc, tid);
        if (!req) {
@@ -1651,10 +1779,9 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
                return;
        }
        dout("handle_reply %p\n", req);
-       mds = le64_to_cpu(msg->hdr.src.name.num);
 
        /* correct session? */
-       if (!req->r_session && req->r_session != session) {
+       if (req->r_session != session) {
                pr_err("mdsc_handle_reply got %llu on session mds%d"
                       " not mds%d\n", tid, session->s_mds,
                       req->r_session ? req->r_session->s_mds : -1);
@@ -1706,16 +1833,12 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
                        list_del_init(&req->r_unsafe_item);
 
                        /* last unsafe request during umount? */
-                       if (mdsc->stopping && !__get_oldest_tid(mdsc))
+                       if (mdsc->stopping && !__get_oldest_req(mdsc))
                                complete(&mdsc->safe_umount_waiters);
                        mutex_unlock(&mdsc->mutex);
                        goto out;
                }
-       }
-
-       BUG_ON(req->r_reply);
-
-       if (!head->safe) {
+       } else {
                req->r_got_unsafe = true;
                list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
        }
@@ -1728,6 +1851,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
        mutex_lock(&session->s_mutex);
        if (err < 0) {
                pr_err("mdsc_handle_reply got corrupt reply mds%d\n", mds);
+               ceph_msg_dump(msg);
                goto out_err;
        }
 
@@ -1743,21 +1867,30 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
        }
 
        /* insert trace into our cache */
+       mutex_lock(&req->r_fill_mutex);
        err = ceph_fill_trace(mdsc->client->sb, req, req->r_session);
        if (err == 0) {
                if (result == 0 && rinfo->dir_nr)
                        ceph_readdir_prepopulate(req, req->r_session);
                ceph_unreserve_caps(&req->r_caps_reservation);
        }
+       mutex_unlock(&req->r_fill_mutex);
 
        up_read(&mdsc->snap_rwsem);
 out_err:
-       if (err) {
-               req->r_err = err;
+       mutex_lock(&mdsc->mutex);
+       if (!req->r_aborted) {
+               if (err) {
+                       req->r_err = err;
+               } else {
+                       req->r_reply = msg;
+                       ceph_msg_get(msg);
+                       req->r_got_result = true;
+               }
        } else {
-               req->r_reply = msg;
-               ceph_msg_get(msg);
+               dout("reply arrived after request %lld was aborted\n", tid);
        }
+       mutex_unlock(&mdsc->mutex);
 
        add_cap_releases(mdsc, req->r_session, -1);
        mutex_unlock(&session->s_mutex);
@@ -1774,38 +1907,29 @@ out:
 /*
  * handle mds notification that our request has been forwarded.
  */
-static void handle_forward(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
+static void handle_forward(struct ceph_mds_client *mdsc,
+                          struct ceph_mds_session *session,
+                          struct ceph_msg *msg)
 {
        struct ceph_mds_request *req;
-       u64 tid;
+       u64 tid = le64_to_cpu(msg->hdr.tid);
        u32 next_mds;
        u32 fwd_seq;
-       u8 must_resend;
        int err = -EINVAL;
        void *p = msg->front.iov_base;
        void *end = p + msg->front.iov_len;
-       int from_mds, state;
 
-       if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS)
-               goto bad;
-       from_mds = le64_to_cpu(msg->hdr.src.name.num);
-
-       ceph_decode_need(&p, end, sizeof(u64)+2*sizeof(u32), bad);
-       tid = ceph_decode_64(&p);
+       ceph_decode_need(&p, end, 2*sizeof(u32), bad);
        next_mds = ceph_decode_32(&p);
        fwd_seq = ceph_decode_32(&p);
-       must_resend = ceph_decode_8(&p);
-
-       WARN_ON(must_resend);  /* shouldn't happen. */
 
        mutex_lock(&mdsc->mutex);
        req = __lookup_request(mdsc, tid);
        if (!req) {
-               dout("forward %llu dne\n", tid);
+               dout("forward %llu to mds%d - req dne\n", tid, next_mds);
                goto out;  /* dup reply? */
        }
 
-       state = mdsc->sessions[next_mds]->s_state;
        if (fwd_seq <= req->r_num_fwd) {
                dout("forward %llu to mds%d - old seq %d <= %d\n",
                     tid, next_mds, req->r_num_fwd, fwd_seq);
@@ -1835,14 +1959,10 @@ static void handle_session(struct ceph_mds_session *session,
        struct ceph_mds_client *mdsc = session->s_mdsc;
        u32 op;
        u64 seq;
-       int mds;
+       int mds = session->s_mds;
        struct ceph_mds_session_head *h = msg->front.iov_base;
        int wake = 0;
 
-       if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS)
-               return;
-       mds = le64_to_cpu(msg->hdr.src.name.num);
-
        /* decode */
        if (msg->front.iov_len != sizeof(*h))
                goto bad;
@@ -1850,6 +1970,8 @@ static void handle_session(struct ceph_mds_session *session,
        seq = le64_to_cpu(h->seq);
 
        mutex_lock(&mdsc->mutex);
+       if (op == CEPH_SESSION_CLOSE)
+               __unregister_session(mdsc, session);
        /* FIXME: this ttl calculation is generous */
        session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
        mutex_unlock(&mdsc->mutex);
@@ -1880,7 +2002,6 @@ static void handle_session(struct ceph_mds_session *session,
                break;
 
        case CEPH_SESSION_CLOSE:
-               unregister_session(mdsc, mds);
                remove_session_caps(session);
                wake = 1; /* for good measure */
                complete(&mdsc->session_close_waiters);
@@ -1917,6 +2038,7 @@ static void handle_session(struct ceph_mds_session *session,
 bad:
        pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
               (int)msg->front.iov_len);
+       ceph_msg_dump(msg);
        return;
 }
 
@@ -1946,20 +2068,12 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
 /*
  * Encode information about a cap for a reconnect with the MDS.
  */
-struct encode_caps_data {
-       void **pp;
-       void *end;
-       int *num_caps;
-};
-
 static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
                          void *arg)
 {
-       struct ceph_mds_cap_reconnect *rec;
+       struct ceph_mds_cap_reconnect rec;
        struct ceph_inode_info *ci;
-       struct encode_caps_data *data = (struct encode_caps_data *)arg;
-       void *p = *(data->pp);
-       void *end = data->end;
+       struct ceph_pagelist *pagelist = arg;
        char *path;
        int pathlen, err;
        u64 pathbase;
@@ -1970,8 +2084,9 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
        dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
             inode, ceph_vinop(inode), cap, cap->cap_id,
             ceph_cap_string(cap->issued));
-       ceph_decode_need(&p, end, sizeof(u64), needmore);
-       ceph_encode_64(&p, ceph_ino(inode));
+       err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
+       if (err)
+               return err;
 
        dentry = d_find_alias(inode);
        if (dentry) {
@@ -1984,33 +2099,29 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
                path = NULL;
                pathlen = 0;
        }
-       ceph_decode_need(&p, end, pathlen+4, needmore);
-       ceph_encode_string(&p, end, path, pathlen);
+       err = ceph_pagelist_encode_string(pagelist, path, pathlen);
+       if (err)
+               goto out;
 
-       ceph_decode_need(&p, end, sizeof(*rec), needmore);
-       rec = p;
-       p += sizeof(*rec);
-       BUG_ON(p > end);
        spin_lock(&inode->i_lock);
        cap->seq = 0;        /* reset cap seq */
        cap->issue_seq = 0;  /* and issue_seq */
-       rec->cap_id = cpu_to_le64(cap->cap_id);
-       rec->pathbase = cpu_to_le64(pathbase);
-       rec->wanted = cpu_to_le32(__ceph_caps_wanted(ci));
-       rec->issued = cpu_to_le32(cap->issued);
-       rec->size = cpu_to_le64(inode->i_size);
-       ceph_encode_timespec(&rec->mtime, &inode->i_mtime);
-       ceph_encode_timespec(&rec->atime, &inode->i_atime);
-       rec->snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
+       rec.cap_id = cpu_to_le64(cap->cap_id);
+       rec.pathbase = cpu_to_le64(pathbase);
+       rec.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
+       rec.issued = cpu_to_le32(cap->issued);
+       rec.size = cpu_to_le64(inode->i_size);
+       ceph_encode_timespec(&rec.mtime, &inode->i_mtime);
+       ceph_encode_timespec(&rec.atime, &inode->i_atime);
+       rec.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
        spin_unlock(&inode->i_lock);
 
+       err = ceph_pagelist_append(pagelist, &rec, sizeof(rec));
+
+out:
        kfree(path);
        dput(dentry);
-       (*data->num_caps)++;
-       *(data->pp) = p;
-       return 0;
-needmore:
-       return -ENOSPC;
+       return err;
 }
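
The reconnect path used to guess a front-buffer size and retry on
ENOSPC; encode_caps_cb() above instead appends into a ceph_pagelist,
which grows on demand and is attached to the reconnect message as data
pages in send_mds_reconnect() below.  The pagelist calls, in the order a
caller uses them (all appear in this diff):

    struct ceph_pagelist *pl = kmalloc(sizeof(*pl), GFP_NOFS);

    if (!pl)
            return -ENOMEM;
    ceph_pagelist_init(pl);
    err = ceph_pagelist_encode_64(pl, ceph_ino(inode));   /* fixed width */
    if (!err)
            err = ceph_pagelist_encode_string(pl, path, pathlen);
    if (!err)
            err = ceph_pagelist_append(pl, &rec, sizeof(rec)); /* raw copy */
    /* on error, or once the message owning it has been put: */
    ceph_pagelist_release(pl);
    kfree(pl);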
 
 
@@ -2028,19 +2139,24 @@ needmore:
  */
 static void send_mds_reconnect(struct ceph_mds_client *mdsc, int mds)
 {
-       struct ceph_mds_session *session;
+       struct ceph_mds_session *session = NULL;
        struct ceph_msg *reply;
-       int newlen, len = 4 + 1;
-       void *p, *end;
-       int err;
-       int num_caps, num_realms = 0;
-       int got;
-       u64 next_snap_ino = 0;
-       __le32 *pnum_caps, *pnum_realms;
-       struct encode_caps_data iter_args;
+       struct rb_node *p;
+       int err = -ENOMEM;
+       struct ceph_pagelist *pagelist;
 
        pr_info("reconnect to recovering mds%d\n", mds);
 
+       pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
+       if (!pagelist)
+               goto fail_nopagelist;
+       ceph_pagelist_init(pagelist);
+
+       err = -ENOMEM;
+       reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0);
+       if (!reply)
+               goto fail_nomsg;
+
        /* find session */
        session = __ceph_lookup_mds_session(mdsc, mds);
        mutex_unlock(&mdsc->mutex);    /* drop lock for duration */
@@ -2056,12 +2172,6 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc, int mds)
 
                /* replay unsafe requests */
                replay_unsafe_requests(mdsc, session);
-
-               /* estimate needed space */
-               len += session->s_nr_caps *
-                       (100+sizeof(struct ceph_mds_cap_reconnect));
-               pr_info("estimating i need %d bytes for %d caps\n",
-                    len, session->s_nr_caps);
        } else {
                dout("no session for mds%d, will send short reconnect\n",
                     mds);
@@ -2069,107 +2179,70 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc, int mds)
 
        down_read(&mdsc->snap_rwsem);
 
-retry:
-       /* build reply */
-       reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, len, 0, 0, NULL);
-       if (IS_ERR(reply)) {
-               err = PTR_ERR(reply);
-               pr_err("send_mds_reconnect ENOMEM on %d for mds%d\n",
-                      len, mds);
-               goto out;
-       }
-       p = reply->front.iov_base;
-       end = p + len;
-
-       if (!session) {
-               ceph_encode_8(&p, 1); /* session was closed */
-               ceph_encode_32(&p, 0);
+       if (!session)
                goto send;
-       }
        dout("session %p state %s\n", session,
             session_state_name(session->s_state));
 
        /* traverse this session's caps */
-       ceph_encode_8(&p, 0);
-       pnum_caps = p;
-       ceph_encode_32(&p, session->s_nr_caps);
-       num_caps = 0;
-
-       iter_args.pp = &p;
-       iter_args.end = end;
-       iter_args.num_caps = &num_caps;
-       err = iterate_session_caps(session, encode_caps_cb, &iter_args);
-       if (err == -ENOSPC)
-               goto needmore;
+       err = ceph_pagelist_encode_32(pagelist, session->s_nr_caps);
+       if (err)
+               goto fail;
+       err = iterate_session_caps(session, encode_caps_cb, pagelist);
        if (err < 0)
-               goto out;
-       *pnum_caps = cpu_to_le32(num_caps);
+               goto fail;
 
        /*
         * snaprealms.  we provide mds with the ino, seq (version), and
         * parent for all of our realms.  If the mds has any newer info,
         * it will tell us.
         */
-       next_snap_ino = 0;
-       /* save some space for the snaprealm count */
-       pnum_realms = p;
-       ceph_decode_need(&p, end, sizeof(*pnum_realms), needmore);
-       p += sizeof(*pnum_realms);
-       num_realms = 0;
-       while (1) {
-               struct ceph_snap_realm *realm;
-               struct ceph_mds_snaprealm_reconnect *sr_rec;
-               got = radix_tree_gang_lookup(&mdsc->snap_realms,
-                                            (void **)&realm, next_snap_ino, 1);
-               if (!got)
-                       break;
+       for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
+               struct ceph_snap_realm *realm =
+                       rb_entry(p, struct ceph_snap_realm, node);
+               struct ceph_mds_snaprealm_reconnect sr_rec;
 
                dout(" adding snap realm %llx seq %lld parent %llx\n",
                     realm->ino, realm->seq, realm->parent_ino);
-               ceph_decode_need(&p, end, sizeof(*sr_rec), needmore);
-               sr_rec = p;
-               sr_rec->ino = cpu_to_le64(realm->ino);
-               sr_rec->seq = cpu_to_le64(realm->seq);
-               sr_rec->parent = cpu_to_le64(realm->parent_ino);
-               p += sizeof(*sr_rec);
-               num_realms++;
-               next_snap_ino = realm->ino + 1;
+               sr_rec.ino = cpu_to_le64(realm->ino);
+               sr_rec.seq = cpu_to_le64(realm->seq);
+               sr_rec.parent = cpu_to_le64(realm->parent_ino);
+               err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
+               if (err)
+                       goto fail;
        }
-       *pnum_realms = cpu_to_le32(num_realms);
 
 send:
-       reply->front.iov_len = p - reply->front.iov_base;
-       reply->hdr.front_len = cpu_to_le32(reply->front.iov_len);
-       dout("final len was %u (guessed %d)\n",
-            (unsigned)reply->front.iov_len, len);
+       reply->pagelist = pagelist;
+       reply->hdr.data_len = cpu_to_le32(pagelist->length);
+       reply->nr_pages = calc_pages_for(0, pagelist->length);
        ceph_con_send(&session->s_con, reply);
 
-       if (session) {
-               session->s_state = CEPH_MDS_SESSION_OPEN;
-               __wake_requests(mdsc, &session->s_waiting);
-       }
+       session->s_state = CEPH_MDS_SESSION_OPEN;
+       mutex_unlock(&session->s_mutex);
+
+       mutex_lock(&mdsc->mutex);
+       __wake_requests(mdsc, &session->s_waiting);
+       mutex_unlock(&mdsc->mutex);
+
+       ceph_put_mds_session(session);
 
-out:
        up_read(&mdsc->snap_rwsem);
-       if (session) {
-               mutex_unlock(&session->s_mutex);
-               ceph_put_mds_session(session);
-       }
        mutex_lock(&mdsc->mutex);
        return;
 
-needmore:
-       /*
-        * we need a larger buffer.  this doesn't very accurately
-        * factor in snap realms, but it's safe.
-        */
-       num_caps += num_realms;
-       newlen = len * ((100 * (session->s_nr_caps+3)) / (num_caps + 1)) / 100;
-       pr_info("i guessed %d, and did %d of %d caps, retrying with %d\n",
-            len, num_caps, session->s_nr_caps, newlen);
-       len = newlen;
+fail:
        ceph_msg_put(reply);
-       goto retry;
+       up_read(&mdsc->snap_rwsem);
+       mutex_unlock(&session->s_mutex);
+       ceph_put_mds_session(session);
+fail_nomsg:
+       ceph_pagelist_release(pagelist);
+       kfree(pagelist);
+fail_nopagelist:
+       pr_err("error %d preparing reconnect for mds%d\n", err, mds);
+       mutex_lock(&mdsc->mutex);
+       return;
 }
 
 
@@ -2209,7 +2282,7 @@ static void check_new_map(struct ceph_mds_client *mdsc,
                                /* the session never opened, just close it
                                 * out now */
                                __wake_requests(mdsc, &s->s_waiting);
-                               unregister_session(mdsc, i);
+                               __unregister_session(mdsc, s);
                        } else {
                                /* just close it */
                                mutex_unlock(&mdsc->mutex);
@@ -2244,8 +2317,10 @@ static void check_new_map(struct ceph_mds_client *mdsc,
                 */
                if (oldstate < CEPH_MDS_STATE_ACTIVE &&
                    newstate >= CEPH_MDS_STATE_ACTIVE) {
+                       pr_info("mds%d reconnect completed\n", s->s_mds);
                        kick_requests(mdsc, i, 1);
                        ceph_kick_flushing_caps(mdsc, s);
+                       wake_up_session_caps(s, 1);
                }
        }
 }
@@ -2267,24 +2342,22 @@ void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
        di->lease_session = NULL;
 }
 
-static void handle_lease(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
+static void handle_lease(struct ceph_mds_client *mdsc,
+                        struct ceph_mds_session *session,
+                        struct ceph_msg *msg)
 {
        struct super_block *sb = mdsc->client->sb;
        struct inode *inode;
-       struct ceph_mds_session *session;
        struct ceph_inode_info *ci;
        struct dentry *parent, *dentry;
        struct ceph_dentry_info *di;
-       int mds;
+       int mds = session->s_mds;
        struct ceph_mds_lease *h = msg->front.iov_base;
        struct ceph_vino vino;
        int mask;
        struct qstr dname;
        int release = 0;
 
-       if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS)
-               return;
-       mds = le64_to_cpu(msg->hdr.src.name.num);
        dout("handle_lease from mds%d\n", mds);
 
        /* decode */
@@ -2298,15 +2371,6 @@ static void handle_lease(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
        if (dname.len != get_unaligned_le32(h+1))
                goto bad;
 
-       /* find session */
-       mutex_lock(&mdsc->mutex);
-       session = __ceph_lookup_mds_session(mdsc, mds);
-       mutex_unlock(&mdsc->mutex);
-       if (!session) {
-               pr_err("handle_lease got lease but no session mds%d\n", mds);
-               return;
-       }
-
        mutex_lock(&session->s_mutex);
        session->s_seq++;
 
@@ -2375,11 +2439,11 @@ release:
 out:
        iput(inode);
        mutex_unlock(&session->s_mutex);
-       ceph_put_mds_session(session);
        return;
 
 bad:
        pr_err("corrupt lease message\n");
+       ceph_msg_dump(msg);
 }
 
 void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
@@ -2397,8 +2461,8 @@ void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
        dnamelen = dentry->d_name.len;
        len += dnamelen;
 
-       msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, 0, 0, NULL);
-       if (IS_ERR(msg))
+       msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len);
+       if (!msg)
                return;
        lease = msg->front.iov_base;
        lease->action = action;
@@ -2504,7 +2568,7 @@ static void delayed_work(struct work_struct *work)
        int renew_caps;
 
        dout("mdsc delayed_work\n");
-       ceph_check_delayed_caps(mdsc, 0);
+       ceph_check_delayed_caps(mdsc);
 
        mutex_lock(&mdsc->mutex);
        renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
@@ -2555,11 +2619,14 @@ static void delayed_work(struct work_struct *work)
 }
 
 
-void ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client)
+int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client)
 {
        mdsc->client = client;
        mutex_init(&mdsc->mutex);
        mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
+       if (mdsc->mdsmap == NULL)
+               return -ENOMEM;
+
        init_completion(&mdsc->safe_umount_waiters);
        init_completion(&mdsc->session_close_waiters);
        INIT_LIST_HEAD(&mdsc->waiting_for_map);
@@ -2567,11 +2634,11 @@ void ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client)
        mdsc->max_sessions = 0;
        mdsc->stopping = 0;
        init_rwsem(&mdsc->snap_rwsem);
-       INIT_RADIX_TREE(&mdsc->snap_realms, GFP_NOFS);
+       mdsc->snap_realms = RB_ROOT;
        INIT_LIST_HEAD(&mdsc->snap_empty);
        spin_lock_init(&mdsc->snap_empty_lock);
        mdsc->last_tid = 0;
-       INIT_RADIX_TREE(&mdsc->request_tree, GFP_NOFS);
+       mdsc->request_tree = RB_ROOT;
        INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
        mdsc->last_renew_caps = jiffies;
        INIT_LIST_HEAD(&mdsc->cap_delay_list);
@@ -2585,6 +2652,8 @@ void ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client)
        init_waitqueue_head(&mdsc->cap_flushing_wq);
        spin_lock_init(&mdsc->dentry_lru_lock);
        INIT_LIST_HEAD(&mdsc->dentry_lru);
+
+       return 0;
 }
 
 /*
@@ -2597,20 +2666,19 @@ static void wait_requests(struct ceph_mds_client *mdsc)
        struct ceph_client *client = mdsc->client;
 
        mutex_lock(&mdsc->mutex);
-       if (__get_oldest_tid(mdsc)) {
+       if (__get_oldest_req(mdsc)) {
                mutex_unlock(&mdsc->mutex);
+
                dout("wait_requests waiting for requests\n");
                wait_for_completion_timeout(&mdsc->safe_umount_waiters,
-                                   client->mount_args.mount_timeout * HZ);
-               mutex_lock(&mdsc->mutex);
+                                   client->mount_args->mount_timeout * HZ);
 
                /* tear down remaining requests */
-               while (radix_tree_gang_lookup(&mdsc->request_tree,
-                                             (void **)&req, 0, 1)) {
+               mutex_lock(&mdsc->mutex);
+               while ((req = __get_oldest_req(mdsc))) {
                        dout("wait_requests timed out on tid %llu\n",
                             req->r_tid);
-                       radix_tree_delete(&mdsc->request_tree, req->r_tid);
-                       ceph_mdsc_put_request(req);
+                       __unregister_request(mdsc, req);
                }
        }
        mutex_unlock(&mdsc->mutex);
@@ -2627,7 +2695,7 @@ void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
        mdsc->stopping = 1;
 
        drop_leases(mdsc);
-       ceph_check_delayed_caps(mdsc, 1);
+       ceph_flush_dirty_caps(mdsc);
        wait_requests(mdsc);
 }
 
@@ -2636,31 +2704,41 @@ void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
  */
 static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
 {
-       struct ceph_mds_request *req;
-       u64 next_tid = 0;
-       int got;
+       struct ceph_mds_request *req = NULL, *nextreq;
+       struct rb_node *n;
 
        mutex_lock(&mdsc->mutex);
        dout("wait_unsafe_requests want %lld\n", want_tid);
-       while (1) {
-               got = radix_tree_gang_lookup(&mdsc->request_tree, (void **)&req,
-                                            next_tid, 1);
-               if (!got)
-                       break;
-               if (req->r_tid > want_tid)
-                       break;
-
-               next_tid = req->r_tid + 1;
-               if ((req->r_op & CEPH_MDS_OP_WRITE) == 0)
-                       continue;  /* not a write op */
-
-               ceph_mdsc_get_request(req);
-               mutex_unlock(&mdsc->mutex);
-               dout("wait_unsafe_requests  wait on %llu (want %llu)\n",
-                    req->r_tid, want_tid);
-               wait_for_completion(&req->r_safe_completion);
-               mutex_lock(&mdsc->mutex);
-               ceph_mdsc_put_request(req);
+restart:
+       req = __get_oldest_req(mdsc);
+       while (req && req->r_tid <= want_tid) {
+               /* find next request */
+               n = rb_next(&req->r_node);
+               if (n)
+                       nextreq = rb_entry(n, struct ceph_mds_request, r_node);
+               else
+                       nextreq = NULL;
+               if ((req->r_op & CEPH_MDS_OP_WRITE)) {
+                       /* write op */
+                       ceph_mdsc_get_request(req);
+                       if (nextreq)
+                               ceph_mdsc_get_request(nextreq);
+                       mutex_unlock(&mdsc->mutex);
+                       dout("wait_unsafe_requests  wait on %llu (want %llu)\n",
+                            req->r_tid, want_tid);
+                       wait_for_completion(&req->r_safe_completion);
+                       mutex_lock(&mdsc->mutex);
+                       ceph_mdsc_put_request(req);
+                       if (!nextreq)
+                               break;  /* no next request existed; we're done */
+                       if (RB_EMPTY_NODE(&nextreq->r_node)) {
+                               /* next request was removed from tree */
+                               ceph_mdsc_put_request(nextreq);
+                               goto restart;
+                       }
+                       ceph_mdsc_put_request(nextreq);  /* won't go away */
+               }
+               req = nextreq;
        }
        mutex_unlock(&mdsc->mutex);
        dout("wait_unsafe_requests done\n");
@@ -2670,6 +2748,9 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
 {
        u64 want_tid, want_flush;
 
+       if (mdsc->client->mount_state == CEPH_MOUNT_SHUTDOWN)
+               return;
+
        dout("sync\n");
        mutex_lock(&mdsc->mutex);
        want_tid = mdsc->last_tid;
@@ -2677,7 +2758,7 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
        mutex_unlock(&mdsc->mutex);
        dout("sync want tid %lld flush_seq %lld\n", want_tid, want_flush);
 
-       ceph_check_delayed_caps(mdsc, 1);
+       ceph_flush_dirty_caps(mdsc);
 
        wait_unsafe_requests(mdsc, want_tid);
        wait_event(mdsc->cap_flushing_wq, check_cap_flush(mdsc, want_flush));
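
The early return on CEPH_MOUNT_SHUTDOWN is the point of this patch: once
an unmount has been forced there is nothing to gain from waiting on the
MDS, and waiting can hang the umount entirely.  The flag is presumably
raised from the forced-umount path in super.c, along these lines (a
sketch, not part of this diff):

    static void ceph_umount_begin(struct super_block *sb)
    {
            struct ceph_client *client = ceph_sb_to_client(sb);

            dout("ceph_umount_begin - starting forced umount\n");
            if (!client)
                    return;
            /* make the sync/flush paths bail out immediately */
            client->mount_state = CEPH_MOUNT_SHUTDOWN;
    }
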
@@ -2693,7 +2774,7 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
        int i;
        int n;
        struct ceph_client *client = mdsc->client;
-       unsigned long started, timeout = client->mount_args.mount_timeout * HZ;
+       unsigned long started, timeout = client->mount_args->mount_timeout * HZ;
 
        dout("close_sessions\n");
 
@@ -2733,7 +2814,7 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
        for (i = 0; i < mdsc->max_sessions; i++) {
                if (mdsc->sessions[i]) {
                        session = get_session(mdsc->sessions[i]);
-                       unregister_session(mdsc, i);
+                       __unregister_session(mdsc, session);
                        mutex_unlock(&mdsc->mutex);
                        mutex_lock(&session->s_mutex);
                        remove_session_caps(session);
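
unregister_session() now takes the session itself rather than an mds rank,
pairing with the __verify_registered_session() check used in dispatch()
below.  A minimal sketch of the renamed helper, assuming it still just
clears the slot, closes the connection, and drops the registration
reference:

    static void __unregister_session(struct ceph_mds_client *mdsc,
                                     struct ceph_mds_session *s)
    {
            dout("__unregister_session mds%d %p\n", s->s_mds, s);
            BUG_ON(mdsc->sessions[s->s_mds] != s);
            mdsc->sessions[s->s_mds] = NULL;  /* dispatch() now rejects it */
            ceph_con_close(&s->s_con);
            ceph_put_mds_session(s);
    }
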
@@ -2779,10 +2860,8 @@ void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
 
        ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
        ceph_decode_copy(&p, &fsid, sizeof(fsid));
-       if (ceph_fsid_compare(&fsid, &mdsc->client->monc.monmap->fsid)) {
-               pr_err("got mdsmap with wrong fsid\n");
+       if (ceph_check_fsid(mdsc->client, &fsid) < 0)
                return;
-       }
        epoch = ceph_decode_32(&p);
        maplen = ceph_decode_32(&p);
        dout("handle_map epoch %u len %d\n", epoch, (int)maplen);
@@ -2832,8 +2911,7 @@ static struct ceph_connection *con_get(struct ceph_connection *con)
        struct ceph_mds_session *s = con->private;
 
        if (get_session(s)) {
-               dout("mdsc con_get %p %d -> %d\n", s,
-                    atomic_read(&s->s_ref) - 1, atomic_read(&s->s_ref));
+               dout("mdsc con_get %p ok (%d)\n", s, atomic_read(&s->s_ref));
                return con;
        }
        dout("mdsc con_get %p FAIL\n", s);
@@ -2844,9 +2922,8 @@ static void con_put(struct ceph_connection *con)
 {
        struct ceph_mds_session *s = con->private;
 
-       dout("mdsc con_put %p %d -> %d\n", s, atomic_read(&s->s_ref),
-            atomic_read(&s->s_ref) - 1);
        ceph_put_mds_session(s);
+       dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref));
 }
 
 /*
@@ -2867,6 +2944,13 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
        struct ceph_mds_client *mdsc = s->s_mdsc;
        int type = le16_to_cpu(msg->hdr.type);
 
+       mutex_lock(&mdsc->mutex);
+       if (__verify_registered_session(mdsc, s) < 0) {
+               mutex_unlock(&mdsc->mutex);
+               goto out;
+       }
+       mutex_unlock(&mdsc->mutex);
+
        switch (type) {
        case CEPH_MSG_MDS_MAP:
                ceph_mdsc_handle_map(mdsc, msg);
@@ -2878,32 +2962,94 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
                handle_reply(s, msg);
                break;
        case CEPH_MSG_CLIENT_REQUEST_FORWARD:
-               handle_forward(mdsc, msg);
+               handle_forward(mdsc, s, msg);
                break;
        case CEPH_MSG_CLIENT_CAPS:
                ceph_handle_caps(s, msg);
                break;
        case CEPH_MSG_CLIENT_SNAP:
-               ceph_handle_snap(mdsc, msg);
+               ceph_handle_snap(mdsc, s, msg);
                break;
        case CEPH_MSG_CLIENT_LEASE:
-               handle_lease(mdsc, msg);
+               handle_lease(mdsc, s, msg);
                break;
 
        default:
                pr_err("received unknown message type %d %s\n", type,
                       ceph_msg_type_name(type));
        }
+out:
        ceph_msg_put(msg);
 }
 
+/*
+ * authentication
+ */
+static int get_authorizer(struct ceph_connection *con,
+                         void **buf, int *len, int *proto,
+                         void **reply_buf, int *reply_len, int force_new)
+{
+       struct ceph_mds_session *s = con->private;
+       struct ceph_mds_client *mdsc = s->s_mdsc;
+       struct ceph_auth_client *ac = mdsc->client->monc.auth;
+       int ret = 0;
+
+       if (force_new && s->s_authorizer) {
+               ac->ops->destroy_authorizer(ac, s->s_authorizer);
+               s->s_authorizer = NULL;
+       }
+       if (s->s_authorizer == NULL) {
+               if (ac->ops->create_authorizer) {
+                       ret = ac->ops->create_authorizer(
+                               ac, CEPH_ENTITY_TYPE_MDS,
+                               &s->s_authorizer,
+                               &s->s_authorizer_buf,
+                               &s->s_authorizer_buf_len,
+                               &s->s_authorizer_reply_buf,
+                               &s->s_authorizer_reply_buf_len);
+                       if (ret)
+                               return ret;
+               }
+       }
+
+       *proto = ac->protocol;
+       *buf = s->s_authorizer_buf;
+       *len = s->s_authorizer_buf_len;
+       *reply_buf = s->s_authorizer_reply_buf;
+       *reply_len = s->s_authorizer_reply_buf_len;
+       return 0;
+}
+
+static int verify_authorizer_reply(struct ceph_connection *con, int len)
+{
+       struct ceph_mds_session *s = con->private;
+       struct ceph_mds_client *mdsc = s->s_mdsc;
+       struct ceph_auth_client *ac = mdsc->client->monc.auth;
+
+       return ac->ops->verify_authorizer_reply(ac, s->s_authorizer, len);
+}
+
+static int invalidate_authorizer(struct ceph_connection *con)
+{
+       struct ceph_mds_session *s = con->private;
+       struct ceph_mds_client *mdsc = s->s_mdsc;
+       struct ceph_auth_client *ac = mdsc->client->monc.auth;
+
+       if (ac->ops->invalidate_authorizer)
+               ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);
+
+       return ceph_monc_validate_auth(&mdsc->client->monc);
+}
+
 const static struct ceph_connection_operations mds_con_ops = {
        .get = con_get,
        .put = con_put,
        .dispatch = dispatch,
+       .get_authorizer = get_authorizer,
+       .verify_authorizer_reply = verify_authorizer_reply,
+       .invalidate_authorizer = invalidate_authorizer,
        .peer_reset = peer_reset,
-       .alloc_msg = ceph_alloc_msg,
-       .alloc_middle = ceph_alloc_middle,
 };
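
The three new hooks plug MDS sessions into the shared auth machinery: the
messenger asks get_authorizer() for a ticket while negotiating a
connection, hands the server's response to verify_authorizer_reply(), and
calls invalidate_authorizer() on a rejected ticket so that a fresh one is
fetched from the monitors before reconnecting.  A simplified sketch of the
calling side (a hypothetical helper, not part of this patch):

    static int prepare_connect_authorizer(struct ceph_connection *con)
    {
            void *auth_buf, *reply_buf;
            int auth_len, reply_len;
            int auth_proto = CEPH_AUTH_UNKNOWN;
            int ret;

            if (!con->ops->get_authorizer)
                    return 0;       /* no authentication in use */

            ret = con->ops->get_authorizer(con, &auth_buf, &auth_len,
                                           &auth_proto, &reply_buf,
                                           &reply_len, 0 /* force_new */);
            if (ret)
                    return ret;

            /* the real messenger copies auth_buf/auth_len into its connect
             * message and keeps reply_buf for verify_authorizer_reply() */
            return 0;
    }

Dropping .alloc_msg/.alloc_middle presumably means this connection now
falls back to the messenger's generic message allocation.
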