#include "ceph_debug.h"
#include <linux/wait.h>
+#include <linux/slab.h>
#include <linux/sched.h>
#include "mds_client.h"
case CEPH_MDS_SESSION_OPEN: return "open";
case CEPH_MDS_SESSION_HUNG: return "hung";
case CEPH_MDS_SESSION_CLOSING: return "closing";
+ case CEPH_MDS_SESSION_RESTARTING: return "restarting";
case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
default: return "???";
}
return mdsc->sessions[mds];
}
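+/*
+ * Verify that a session is still registered in mdsc->sessions[].
+ * Message dispatch can race with session teardown, so callers check
+ * this under mdsc->mutex before trusting a session pointer.
+ */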
+static int __verify_registered_session(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *s)
+{
+ if (s->s_mds >= mdsc->max_sessions ||
+ mdsc->sessions[s->s_mds] != s)
+ return -ENOENT;
+ return 0;
+}
+
/*
* create+register a new session for given mds.
* called under mdsc->mutex.
struct ceph_mds_session *s;
s = kzalloc(sizeof(*s), GFP_NOFS);
+ if (!s)
+ return ERR_PTR(-ENOMEM);
s->s_mdsc = mdsc;
s->s_mds = mds;
s->s_state = CEPH_MDS_SESSION_NEW;
INIT_LIST_HEAD(&s->s_waiting);
INIT_LIST_HEAD(&s->s_unsafe);
s->s_num_cap_releases = 0;
- s->s_iterating_caps = false;
+ s->s_cap_iterator = NULL;
INIT_LIST_HEAD(&s->s_cap_releases);
INIT_LIST_HEAD(&s->s_cap_releases_done);
INIT_LIST_HEAD(&s->s_cap_flushing);
/*
* called under mdsc->mutex
*/
-static void unregister_session(struct ceph_mds_client *mdsc,
+static void __unregister_session(struct ceph_mds_client *mdsc,
struct ceph_mds_session *s)
{
- dout("unregister_session mds%d %p\n", s->s_mds, s);
+ dout("__unregister_session mds%d %p\n", s->s_mds, s);
+ BUG_ON(mdsc->sessions[s->s_mds] != s);
mdsc->sessions[s->s_mds] = NULL;
ceph_con_close(&s->s_con);
ceph_put_mds_session(s);
u64 tid)
{
struct ceph_mds_request *req;
- req = radix_tree_lookup(&mdsc->request_tree, tid);
- if (req)
- ceph_mdsc_get_request(req);
- return req;
+ struct rb_node *n = mdsc->request_tree.rb_node;
+
+ while (n) {
+ req = rb_entry(n, struct ceph_mds_request, r_node);
+ if (tid < req->r_tid)
+ n = n->rb_left;
+ else if (tid > req->r_tid)
+ n = n->rb_right;
+ else {
+ ceph_mdsc_get_request(req);
+ return req;
+ }
+ }
+ return NULL;
+}
+
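+/*
+ * Insert a new request into the tid-ordered request rbtree.
+ *
+ * Called under mdsc->mutex; tids are unique, so a duplicate is a bug.
+ */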
+static void __insert_request(struct ceph_mds_client *mdsc,
+ struct ceph_mds_request *new)
+{
+ struct rb_node **p = &mdsc->request_tree.rb_node;
+ struct rb_node *parent = NULL;
+ struct ceph_mds_request *req = NULL;
+
+ while (*p) {
+ parent = *p;
+ req = rb_entry(parent, struct ceph_mds_request, r_node);
+ if (new->r_tid < req->r_tid)
+ p = &(*p)->rb_left;
+ else if (new->r_tid > req->r_tid)
+ p = &(*p)->rb_right;
+ else
+ BUG();
+ }
+
+ rb_link_node(&new->r_node, parent, p);
+ rb_insert_color(&new->r_node, &mdsc->request_tree);
}
/*
ceph_reserve_caps(&req->r_caps_reservation, req->r_num_caps);
dout("__register_request %p tid %lld\n", req, req->r_tid);
ceph_mdsc_get_request(req);
- radix_tree_insert(&mdsc->request_tree, req->r_tid, (void *)req);
+ __insert_request(mdsc, req);
if (dir) {
struct ceph_inode_info *ci = ceph_inode(dir);
struct ceph_mds_request *req)
{
dout("__unregister_request %p tid %lld\n", req, req->r_tid);
- radix_tree_delete(&mdsc->request_tree, req->r_tid);
- ceph_mdsc_put_request(req);
+ rb_erase(&req->r_node, &mdsc->request_tree);
+ RB_CLEAR_NODE(&req->r_node);
if (req->r_unsafe_dir) {
struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
list_del_init(&req->r_unsafe_dir_item);
spin_unlock(&ci->i_unsafe_lock);
}
+
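+ /* drop the request tree's reference last; this may free req */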
+ ceph_mdsc_put_request(req);
}
/*
struct ceph_msg *msg;
struct ceph_mds_session_head *h;
- msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), 0, 0, NULL);
- if (IS_ERR(msg)) {
+ msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h));
+ if (!msg) {
pr_err("create_session_msg ENOMEM creating msg\n");
- return ERR_PTR(PTR_ERR(msg));
+ return NULL;
}
h = msg->front.iov_base;
h->op = cpu_to_le32(op);
struct ceph_msg *msg;
int mstate;
int mds = session->s_mds;
- int err = 0;
/* wait for mds to go active? */
mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
/* send connect message */
msg = create_session_msg(CEPH_SESSION_REQUEST_OPEN, session->s_seq);
- if (IS_ERR(msg)) {
- err = PTR_ERR(msg);
- goto out;
- }
+ if (!msg)
+ return -ENOMEM;
ceph_con_send(&session->s_con, msg);
-
-out:
return 0;
}
}
/*
- * Helper to safely iterate over all caps associated with a session.
+ * Helper to safely iterate over all caps associated with a session, with
+ * special care taken to handle a racing __ceph_remove_cap().
*
- * caller must hold session s_mutex
+ * Caller must hold session s_mutex.
*/
static int iterate_session_caps(struct ceph_mds_session *session,
int (*cb)(struct inode *, struct ceph_cap *,
void *), void *arg)
{
- struct ceph_cap *cap, *ncap;
- struct inode *inode;
+ struct list_head *p;
+ struct ceph_cap *cap;
+ struct inode *inode, *last_inode = NULL;
+ struct ceph_cap *old_cap = NULL;
int ret;
dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
spin_lock(&session->s_cap_lock);
- session->s_iterating_caps = true;
- list_for_each_entry_safe(cap, ncap, &session->s_caps, session_caps) {
+ p = session->s_caps.next;
+ while (p != &session->s_caps) {
+ cap = list_entry(p, struct ceph_cap, session_caps);
inode = igrab(&cap->ci->vfs_inode);
- if (!inode)
+ if (!inode) {
+ p = p->next;
continue;
+ }
+ session->s_cap_iterator = cap;
spin_unlock(&session->s_cap_lock);
+
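+ /*
+ * Drop the inode and cap refs from the previous iteration now,
+ * while s_cap_lock is not held: iput() can sleep, and the cap
+ * must be put without locks held (see below).
+ */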
+ if (last_inode) {
+ iput(last_inode);
+ last_inode = NULL;
+ }
+ if (old_cap) {
+ ceph_put_cap(old_cap);
+ old_cap = NULL;
+ }
+
ret = cb(inode, cap, arg);
- iput(inode);
+ last_inode = inode;
+
spin_lock(&session->s_cap_lock);
+ p = p->next;
+ if (cap->ci == NULL) {
+ dout("iterate_session_caps finishing cap %p removal\n",
+ cap);
+ BUG_ON(cap->session != session);
+ list_del_init(&cap->session_caps);
+ session->s_nr_caps--;
+ cap->session = NULL;
+ old_cap = cap; /* put_cap it w/o locks held */
+ }
if (ret < 0)
goto out;
}
ret = 0;
out:
- session->s_iterating_caps = false;
+ session->s_cap_iterator = NULL;
spin_unlock(&session->s_cap_lock);
+
+ if (last_inode)
+ iput(last_inode);
+ if (old_cap)
+ ceph_put_cap(old_cap);
+
return ret;
}
if (time_after_eq(jiffies, session->s_cap_ttl) &&
time_after_eq(session->s_cap_ttl, session->s_renew_requested))
pr_info("mds%d caps stale\n", session->s_mds);
+ session->s_renew_requested = jiffies;
/* do not try to renew caps until a recovering mds has reconnected
* with its clients. */
dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
ceph_mds_state_name(state));
- session->s_renew_requested = jiffies;
msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
++session->s_renew_seq);
- if (IS_ERR(msg))
- return PTR_ERR(msg);
+ if (!msg)
+ return -ENOMEM;
ceph_con_send(&session->s_con, msg);
return 0;
}
struct ceph_mds_session *session)
{
struct ceph_msg *msg;
- int err = 0;
dout("request_close_session mds%d state %s seq %lld\n",
session->s_mds, session_state_name(session->s_state),
session->s_seq);
msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
- if (IS_ERR(msg))
- err = PTR_ERR(msg);
- else
- ceph_con_send(&session->s_con, msg);
- return err;
+ if (!msg)
+ return -ENOMEM;
+ ceph_con_send(&session->s_con, msg);
+ return 0;
}
/*
session->s_trim_caps--;
if (oissued) {
/* we aren't the only cap.. just remove us */
- __ceph_remove_cap(cap, NULL);
+ __ceph_remove_cap(cap);
} else {
/* try to drop referring dentries */
spin_unlock(&inode->i_lock);
while (session->s_num_cap_releases < session->s_nr_caps + extra) {
spin_unlock(&session->s_cap_lock);
- msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, PAGE_CACHE_SIZE,
- 0, 0, NULL);
+ msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, PAGE_CACHE_SIZE);
if (!msg)
goto out_unlocked;
dout("add_cap_releases %p msg %p now %d\n", session, msg,
if (!req)
return ERR_PTR(-ENOMEM);
+ mutex_init(&req->r_fill_mutex);
req->r_started = jiffies;
req->r_resend_mds = -1;
INIT_LIST_HEAD(&req->r_unsafe_dir_item);
}
/*
- * return oldest (lowest) tid in request tree, 0 if none.
+ * return the oldest (lowest tid) request in the request tree, or NULL
+ * if none; __get_oldest_tid() returns its tid, or 0 if none.
*
* called under mdsc->mutex.
*/
+static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
+{
+ if (RB_EMPTY_ROOT(&mdsc->request_tree))
+ return NULL;
+ return rb_entry(rb_first(&mdsc->request_tree),
+ struct ceph_mds_request, r_node);
+}
+
static u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
{
- struct ceph_mds_request *first;
- if (radix_tree_gang_lookup(&mdsc->request_tree,
- (void **)&first, 0, 1) <= 0)
- return 0;
- return first->r_tid;
+ struct ceph_mds_request *req = __get_oldest_req(mdsc);
+
+ if (req)
+ return req->r_tid;
+ return 0;
}
/*
struct inode *inode = temp->d_inode;
if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
- dout("build_path_dentry path+%d: %p SNAPDIR\n",
+ dout("build_path path+%d: %p SNAPDIR\n",
pos, temp);
} else if (stop_on_nosnap && inode &&
ceph_snap(inode) == CEPH_NOSNAP) {
break;
strncpy(path + pos, temp->d_name.name,
temp->d_name.len);
- dout("build_path_dentry path+%d: %p '%.*s'\n",
- pos, temp, temp->d_name.len, path + pos);
}
if (pos)
path[--pos] = '/';
temp = temp->d_parent;
if (temp == NULL) {
- pr_err("build_path_dentry corrupt dentry\n");
+ pr_err("build_path corrupt dentry\n");
kfree(path);
return ERR_PTR(-EINVAL);
}
}
if (pos != 0) {
- pr_err("build_path_dentry did not end path lookup where "
+ pr_err("build_path did not end path lookup where "
"expected, namelen is %d, pos is %d\n", len, pos);
/* presumably this is only possible if racing with a
rename of one of the parent directories (we can not
*base = ceph_ino(temp->d_inode);
*plen = len;
- dout("build_path_dentry on %p %d built %llx '%.*s'\n",
+ dout("build_path on %p %d built %llx '%.*s'\n",
dentry, atomic_read(&dentry->d_count), *base, len, path);
return path;
}
}
len = sizeof(*head) +
- pathlen1 + pathlen2 + 2*(sizeof(u32) + sizeof(u64));
+ pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64));
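+ /* (each filepath is encoded as a version byte, a u64 ino, and a
+ * u32 length-prefixed path string) */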
/* calculate (max) length for cap releases */
len += sizeof(struct ceph_mds_request_release) *
if (req->r_old_dentry_drop)
len += req->r_old_dentry->d_name.len;
- msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, 0, 0, NULL);
- if (IS_ERR(msg))
+ msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len);
+ if (!msg) {
+ msg = ERR_PTR(-ENOMEM);
goto out_free2;
+ }
msg->hdr.tid = cpu_to_le64(req->r_tid);
}
msg = create_request_message(mdsc, req, mds);
if (IS_ERR(msg)) {
- req->r_reply = ERR_PTR(PTR_ERR(msg));
+ req->r_err = PTR_ERR(msg);
complete_request(mdsc, req);
- return -PTR_ERR(msg);
+ return PTR_ERR(msg);
}
req->r_request = msg;
int mds = -1;
int err = -EAGAIN;
- if (req->r_reply)
+ if (req->r_err || req->r_got_result)
goto out;
if (req->r_timeout &&
/* get, open session */
session = __ceph_lookup_mds_session(mdsc, mds);
- if (!session)
+ if (!session) {
session = register_session(mdsc, mds);
+ if (IS_ERR(session)) {
+ err = PTR_ERR(session);
+ goto finish;
+ }
+ }
dout("do_request mds%d session %p state %s\n", mds, session,
session_state_name(session->s_state));
if (session->s_state != CEPH_MDS_SESSION_OPEN &&
return err;
finish:
- req->r_reply = ERR_PTR(err);
+ req->r_err = err;
complete_request(mdsc, req);
goto out;
}
*/
static void kick_requests(struct ceph_mds_client *mdsc, int mds, int all)
{
- struct ceph_mds_request *reqs[10];
- u64 nexttid = 0;
- int i, got;
+ struct ceph_mds_request *req;
+ struct rb_node *p;
dout("kick_requests mds%d\n", mds);
- while (nexttid <= mdsc->last_tid) {
- got = radix_tree_gang_lookup(&mdsc->request_tree,
- (void **)&reqs, nexttid, 10);
- if (got == 0)
- break;
- nexttid = reqs[got-1]->r_tid + 1;
- for (i = 0; i < got; i++) {
- if (reqs[i]->r_got_unsafe)
- continue;
- if (reqs[i]->r_session &&
- reqs[i]->r_session->s_mds == mds) {
- dout(" kicking tid %llu\n", reqs[i]->r_tid);
- put_request_session(reqs[i]);
- __do_request(mdsc, reqs[i]);
- }
+ for (p = rb_first(&mdsc->request_tree); p; p = rb_next(p)) {
+ req = rb_entry(p, struct ceph_mds_request, r_node);
+ if (req->r_got_unsafe)
+ continue;
+ if (req->r_session &&
+ req->r_session->s_mds == mds) {
+ dout(" kicking tid %llu\n", req->r_tid);
+ put_request_session(req);
+ __do_request(mdsc, req);
}
}
}
__register_request(mdsc, req, dir);
__do_request(mdsc, req);
- /* wait */
- if (!req->r_reply) {
- mutex_unlock(&mdsc->mutex);
- if (req->r_timeout) {
- err = (long)wait_for_completion_interruptible_timeout(
- &req->r_completion, req->r_timeout);
- if (err == 0)
- req->r_reply = ERR_PTR(-EIO);
- else if (err < 0)
- req->r_reply = ERR_PTR(err);
- } else {
- err = wait_for_completion_interruptible(
- &req->r_completion);
- if (err)
- req->r_reply = ERR_PTR(err);
- }
- mutex_lock(&mdsc->mutex);
+ if (req->r_err) {
+ err = req->r_err;
+ __unregister_request(mdsc, req);
+ dout("do_request early error %d\n", err);
+ goto out;
}
- if (IS_ERR(req->r_reply)) {
- err = PTR_ERR(req->r_reply);
- req->r_reply = NULL;
+ /* wait */
+ mutex_unlock(&mdsc->mutex);
+ dout("do_request waiting\n");
+ if (req->r_timeout) {
+ err = (long)wait_for_completion_interruptible_timeout(
+ &req->r_completion, req->r_timeout);
+ if (err == 0)
+ err = -EIO;
+ } else {
+ err = wait_for_completion_interruptible(&req->r_completion);
+ }
+ dout("do_request waited, got %d\n", err);
+ mutex_lock(&mdsc->mutex);
- if (err == -ERESTARTSYS) {
- /* aborted */
- req->r_aborted = true;
+ /* only abort if we didn't race with a real reply */
+ if (req->r_got_result) {
+ err = le32_to_cpu(req->r_reply_info.head->result);
+ } else if (err < 0) {
+ dout("aborted request %lld with %d\n", req->r_tid, err);
- if (req->r_locked_dir &&
- (req->r_op & CEPH_MDS_OP_WRITE)) {
- struct ceph_inode_info *ci =
- ceph_inode(req->r_locked_dir);
+ /*
+ * ensure we aren't running concurrently with
+ * ceph_fill_trace or ceph_readdir_prepopulate, which
+ * rely on locks (dir mutex) held by our caller.
+ */
+ mutex_lock(&req->r_fill_mutex);
+ req->r_err = err;
+ req->r_aborted = true;
+ mutex_unlock(&req->r_fill_mutex);
- dout("aborted, clearing I_COMPLETE on %p\n",
- req->r_locked_dir);
- spin_lock(&req->r_locked_dir->i_lock);
- ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
- ci->i_release_count++;
- spin_unlock(&req->r_locked_dir->i_lock);
- }
- } else {
- /* clean up this request */
- __unregister_request(mdsc, req);
- if (!list_empty(&req->r_unsafe_item))
- list_del_init(&req->r_unsafe_item);
- complete(&req->r_safe_completion);
+ if (req->r_locked_dir &&
+ (req->r_op & CEPH_MDS_OP_WRITE)) {
+ struct ceph_inode_info *ci =
+ ceph_inode(req->r_locked_dir);
+
+ dout("aborted, clearing I_COMPLETE on %p, leases\n",
+ req->r_locked_dir);
+ spin_lock(&req->r_locked_dir->i_lock);
+ ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
+ ci->i_release_count++;
+ spin_unlock(&req->r_locked_dir->i_lock);
+
+ if (req->r_dentry)
+ ceph_invalidate_dentry_lease(req->r_dentry);
+ if (req->r_old_dentry)
+ ceph_invalidate_dentry_lease(req->r_old_dentry);
}
- } else if (req->r_err) {
- err = req->r_err;
} else {
- err = le32_to_cpu(req->r_reply_info.head->result);
+ err = req->r_err;
}
- mutex_unlock(&mdsc->mutex);
+out:
+ mutex_unlock(&mdsc->mutex);
dout("do_request %p done, result %d\n", req, err);
return err;
}
struct ceph_mds_reply_info_parsed *rinfo; /* parsed reply info */
u64 tid;
int err, result;
- int mds;
+ int mds = session->s_mds;
- if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS)
- return;
if (msg->front.iov_len < sizeof(*head)) {
pr_err("mdsc_handle_reply got corrupt (short) reply\n");
ceph_msg_dump(msg);
return;
}
dout("handle_reply %p\n", req);
- mds = le64_to_cpu(msg->hdr.src.name.num);
/* correct session? */
- if (!req->r_session && req->r_session != session) {
+ if (req->r_session != session) {
pr_err("mdsc_handle_reply got %llu on session mds%d"
" not mds%d\n", tid, session->s_mds,
req->r_session ? req->r_session->s_mds : -1);
list_del_init(&req->r_unsafe_item);
/* last unsafe request during umount? */
- if (mdsc->stopping && !__get_oldest_tid(mdsc))
+ if (mdsc->stopping && !__get_oldest_req(mdsc))
complete(&mdsc->safe_umount_waiters);
mutex_unlock(&mdsc->mutex);
goto out;
}
- }
-
- BUG_ON(req->r_reply);
-
- if (!head->safe) {
+ } else {
req->r_got_unsafe = true;
list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
}
}
/* insert trace into our cache */
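+ /* r_fill_mutex excludes the abort path, which sets r_aborted */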
+ mutex_lock(&req->r_fill_mutex);
err = ceph_fill_trace(mdsc->client->sb, req, req->r_session);
if (err == 0) {
if (result == 0 && rinfo->dir_nr)
ceph_readdir_prepopulate(req, req->r_session);
ceph_unreserve_caps(&req->r_caps_reservation);
}
+ mutex_unlock(&req->r_fill_mutex);
up_read(&mdsc->snap_rwsem);
out_err:
- if (err) {
- req->r_err = err;
+ mutex_lock(&mdsc->mutex);
+ if (!req->r_aborted) {
+ if (err) {
+ req->r_err = err;
+ } else {
+ req->r_reply = msg;
+ ceph_msg_get(msg);
+ req->r_got_result = true;
+ }
} else {
- req->r_reply = msg;
- ceph_msg_get(msg);
+ dout("reply arrived after request %lld was aborted\n", tid);
}
+ mutex_unlock(&mdsc->mutex);
add_cap_releases(mdsc, req->r_session, -1);
mutex_unlock(&session->s_mutex);
/*
* handle mds notification that our request has been forwarded.
*/
-static void handle_forward(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
+static void handle_forward(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session,
+ struct ceph_msg *msg)
{
struct ceph_mds_request *req;
- u64 tid;
+ u64 tid = le64_to_cpu(msg->hdr.tid);
u32 next_mds;
u32 fwd_seq;
- u8 must_resend;
int err = -EINVAL;
void *p = msg->front.iov_base;
void *end = p + msg->front.iov_len;
- int from_mds, state;
-
- if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS)
- goto bad;
- from_mds = le64_to_cpu(msg->hdr.src.name.num);
- ceph_decode_need(&p, end, sizeof(u64)+2*sizeof(u32), bad);
- tid = ceph_decode_64(&p);
+ ceph_decode_need(&p, end, 2*sizeof(u32), bad);
next_mds = ceph_decode_32(&p);
fwd_seq = ceph_decode_32(&p);
- must_resend = ceph_decode_8(&p);
-
- WARN_ON(must_resend); /* shouldn't happen. */
mutex_lock(&mdsc->mutex);
req = __lookup_request(mdsc, tid);
if (!req) {
- dout("forward %llu dne\n", tid);
+ dout("forward %llu to mds%d - req dne\n", tid, next_mds);
goto out; /* dup reply? */
}
- state = mdsc->sessions[next_mds]->s_state;
if (fwd_seq <= req->r_num_fwd) {
dout("forward %llu to mds%d - old seq %d <= %d\n",
tid, next_mds, req->r_num_fwd, fwd_seq);
struct ceph_mds_client *mdsc = session->s_mdsc;
u32 op;
u64 seq;
- int mds;
+ int mds = session->s_mds;
struct ceph_mds_session_head *h = msg->front.iov_base;
int wake = 0;
- if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS)
- return;
- mds = le64_to_cpu(msg->hdr.src.name.num);
-
/* decode */
if (msg->front.iov_len != sizeof(*h))
goto bad;
seq = le64_to_cpu(h->seq);
mutex_lock(&mdsc->mutex);
+ if (op == CEPH_SESSION_CLOSE)
+ __unregister_session(mdsc, session);
/* FIXME: this ttl calculation is generous */
session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
mutex_unlock(&mdsc->mutex);
break;
case CEPH_SESSION_CLOSE:
- unregister_session(mdsc, session);
remove_session_caps(session);
wake = 1; /* for good measure */
complete(&mdsc->session_close_waiters);
{
struct ceph_mds_session *session = NULL;
struct ceph_msg *reply;
- int err;
- int got;
- u64 next_snap_ino = 0;
+ struct rb_node *p;
+ int err = -ENOMEM;
struct ceph_pagelist *pagelist;
pr_info("reconnect to recovering mds%d\n", mds);
goto fail_nopagelist;
ceph_pagelist_init(pagelist);
- reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, 0, 0, NULL);
- if (IS_ERR(reply)) {
- err = PTR_ERR(reply);
+ err = -ENOMEM;
+ reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0);
+ if (!reply)
goto fail_nomsg;
- }
/* find session */
session = __ceph_lookup_mds_session(mdsc, mds);
goto fail;
err = iterate_session_caps(session, encode_caps_cb, pagelist);
if (err < 0)
- goto out;
+ goto fail;
/*
* snaprealms. we provide mds with the ino, seq (version), and
* parent for all of our realms. If the mds has any newer info,
* it will tell us.
*/
- next_snap_ino = 0;
- while (1) {
- struct ceph_snap_realm *realm;
+ for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
+ struct ceph_snap_realm *realm =
+ rb_entry(p, struct ceph_snap_realm, node);
struct ceph_mds_snaprealm_reconnect sr_rec;
- got = radix_tree_gang_lookup(&mdsc->snap_realms,
- (void **)&realm, next_snap_ino, 1);
- if (!got)
- break;
dout(" adding snap realm %llx seq %lld parent %llx\n",
realm->ino, realm->seq, realm->parent_ino);
err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
if (err)
goto fail;
- next_snap_ino = realm->ino + 1;
}
send:
reply->nr_pages = calc_pages_for(0, pagelist->length);
ceph_con_send(&session->s_con, reply);
- if (session) {
- session->s_state = CEPH_MDS_SESSION_OPEN;
- __wake_requests(mdsc, &session->s_waiting);
- }
+ session->s_state = CEPH_MDS_SESSION_OPEN;
+ mutex_unlock(&session->s_mutex);
+
+ mutex_lock(&mdsc->mutex);
+ __wake_requests(mdsc, &session->s_waiting);
+ mutex_unlock(&mdsc->mutex);
+
+ ceph_put_mds_session(session);
-out:
up_read(&mdsc->snap_rwsem);
- if (session) {
- mutex_unlock(&session->s_mutex);
- ceph_put_mds_session(session);
- }
mutex_lock(&mdsc->mutex);
return;
fail:
ceph_msg_put(reply);
+ up_read(&mdsc->snap_rwsem);
+ mutex_unlock(&session->s_mutex);
+ ceph_put_mds_session(session);
fail_nomsg:
ceph_pagelist_release(pagelist);
kfree(pagelist);
fail_nopagelist:
- pr_err("ENOMEM preparing reconnect for mds%d\n", mds);
- goto out;
+ pr_err("error %d preparing reconnect for mds%d\n", err, mds);
+ mutex_lock(&mdsc->mutex);
+ return;
}
/* the session never opened, just close it
* out now */
__wake_requests(mdsc, &s->s_waiting);
- unregister_session(mdsc, s);
+ __unregister_session(mdsc, s);
} else {
/* just close it */
mutex_unlock(&mdsc->mutex);
di->lease_session = NULL;
}
-static void handle_lease(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
+static void handle_lease(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session,
+ struct ceph_msg *msg)
{
struct super_block *sb = mdsc->client->sb;
struct inode *inode;
- struct ceph_mds_session *session;
struct ceph_inode_info *ci;
struct dentry *parent, *dentry;
struct ceph_dentry_info *di;
- int mds;
+ int mds = session->s_mds;
struct ceph_mds_lease *h = msg->front.iov_base;
struct ceph_vino vino;
int mask;
struct qstr dname;
int release = 0;
- if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS)
- return;
- mds = le64_to_cpu(msg->hdr.src.name.num);
dout("handle_lease from mds%d\n", mds);
/* decode */
if (dname.len != get_unaligned_le32(h+1))
goto bad;
- /* find session */
- mutex_lock(&mdsc->mutex);
- session = __ceph_lookup_mds_session(mdsc, mds);
- mutex_unlock(&mdsc->mutex);
- if (!session) {
- pr_err("handle_lease got lease but no session mds%d\n", mds);
- return;
- }
-
mutex_lock(&session->s_mutex);
session->s_seq++;
out:
iput(inode);
mutex_unlock(&session->s_mutex);
- ceph_put_mds_session(session);
return;
bad:
dnamelen = dentry->d_name.len;
len += dnamelen;
- msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, 0, 0, NULL);
- if (IS_ERR(msg))
+ msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len);
+ if (!msg)
return;
lease = msg->front.iov_base;
lease->action = action;
mdsc->client = client;
mutex_init(&mdsc->mutex);
mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
+ if (mdsc->mdsmap == NULL)
+ return -ENOMEM;
+
init_completion(&mdsc->safe_umount_waiters);
init_completion(&mdsc->session_close_waiters);
INIT_LIST_HEAD(&mdsc->waiting_for_map);
mdsc->max_sessions = 0;
mdsc->stopping = 0;
init_rwsem(&mdsc->snap_rwsem);
- INIT_RADIX_TREE(&mdsc->snap_realms, GFP_NOFS);
+ mdsc->snap_realms = RB_ROOT;
INIT_LIST_HEAD(&mdsc->snap_empty);
spin_lock_init(&mdsc->snap_empty_lock);
mdsc->last_tid = 0;
- INIT_RADIX_TREE(&mdsc->request_tree, GFP_NOFS);
+ mdsc->request_tree = RB_ROOT;
INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
mdsc->last_renew_caps = jiffies;
INIT_LIST_HEAD(&mdsc->cap_delay_list);
init_waitqueue_head(&mdsc->cap_flushing_wq);
spin_lock_init(&mdsc->dentry_lru_lock);
INIT_LIST_HEAD(&mdsc->dentry_lru);
+
return 0;
}
struct ceph_client *client = mdsc->client;
mutex_lock(&mdsc->mutex);
- if (__get_oldest_tid(mdsc)) {
+ if (__get_oldest_req(mdsc)) {
mutex_unlock(&mdsc->mutex);
+
dout("wait_requests waiting for requests\n");
wait_for_completion_timeout(&mdsc->safe_umount_waiters,
client->mount_args->mount_timeout * HZ);
- mutex_lock(&mdsc->mutex);
/* tear down remaining requests */
- while (radix_tree_gang_lookup(&mdsc->request_tree,
- (void **)&req, 0, 1)) {
+ mutex_lock(&mdsc->mutex);
+ while ((req = __get_oldest_req(mdsc))) {
dout("wait_requests timed out on tid %llu\n",
req->r_tid);
- radix_tree_delete(&mdsc->request_tree, req->r_tid);
- ceph_mdsc_put_request(req);
+ __unregister_request(mdsc, req);
}
}
mutex_unlock(&mdsc->mutex);
*/
static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
{
- struct ceph_mds_request *req;
- u64 next_tid = 0;
- int got;
+ struct ceph_mds_request *req = NULL, *nextreq;
+ struct rb_node *n;
mutex_lock(&mdsc->mutex);
dout("wait_unsafe_requests want %lld\n", want_tid);
- while (1) {
- got = radix_tree_gang_lookup(&mdsc->request_tree, (void **)&req,
- next_tid, 1);
- if (!got)
- break;
- if (req->r_tid > want_tid)
- break;
-
- next_tid = req->r_tid + 1;
- if ((req->r_op & CEPH_MDS_OP_WRITE) == 0)
- continue; /* not a write op */
-
- ceph_mdsc_get_request(req);
- mutex_unlock(&mdsc->mutex);
- dout("wait_unsafe_requests wait on %llu (want %llu)\n",
- req->r_tid, want_tid);
- wait_for_completion(&req->r_safe_completion);
- mutex_lock(&mdsc->mutex);
- ceph_mdsc_put_request(req);
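+ /*
+ * Walk requests in tid order. Before dropping mdsc->mutex to
+ * wait, take a ref on the next request; __unregister_request()
+ * does RB_CLEAR_NODE(), so an empty r_node afterwards means the
+ * next request left the tree and the walk must restart.
+ */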
+restart:
+ req = __get_oldest_req(mdsc);
+ while (req && req->r_tid <= want_tid) {
+ /* find next request */
+ n = rb_next(&req->r_node);
+ if (n)
+ nextreq = rb_entry(n, struct ceph_mds_request, r_node);
+ else
+ nextreq = NULL;
+ if ((req->r_op & CEPH_MDS_OP_WRITE)) {
+ /* write op */
+ ceph_mdsc_get_request(req);
+ if (nextreq)
+ ceph_mdsc_get_request(nextreq);
+ mutex_unlock(&mdsc->mutex);
+ dout("wait_unsafe_requests wait on %llu (want %llu)\n",
+ req->r_tid, want_tid);
+ wait_for_completion(&req->r_safe_completion);
+ mutex_lock(&mdsc->mutex);
+ ceph_mdsc_put_request(req);
+ if (!nextreq)
+ break; /* next dne before, so we're done! */
+ if (RB_EMPTY_NODE(&nextreq->r_node)) {
+ /* next request was removed from tree */
+ ceph_mdsc_put_request(nextreq);
+ goto restart;
+ }
+ ceph_mdsc_put_request(nextreq); /* won't go away */
+ }
+ req = nextreq;
}
mutex_unlock(&mdsc->mutex);
dout("wait_unsafe_requests done\n");
{
u64 want_tid, want_flush;
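+ /* nothing to sync if the mount has been forcibly shut down */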
+ if (mdsc->client->mount_state == CEPH_MOUNT_SHUTDOWN)
+ return;
+
dout("sync\n");
mutex_lock(&mdsc->mutex);
want_tid = mdsc->last_tid;
for (i = 0; i < mdsc->max_sessions; i++) {
if (mdsc->sessions[i]) {
session = get_session(mdsc->sessions[i]);
- unregister_session(mdsc, session);
+ __unregister_session(mdsc, session);
mutex_unlock(&mdsc->mutex);
mutex_lock(&session->s_mutex);
remove_session_caps(session);
struct ceph_mds_session *s = con->private;
if (get_session(s)) {
- dout("mdsc con_get %p %d -> %d\n", s,
- atomic_read(&s->s_ref) - 1, atomic_read(&s->s_ref));
+ dout("mdsc con_get %p ok (%d)\n", s, atomic_read(&s->s_ref));
return con;
}
dout("mdsc con_get %p FAIL\n", s);
{
struct ceph_mds_session *s = con->private;
- dout("mdsc con_put %p %d -> %d\n", s, atomic_read(&s->s_ref),
- atomic_read(&s->s_ref) - 1);
ceph_put_mds_session(s);
+ dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref));
}
/*
struct ceph_mds_client *mdsc = s->s_mdsc;
int type = le16_to_cpu(msg->hdr.type);
+ mutex_lock(&mdsc->mutex);
+ if (__verify_registered_session(mdsc, s) < 0) {
+ mutex_unlock(&mdsc->mutex);
+ goto out;
+ }
+ mutex_unlock(&mdsc->mutex);
+
switch (type) {
case CEPH_MSG_MDS_MAP:
ceph_mdsc_handle_map(mdsc, msg);
handle_reply(s, msg);
break;
case CEPH_MSG_CLIENT_REQUEST_FORWARD:
- handle_forward(mdsc, msg);
+ handle_forward(mdsc, s, msg);
break;
case CEPH_MSG_CLIENT_CAPS:
ceph_handle_caps(s, msg);
break;
case CEPH_MSG_CLIENT_SNAP:
- ceph_handle_snap(mdsc, msg);
+ ceph_handle_snap(mdsc, s, msg);
break;
case CEPH_MSG_CLIENT_LEASE:
- handle_lease(mdsc, msg);
+ handle_lease(mdsc, s, msg);
break;
default:
pr_err("received unknown message type %d %s\n", type,
ceph_msg_type_name(type));
}
+out:
ceph_msg_put(msg);
}
return ac->ops->verify_authorizer_reply(ac, s->s_authorizer, len);
}
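+/*
+ * The peer rejected our authorizer: invalidate our MDS auth
+ * credentials and revalidate auth with the monitor.
+ */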
+static int invalidate_authorizer(struct ceph_connection *con)
+{
+ struct ceph_mds_session *s = con->private;
+ struct ceph_mds_client *mdsc = s->s_mdsc;
+ struct ceph_auth_client *ac = mdsc->client->monc.auth;
+
+ if (ac->ops->invalidate_authorizer)
+ ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);
+
+ return ceph_monc_validate_auth(&mdsc->client->monc);
+}
+
const static struct ceph_connection_operations mds_con_ops = {
.get = con_get,
.put = con_put,
.dispatch = dispatch,
.get_authorizer = get_authorizer,
.verify_authorizer_reply = verify_authorizer_reply,
+ .invalidate_authorizer = invalidate_authorizer,
.peer_reset = peer_reset,
- .alloc_msg = ceph_alloc_msg,
- .alloc_middle = ceph_alloc_middle,
};