ocfs2_dlmfs: Use the stackglue.

Rather than directly using o2dlm, dlmfs can now use the stackglue.  This
allows it to use userspace cluster stacks and fs/dlm.  This commit
forces o2cb for now.  A later commit will bump the protocol version and
allow non-o2cb stacks.

This is one big sed, really.  LKM_xxMODE becomes DLM_LOCK_xx.  LKM_flag
becomes DLM_LKF_flag.

We also learn to check that the LVB is valid before reading it.  Any DLM
can lose the contents of the LVB during a complicated recovery.  userdlm
should be checking this.  Now it does.  dlmfs will return 0 from read(2)
if the LVB was invalid.

Signed-off-by: Joel Becker <joel.becker@oracle.com>
This commit is contained in:
Joel Becker 2010-01-30 04:33:50 -08:00
parent e8fce482f3
commit 0016eedc41
3 changed files with 170 additions and 177 deletions

View file

@ -47,21 +47,13 @@
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include "stackglue.h"
#include "cluster/nodemanager.h"
#include "cluster/heartbeat.h"
#include "cluster/tcp.h"
#include "dlm/dlmapi.h"
#include "userdlm.h" #include "userdlm.h"
#include "dlmfsver.h" #include "dlmfsver.h"
#define MLOG_MASK_PREFIX ML_DLMFS #define MLOG_MASK_PREFIX ML_DLMFS
#include "cluster/masklog.h" #include "cluster/masklog.h"
#include "ocfs2_lockingver.h"
static const struct super_operations dlmfs_ops; static const struct super_operations dlmfs_ops;
static const struct file_operations dlmfs_file_operations; static const struct file_operations dlmfs_file_operations;
@ -72,15 +64,6 @@ static struct kmem_cache *dlmfs_inode_cache;
struct workqueue_struct *user_dlm_worker; struct workqueue_struct *user_dlm_worker;
/*
* This is the userdlmfs locking protocol version.
*
* See fs/ocfs2/dlmglue.c for more details on locking versions.
*/
static const struct dlm_protocol_version user_locking_protocol = {
.pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
.pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
};
/* /*
@ -259,7 +242,7 @@ static ssize_t dlmfs_file_read(struct file *filp,
loff_t *ppos) loff_t *ppos)
{ {
int bytes_left; int bytes_left;
ssize_t readlen; ssize_t readlen, got;
char *lvb_buf; char *lvb_buf;
struct inode *inode = filp->f_path.dentry->d_inode; struct inode *inode = filp->f_path.dentry->d_inode;
@ -285,9 +268,13 @@ static ssize_t dlmfs_file_read(struct file *filp,
if (!lvb_buf) if (!lvb_buf)
return -ENOMEM; return -ENOMEM;
user_dlm_read_lvb(inode, lvb_buf, readlen); got = user_dlm_read_lvb(inode, lvb_buf, readlen);
bytes_left = __copy_to_user(buf, lvb_buf, readlen); if (got) {
readlen -= bytes_left; BUG_ON(got != readlen);
bytes_left = __copy_to_user(buf, lvb_buf, readlen);
readlen -= bytes_left;
} else
readlen = 0;
kfree(lvb_buf); kfree(lvb_buf);
@ -346,7 +333,7 @@ static void dlmfs_init_once(void *foo)
struct dlmfs_inode_private *ip = struct dlmfs_inode_private *ip =
(struct dlmfs_inode_private *) foo; (struct dlmfs_inode_private *) foo;
ip->ip_dlm = NULL; ip->ip_conn = NULL;
ip->ip_parent = NULL; ip->ip_parent = NULL;
inode_init_once(&ip->ip_vfs_inode); inode_init_once(&ip->ip_vfs_inode);
@ -388,14 +375,14 @@ static void dlmfs_clear_inode(struct inode *inode)
goto clear_fields; goto clear_fields;
} }
mlog(0, "we're a directory, ip->ip_dlm = 0x%p\n", ip->ip_dlm); mlog(0, "we're a directory, ip->ip_conn = 0x%p\n", ip->ip_conn);
/* we must be a directory. If required, lets unregister the /* we must be a directory. If required, lets unregister the
* dlm context now. */ * dlm context now. */
if (ip->ip_dlm) if (ip->ip_conn)
user_dlm_unregister_context(ip->ip_dlm); user_dlm_unregister(ip->ip_conn);
clear_fields: clear_fields:
ip->ip_parent = NULL; ip->ip_parent = NULL;
ip->ip_dlm = NULL; ip->ip_conn = NULL;
} }
static struct backing_dev_info dlmfs_backing_dev_info = { static struct backing_dev_info dlmfs_backing_dev_info = {
@ -445,7 +432,7 @@ static struct inode *dlmfs_get_inode(struct inode *parent,
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
ip = DLMFS_I(inode); ip = DLMFS_I(inode);
ip->ip_dlm = DLMFS_I(parent)->ip_dlm; ip->ip_conn = DLMFS_I(parent)->ip_conn;
switch (mode & S_IFMT) { switch (mode & S_IFMT) {
default: default:
@ -499,13 +486,12 @@ static int dlmfs_mkdir(struct inode * dir,
struct inode *inode = NULL; struct inode *inode = NULL;
struct qstr *domain = &dentry->d_name; struct qstr *domain = &dentry->d_name;
struct dlmfs_inode_private *ip; struct dlmfs_inode_private *ip;
struct dlm_ctxt *dlm; struct ocfs2_cluster_connection *conn;
struct dlm_protocol_version proto = user_locking_protocol;
mlog(0, "mkdir %.*s\n", domain->len, domain->name); mlog(0, "mkdir %.*s\n", domain->len, domain->name);
/* verify that we have a proper domain */ /* verify that we have a proper domain */
if (domain->len >= O2NM_MAX_NAME_LEN) { if (domain->len >= GROUP_NAME_MAX) {
status = -EINVAL; status = -EINVAL;
mlog(ML_ERROR, "invalid domain name for directory.\n"); mlog(ML_ERROR, "invalid domain name for directory.\n");
goto bail; goto bail;
@ -520,14 +506,14 @@ static int dlmfs_mkdir(struct inode * dir,
ip = DLMFS_I(inode); ip = DLMFS_I(inode);
dlm = user_dlm_register_context(domain, &proto); conn = user_dlm_register(domain);
if (IS_ERR(dlm)) { if (IS_ERR(conn)) {
status = PTR_ERR(dlm); status = PTR_ERR(conn);
mlog(ML_ERROR, "Error %d could not register domain \"%.*s\"\n", mlog(ML_ERROR, "Error %d could not register domain \"%.*s\"\n",
status, domain->len, domain->name); status, domain->len, domain->name);
goto bail; goto bail;
} }
ip->ip_dlm = dlm; ip->ip_conn = conn;
inc_nlink(dir); inc_nlink(dir);
d_instantiate(dentry, inode); d_instantiate(dentry, inode);
@ -696,6 +682,7 @@ static int __init init_dlmfs_fs(void)
} }
cleanup_worker = 1; cleanup_worker = 1;
user_dlm_set_locking_protocol();
status = register_filesystem(&dlmfs_fs_type); status = register_filesystem(&dlmfs_fs_type);
bail: bail:
if (status) { if (status) {

View file

@ -34,18 +34,19 @@
#include <linux/types.h> #include <linux/types.h>
#include <linux/crc32.h> #include <linux/crc32.h>
#include "ocfs2_lockingver.h"
#include "cluster/nodemanager.h" #include "stackglue.h"
#include "cluster/heartbeat.h"
#include "cluster/tcp.h"
#include "dlm/dlmapi.h"
#include "userdlm.h" #include "userdlm.h"
#define MLOG_MASK_PREFIX ML_DLMFS #define MLOG_MASK_PREFIX ML_DLMFS
#include "cluster/masklog.h" #include "cluster/masklog.h"
static inline struct user_lock_res *user_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
{
return container_of(lksb, struct user_lock_res, l_lksb);
}
static inline int user_check_wait_flag(struct user_lock_res *lockres, static inline int user_check_wait_flag(struct user_lock_res *lockres,
int flag) int flag)
{ {
@ -73,15 +74,15 @@ static inline void user_wait_on_blocked_lock(struct user_lock_res *lockres)
} }
/* I heart container_of... */ /* I heart container_of... */
static inline struct dlm_ctxt * static inline struct ocfs2_cluster_connection *
dlm_ctxt_from_user_lockres(struct user_lock_res *lockres) cluster_connection_from_user_lockres(struct user_lock_res *lockres)
{ {
struct dlmfs_inode_private *ip; struct dlmfs_inode_private *ip;
ip = container_of(lockres, ip = container_of(lockres,
struct dlmfs_inode_private, struct dlmfs_inode_private,
ip_lockres); ip_lockres);
return ip->ip_dlm; return ip->ip_conn;
} }
static struct inode * static struct inode *
@ -103,9 +104,9 @@ static inline void user_recover_from_dlm_error(struct user_lock_res *lockres)
} }
#define user_log_dlm_error(_func, _stat, _lockres) do { \ #define user_log_dlm_error(_func, _stat, _lockres) do { \
mlog(ML_ERROR, "Dlm error \"%s\" while calling %s on " \ mlog(ML_ERROR, "Dlm error %d while calling %s on " \
"resource %.*s: %s\n", dlm_errname(_stat), _func, \ "resource %.*s\n", _stat, _func, \
_lockres->l_namelen, _lockres->l_name, dlm_errmsg(_stat)); \ _lockres->l_namelen, _lockres->l_name); \
} while (0) } while (0)
/* WARNING: This function lives in a world where the only three lock /* WARNING: This function lives in a world where the only three lock
@ -113,34 +114,34 @@ static inline void user_recover_from_dlm_error(struct user_lock_res *lockres)
* lock types are added. */ * lock types are added. */
static inline int user_highest_compat_lock_level(int level) static inline int user_highest_compat_lock_level(int level)
{ {
int new_level = LKM_EXMODE; int new_level = DLM_LOCK_EX;
if (level == LKM_EXMODE) if (level == DLM_LOCK_EX)
new_level = LKM_NLMODE; new_level = DLM_LOCK_NL;
else if (level == LKM_PRMODE) else if (level == DLM_LOCK_PR)
new_level = LKM_PRMODE; new_level = DLM_LOCK_PR;
return new_level; return new_level;
} }
static void user_ast(void *opaque) static void user_ast(struct ocfs2_dlm_lksb *lksb)
{ {
struct user_lock_res *lockres = opaque; struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);
struct dlm_lockstatus *lksb; int status;
mlog(0, "AST fired for lockres %.*s\n", lockres->l_namelen, mlog(0, "AST fired for lockres %.*s\n", lockres->l_namelen,
lockres->l_name); lockres->l_name);
spin_lock(&lockres->l_lock); spin_lock(&lockres->l_lock);
lksb = &(lockres->l_lksb); status = ocfs2_dlm_lock_status(&lockres->l_lksb);
if (lksb->status != DLM_NORMAL) { if (status) {
mlog(ML_ERROR, "lksb status value of %u on lockres %.*s\n", mlog(ML_ERROR, "lksb status value of %u on lockres %.*s\n",
lksb->status, lockres->l_namelen, lockres->l_name); status, lockres->l_namelen, lockres->l_name);
spin_unlock(&lockres->l_lock); spin_unlock(&lockres->l_lock);
return; return;
} }
mlog_bug_on_msg(lockres->l_requested == LKM_IVMODE, mlog_bug_on_msg(lockres->l_requested == DLM_LOCK_IV,
"Lockres %.*s, requested ivmode. flags 0x%x\n", "Lockres %.*s, requested ivmode. flags 0x%x\n",
lockres->l_namelen, lockres->l_name, lockres->l_flags); lockres->l_namelen, lockres->l_name, lockres->l_flags);
@ -148,13 +149,13 @@ static void user_ast(void *opaque)
if (lockres->l_requested < lockres->l_level) { if (lockres->l_requested < lockres->l_level) {
if (lockres->l_requested <= if (lockres->l_requested <=
user_highest_compat_lock_level(lockres->l_blocking)) { user_highest_compat_lock_level(lockres->l_blocking)) {
lockres->l_blocking = LKM_NLMODE; lockres->l_blocking = DLM_LOCK_NL;
lockres->l_flags &= ~USER_LOCK_BLOCKED; lockres->l_flags &= ~USER_LOCK_BLOCKED;
} }
} }
lockres->l_level = lockres->l_requested; lockres->l_level = lockres->l_requested;
lockres->l_requested = LKM_IVMODE; lockres->l_requested = DLM_LOCK_IV;
lockres->l_flags |= USER_LOCK_ATTACHED; lockres->l_flags |= USER_LOCK_ATTACHED;
lockres->l_flags &= ~USER_LOCK_BUSY; lockres->l_flags &= ~USER_LOCK_BUSY;
@ -193,11 +194,11 @@ static void __user_dlm_cond_queue_lockres(struct user_lock_res *lockres)
return; return;
switch (lockres->l_blocking) { switch (lockres->l_blocking) {
case LKM_EXMODE: case DLM_LOCK_EX:
if (!lockres->l_ex_holders && !lockres->l_ro_holders) if (!lockres->l_ex_holders && !lockres->l_ro_holders)
queue = 1; queue = 1;
break; break;
case LKM_PRMODE: case DLM_LOCK_PR:
if (!lockres->l_ex_holders) if (!lockres->l_ex_holders)
queue = 1; queue = 1;
break; break;
@ -209,9 +210,9 @@ static void __user_dlm_cond_queue_lockres(struct user_lock_res *lockres)
__user_dlm_queue_lockres(lockres); __user_dlm_queue_lockres(lockres);
} }
static void user_bast(void *opaque, int level) static void user_bast(struct ocfs2_dlm_lksb *lksb, int level)
{ {
struct user_lock_res *lockres = opaque; struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);
mlog(0, "Blocking AST fired for lockres %.*s. Blocking level %d\n", mlog(0, "Blocking AST fired for lockres %.*s. Blocking level %d\n",
lockres->l_namelen, lockres->l_name, level); lockres->l_namelen, lockres->l_name, level);
@ -227,15 +228,15 @@ static void user_bast(void *opaque, int level)
wake_up(&lockres->l_event); wake_up(&lockres->l_event);
} }
static void user_unlock_ast(void *opaque, enum dlm_status status) static void user_unlock_ast(struct ocfs2_dlm_lksb *lksb, int status)
{ {
struct user_lock_res *lockres = opaque; struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);
mlog(0, "UNLOCK AST called on lock %.*s\n", lockres->l_namelen, mlog(0, "UNLOCK AST called on lock %.*s\n", lockres->l_namelen,
lockres->l_name); lockres->l_name);
if (status != DLM_NORMAL && status != DLM_CANCELGRANT) if (status)
mlog(ML_ERROR, "Dlm returns status %d\n", status); mlog(ML_ERROR, "dlm returns status %d\n", status);
spin_lock(&lockres->l_lock); spin_lock(&lockres->l_lock);
/* The teardown flag gets set early during the unlock process, /* The teardown flag gets set early during the unlock process,
@ -243,7 +244,7 @@ static void user_unlock_ast(void *opaque, enum dlm_status status)
* for a concurrent cancel. */ * for a concurrent cancel. */
if (lockres->l_flags & USER_LOCK_IN_TEARDOWN if (lockres->l_flags & USER_LOCK_IN_TEARDOWN
&& !(lockres->l_flags & USER_LOCK_IN_CANCEL)) { && !(lockres->l_flags & USER_LOCK_IN_CANCEL)) {
lockres->l_level = LKM_IVMODE; lockres->l_level = DLM_LOCK_IV;
} else if (status == DLM_CANCELGRANT) { } else if (status == DLM_CANCELGRANT) {
/* We tried to cancel a convert request, but it was /* We tried to cancel a convert request, but it was
* already granted. Don't clear the busy flag - the * already granted. Don't clear the busy flag - the
@ -254,7 +255,7 @@ static void user_unlock_ast(void *opaque, enum dlm_status status)
} else { } else {
BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL)); BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL));
/* Cancel succeeded, we want to re-queue */ /* Cancel succeeded, we want to re-queue */
lockres->l_requested = LKM_IVMODE; /* cancel an lockres->l_requested = DLM_LOCK_IV; /* cancel an
* upconvert * upconvert
* request. */ * request. */
lockres->l_flags &= ~USER_LOCK_IN_CANCEL; lockres->l_flags &= ~USER_LOCK_IN_CANCEL;
@ -271,6 +272,21 @@ out_noclear:
wake_up(&lockres->l_event); wake_up(&lockres->l_event);
} }
/*
* This is the userdlmfs locking protocol version.
*
* See fs/ocfs2/dlmglue.c for more details on locking versions.
*/
static struct ocfs2_locking_protocol user_dlm_lproto = {
.lp_max_version = {
.pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
.pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
},
.lp_lock_ast = user_ast,
.lp_blocking_ast = user_bast,
.lp_unlock_ast = user_unlock_ast,
};
static inline void user_dlm_drop_inode_ref(struct user_lock_res *lockres) static inline void user_dlm_drop_inode_ref(struct user_lock_res *lockres)
{ {
struct inode *inode; struct inode *inode;
@ -283,7 +299,8 @@ static void user_dlm_unblock_lock(struct work_struct *work)
int new_level, status; int new_level, status;
struct user_lock_res *lockres = struct user_lock_res *lockres =
container_of(work, struct user_lock_res, l_work); container_of(work, struct user_lock_res, l_work);
struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres); struct ocfs2_cluster_connection *conn =
cluster_connection_from_user_lockres(lockres);
mlog(0, "processing lockres %.*s\n", lockres->l_namelen, mlog(0, "processing lockres %.*s\n", lockres->l_namelen,
lockres->l_name); lockres->l_name);
@ -322,20 +339,17 @@ static void user_dlm_unblock_lock(struct work_struct *work)
lockres->l_flags |= USER_LOCK_IN_CANCEL; lockres->l_flags |= USER_LOCK_IN_CANCEL;
spin_unlock(&lockres->l_lock); spin_unlock(&lockres->l_lock);
status = dlmunlock(dlm, status = ocfs2_dlm_unlock(conn, &lockres->l_lksb,
&lockres->l_lksb, DLM_LKF_CANCEL);
LKM_CANCEL, if (status)
user_unlock_ast, user_log_dlm_error("ocfs2_dlm_unlock", status, lockres);
lockres);
if (status != DLM_NORMAL)
user_log_dlm_error("dlmunlock", status, lockres);
goto drop_ref; goto drop_ref;
} }
/* If there are still incompat holders, we can exit safely /* If there are still incompat holders, we can exit safely
* without worrying about re-queueing this lock as that will * without worrying about re-queueing this lock as that will
* happen on the last call to user_cluster_unlock. */ * happen on the last call to user_cluster_unlock. */
if ((lockres->l_blocking == LKM_EXMODE) if ((lockres->l_blocking == DLM_LOCK_EX)
&& (lockres->l_ex_holders || lockres->l_ro_holders)) { && (lockres->l_ex_holders || lockres->l_ro_holders)) {
spin_unlock(&lockres->l_lock); spin_unlock(&lockres->l_lock);
mlog(0, "can't downconvert for ex: ro = %u, ex = %u\n", mlog(0, "can't downconvert for ex: ro = %u, ex = %u\n",
@ -343,7 +357,7 @@ static void user_dlm_unblock_lock(struct work_struct *work)
goto drop_ref; goto drop_ref;
} }
if ((lockres->l_blocking == LKM_PRMODE) if ((lockres->l_blocking == DLM_LOCK_PR)
&& lockres->l_ex_holders) { && lockres->l_ex_holders) {
spin_unlock(&lockres->l_lock); spin_unlock(&lockres->l_lock);
mlog(0, "can't downconvert for pr: ex = %u\n", mlog(0, "can't downconvert for pr: ex = %u\n",
@ -360,17 +374,12 @@ static void user_dlm_unblock_lock(struct work_struct *work)
spin_unlock(&lockres->l_lock); spin_unlock(&lockres->l_lock);
/* need lock downconvert request now... */ /* need lock downconvert request now... */
status = dlmlock(dlm, status = ocfs2_dlm_lock(conn, new_level, &lockres->l_lksb,
new_level, DLM_LKF_CONVERT|DLM_LKF_VALBLK,
&lockres->l_lksb, lockres->l_name,
LKM_CONVERT|LKM_VALBLK, lockres->l_namelen);
lockres->l_name, if (status) {
lockres->l_namelen, user_log_dlm_error("ocfs2_dlm_lock", status, lockres);
user_ast,
lockres,
user_bast);
if (status != DLM_NORMAL) {
user_log_dlm_error("dlmlock", status, lockres);
user_recover_from_dlm_error(lockres); user_recover_from_dlm_error(lockres);
} }
@ -382,10 +391,10 @@ static inline void user_dlm_inc_holders(struct user_lock_res *lockres,
int level) int level)
{ {
switch(level) { switch(level) {
case LKM_EXMODE: case DLM_LOCK_EX:
lockres->l_ex_holders++; lockres->l_ex_holders++;
break; break;
case LKM_PRMODE: case DLM_LOCK_PR:
lockres->l_ro_holders++; lockres->l_ro_holders++;
break; break;
default: default:
@ -410,10 +419,11 @@ int user_dlm_cluster_lock(struct user_lock_res *lockres,
int lkm_flags) int lkm_flags)
{ {
int status, local_flags; int status, local_flags;
struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres); struct ocfs2_cluster_connection *conn =
cluster_connection_from_user_lockres(lockres);
if (level != LKM_EXMODE && if (level != DLM_LOCK_EX &&
level != LKM_PRMODE) { level != DLM_LOCK_PR) {
mlog(ML_ERROR, "lockres %.*s: invalid request!\n", mlog(ML_ERROR, "lockres %.*s: invalid request!\n",
lockres->l_namelen, lockres->l_name); lockres->l_namelen, lockres->l_name);
status = -EINVAL; status = -EINVAL;
@ -422,7 +432,7 @@ int user_dlm_cluster_lock(struct user_lock_res *lockres,
mlog(0, "lockres %.*s: asking for %s lock, passed flags = 0x%x\n", mlog(0, "lockres %.*s: asking for %s lock, passed flags = 0x%x\n",
lockres->l_namelen, lockres->l_name, lockres->l_namelen, lockres->l_name,
(level == LKM_EXMODE) ? "LKM_EXMODE" : "LKM_PRMODE", (level == DLM_LOCK_EX) ? "DLM_LOCK_EX" : "DLM_LOCK_PR",
lkm_flags); lkm_flags);
again: again:
@ -457,35 +467,26 @@ again:
} }
if (level > lockres->l_level) { if (level > lockres->l_level) {
local_flags = lkm_flags | LKM_VALBLK; local_flags = lkm_flags | DLM_LKF_VALBLK;
if (lockres->l_level != LKM_IVMODE) if (lockres->l_level != DLM_LOCK_IV)
local_flags |= LKM_CONVERT; local_flags |= DLM_LKF_CONVERT;
lockres->l_requested = level; lockres->l_requested = level;
lockres->l_flags |= USER_LOCK_BUSY; lockres->l_flags |= USER_LOCK_BUSY;
spin_unlock(&lockres->l_lock); spin_unlock(&lockres->l_lock);
BUG_ON(level == LKM_IVMODE); BUG_ON(level == DLM_LOCK_IV);
BUG_ON(level == LKM_NLMODE); BUG_ON(level == DLM_LOCK_NL);
/* call dlm_lock to upgrade lock now */ /* call dlm_lock to upgrade lock now */
status = dlmlock(dlm, status = ocfs2_dlm_lock(conn, level, &lockres->l_lksb,
level, local_flags, lockres->l_name,
&lockres->l_lksb, lockres->l_namelen);
local_flags, if (status) {
lockres->l_name, if ((lkm_flags & DLM_LKF_NOQUEUE) &&
lockres->l_namelen, (status != -EAGAIN))
user_ast, user_log_dlm_error("ocfs2_dlm_lock",
lockres, status, lockres);
user_bast);
if (status != DLM_NORMAL) {
if ((lkm_flags & LKM_NOQUEUE) &&
(status == DLM_NOTQUEUED))
status = -EAGAIN;
else {
user_log_dlm_error("dlmlock", status, lockres);
status = -EINVAL;
}
user_recover_from_dlm_error(lockres); user_recover_from_dlm_error(lockres);
goto bail; goto bail;
} }
@ -506,11 +507,11 @@ static inline void user_dlm_dec_holders(struct user_lock_res *lockres,
int level) int level)
{ {
switch(level) { switch(level) {
case LKM_EXMODE: case DLM_LOCK_EX:
BUG_ON(!lockres->l_ex_holders); BUG_ON(!lockres->l_ex_holders);
lockres->l_ex_holders--; lockres->l_ex_holders--;
break; break;
case LKM_PRMODE: case DLM_LOCK_PR:
BUG_ON(!lockres->l_ro_holders); BUG_ON(!lockres->l_ro_holders);
lockres->l_ro_holders--; lockres->l_ro_holders--;
break; break;
@ -522,8 +523,8 @@ static inline void user_dlm_dec_holders(struct user_lock_res *lockres,
void user_dlm_cluster_unlock(struct user_lock_res *lockres, void user_dlm_cluster_unlock(struct user_lock_res *lockres,
int level) int level)
{ {
if (level != LKM_EXMODE && if (level != DLM_LOCK_EX &&
level != LKM_PRMODE) { level != DLM_LOCK_PR) {
mlog(ML_ERROR, "lockres %.*s: invalid request!\n", mlog(ML_ERROR, "lockres %.*s: invalid request!\n",
lockres->l_namelen, lockres->l_name); lockres->l_namelen, lockres->l_name);
return; return;
@ -540,33 +541,40 @@ void user_dlm_write_lvb(struct inode *inode,
unsigned int len) unsigned int len)
{ {
struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres; struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
char *lvb = lockres->l_lksb.lvb; char *lvb;
BUG_ON(len > DLM_LVB_LEN); BUG_ON(len > DLM_LVB_LEN);
spin_lock(&lockres->l_lock); spin_lock(&lockres->l_lock);
BUG_ON(lockres->l_level < LKM_EXMODE); BUG_ON(lockres->l_level < DLM_LOCK_EX);
lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
memcpy(lvb, val, len); memcpy(lvb, val, len);
spin_unlock(&lockres->l_lock); spin_unlock(&lockres->l_lock);
} }
void user_dlm_read_lvb(struct inode *inode, ssize_t user_dlm_read_lvb(struct inode *inode,
char *val, char *val,
unsigned int len) unsigned int len)
{ {
struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres; struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
char *lvb = lockres->l_lksb.lvb; char *lvb;
ssize_t ret = len;
BUG_ON(len > DLM_LVB_LEN); BUG_ON(len > DLM_LVB_LEN);
spin_lock(&lockres->l_lock); spin_lock(&lockres->l_lock);
BUG_ON(lockres->l_level < LKM_PRMODE); BUG_ON(lockres->l_level < DLM_LOCK_PR);
memcpy(val, lvb, len); if (ocfs2_dlm_lvb_valid(&lockres->l_lksb)) {
lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
memcpy(val, lvb, len);
} else
ret = 0;
spin_unlock(&lockres->l_lock); spin_unlock(&lockres->l_lock);
return ret;
} }
void user_dlm_lock_res_init(struct user_lock_res *lockres, void user_dlm_lock_res_init(struct user_lock_res *lockres,
@ -576,9 +584,9 @@ void user_dlm_lock_res_init(struct user_lock_res *lockres,
spin_lock_init(&lockres->l_lock); spin_lock_init(&lockres->l_lock);
init_waitqueue_head(&lockres->l_event); init_waitqueue_head(&lockres->l_event);
lockres->l_level = LKM_IVMODE; lockres->l_level = DLM_LOCK_IV;
lockres->l_requested = LKM_IVMODE; lockres->l_requested = DLM_LOCK_IV;
lockres->l_blocking = LKM_IVMODE; lockres->l_blocking = DLM_LOCK_IV;
/* should have been checked before getting here. */ /* should have been checked before getting here. */
BUG_ON(dentry->d_name.len >= USER_DLM_LOCK_ID_MAX_LEN); BUG_ON(dentry->d_name.len >= USER_DLM_LOCK_ID_MAX_LEN);
@ -592,7 +600,8 @@ void user_dlm_lock_res_init(struct user_lock_res *lockres,
int user_dlm_destroy_lock(struct user_lock_res *lockres) int user_dlm_destroy_lock(struct user_lock_res *lockres)
{ {
int status = -EBUSY; int status = -EBUSY;
struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres); struct ocfs2_cluster_connection *conn =
cluster_connection_from_user_lockres(lockres);
mlog(0, "asked to destroy %.*s\n", lockres->l_namelen, lockres->l_name); mlog(0, "asked to destroy %.*s\n", lockres->l_namelen, lockres->l_name);
@ -627,14 +636,9 @@ int user_dlm_destroy_lock(struct user_lock_res *lockres)
lockres->l_flags |= USER_LOCK_BUSY; lockres->l_flags |= USER_LOCK_BUSY;
spin_unlock(&lockres->l_lock); spin_unlock(&lockres->l_lock);
status = dlmunlock(dlm, status = ocfs2_dlm_unlock(conn, &lockres->l_lksb, DLM_LKF_VALBLK);
&lockres->l_lksb, if (status) {
LKM_VALBLK, user_log_dlm_error("ocfs2_dlm_unlock", status, lockres);
user_unlock_ast,
lockres);
if (status != DLM_NORMAL) {
user_log_dlm_error("dlmunlock", status, lockres);
status = -EINVAL;
goto bail; goto bail;
} }
@ -645,32 +649,34 @@ bail:
return status; return status;
} }
struct dlm_ctxt *user_dlm_register_context(struct qstr *name, static void user_dlm_recovery_handler_noop(int node_num,
struct dlm_protocol_version *proto) void *recovery_data)
{ {
struct dlm_ctxt *dlm; /* We ignore recovery events */
u32 dlm_key; return;
char *domain;
domain = kmalloc(name->len + 1, GFP_NOFS);
if (!domain) {
mlog_errno(-ENOMEM);
return ERR_PTR(-ENOMEM);
}
dlm_key = crc32_le(0, name->name, name->len);
snprintf(domain, name->len + 1, "%.*s", name->len, name->name);
dlm = dlm_register_domain(domain, dlm_key, proto);
if (IS_ERR(dlm))
mlog_errno(PTR_ERR(dlm));
kfree(domain);
return dlm;
} }
void user_dlm_unregister_context(struct dlm_ctxt *dlm) void user_dlm_set_locking_protocol(void)
{ {
dlm_unregister_domain(dlm); ocfs2_stack_glue_set_max_proto_version(&user_dlm_lproto.lp_max_version);
}
struct ocfs2_cluster_connection *user_dlm_register(struct qstr *name)
{
int rc;
struct ocfs2_cluster_connection *conn;
rc = ocfs2_cluster_connect("o2cb", name->name, name->len,
&user_dlm_lproto,
user_dlm_recovery_handler_noop,
NULL, &conn);
if (rc)
mlog_errno(rc);
return rc ? ERR_PTR(rc) : conn;
}
void user_dlm_unregister(struct ocfs2_cluster_connection *conn)
{
ocfs2_cluster_disconnect(conn, 0);
} }

View file

@ -57,7 +57,7 @@ struct user_lock_res {
int l_level; int l_level;
unsigned int l_ro_holders; unsigned int l_ro_holders;
unsigned int l_ex_holders; unsigned int l_ex_holders;
struct dlm_lockstatus l_lksb; struct ocfs2_dlm_lksb l_lksb;
int l_requested; int l_requested;
int l_blocking; int l_blocking;
@ -80,15 +80,15 @@ void user_dlm_cluster_unlock(struct user_lock_res *lockres,
void user_dlm_write_lvb(struct inode *inode, void user_dlm_write_lvb(struct inode *inode,
const char *val, const char *val,
unsigned int len); unsigned int len);
void user_dlm_read_lvb(struct inode *inode, ssize_t user_dlm_read_lvb(struct inode *inode,
char *val, char *val,
unsigned int len); unsigned int len);
struct dlm_ctxt *user_dlm_register_context(struct qstr *name, struct ocfs2_cluster_connection *user_dlm_register(struct qstr *name);
struct dlm_protocol_version *proto); void user_dlm_unregister(struct ocfs2_cluster_connection *conn);
void user_dlm_unregister_context(struct dlm_ctxt *dlm); void user_dlm_set_locking_protocol(void);
struct dlmfs_inode_private { struct dlmfs_inode_private {
struct dlm_ctxt *ip_dlm; struct ocfs2_cluster_connection *ip_conn;
struct user_lock_res ip_lockres; /* unused for directories. */ struct user_lock_res ip_lockres; /* unused for directories. */
struct inode *ip_parent; struct inode *ip_parent;