Merge git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6: (292 commits)
  [GFS2] Fix endian bug for de_type
  [GFS2] Initialize SELinux extended attributes at inode creation time.
  [GFS2] Move logging code into log.c (mostly)
  [GFS2] Mark nlink cleared so VFS sees it happen
  [GFS2] Two redundant casts removed
  [GFS2] Remove uneeded endian conversion
  [GFS2] Remove duplicate sb reading code
  [GFS2] Mark metadata reads for blktrace
  [GFS2] Remove iflags.h, use FS_
  [GFS2] Fix code style/indent in ops_file.c
  [GFS2] streamline-generic_file_-interfaces-and-filemap gfs fix
  [GFS2] Remove readv/writev methods and use aio_read/aio_write instead (gfs bits)
  [GFS2] inode-diet: Eliminate i_blksize from the inode structure
  [GFS2] inode_diet: Replace inode.u.generic_ip with inode.i_private (gfs)
  [GFS2] Fix typo in last patch
  [GFS2] Fix direct i/o logic in filemap.c
  [GFS2] Fix bug in Makefiles for lock modules
  [GFS2] Remove (extra) fs_subsys declaration
  [GFS2/DLM] Fix trailing whitespace
  [GFS2] Tidy up meta_io code
  ...
commit 4a61f17378
126 changed files with 40197 additions and 6 deletions
CREDITS | 6

@@ -3578,11 +3578,11 @@ S: Fargo, North Dakota 58122
 S: USA
 
 N: Steven Whitehouse
 E: SteveW@ACM.org
 E: steve@chygwyn.com
 W: http://www.chygwyn.com/~steve
-D: Linux DECnet project: http://www.sucs.swan.ac.uk/~rohan/DECnet/index.html
+D: Linux DECnet project
 D: Minor debugging of other networking protocols.
-D: Misc bug fixes and filesystem development
+D: Misc bug fixes and GFS2 filesystem development
 
 N: Hans-Joachim Widmaier
 E: hjw@zvw.de
Documentation/filesystems/gfs2.txt | 43 (new file)

@@ -0,0 +1,43 @@
Global File System
------------------

http://sources.redhat.com/cluster/

GFS is a cluster file system.  It allows a cluster of computers to
simultaneously use a block device that is shared between them (with FC,
iSCSI, NBD, etc.).  GFS reads and writes to the block device like a local
file system, but also uses a lock module to allow the computers to
coordinate their I/O so that file system consistency is maintained.  One
of the nifty features of GFS is perfect consistency -- changes made to the
file system on one machine show up immediately on all other machines in
the cluster.

GFS uses interchangeable inter-node locking mechanisms.  Different lock
modules can plug into GFS and each file system selects the appropriate
lock module at mount time.  Lock modules include:

  lock_nolock -- allows GFS to be used as a local file system

  lock_dlm -- uses a distributed lock manager (dlm) for inter-node locking.
  The dlm is found at linux/fs/dlm/

In addition to interfacing with an external locking manager, a GFS lock
module is responsible for interacting with external cluster management
systems.  Lock_dlm depends on user space cluster management systems found
at the URL above.

To use GFS as a local file system, no external clustering systems are
needed; simply:

  $ mkfs -t gfs2 -p lock_nolock -j 1 /dev/block_device
  $ mount -t gfs2 /dev/block_device /dir

GFS2 is not on-disk compatible with previous versions of GFS.

The following man pages can be found at the URL above:
  gfs2_fsck	to repair a filesystem
  gfs2_grow	to expand a filesystem online
  gfs2_jadd	to add journals to a filesystem online
  gfs2_tool	to manipulate, examine and tune a filesystem
  gfs2_quota	to examine and change quota values in a filesystem
  mount.gfs2	to help mount(8) mount a filesystem
  mkfs.gfs2	to make a filesystem
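The example above covers only the single-node lock_nolock case.  For the
clustered lock_dlm case described earlier, a rough sketch follows; the lock
table name "mycluster:myfs", the journal count, and the presence of running
cluster/dlm daemons are illustrative assumptions, and the authoritative
options come from the user space tools at the URL above:

  $ mkfs -t gfs2 -p lock_dlm -t mycluster:myfs -j 2 /dev/block_device
  $ mount -t gfs2 /dev/block_device /dir      (repeated on each cluster node)

Here -p selects the lock_dlm module, -t names the cluster and file system
for the lock table, and -j creates one journal per node that will mount the
file system.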
MAINTAINERS | 18

@@ -898,6 +898,16 @@ M:	jack@suse.cz
 L:	linux-kernel@vger.kernel.org
 S:	Maintained
 
+DISTRIBUTED LOCK MANAGER
+P:	Patrick Caulfield
+M:	pcaulfie@redhat.com
+P:	David Teigland
+M:	teigland@redhat.com
+L:	cluster-devel@redhat.com
+W:	http://sources.redhat.com/cluster/
+T:	git kernel.org:/pub/scm/linux/kernel/git/steve/gfs-2.6.git
+S:	Supported
+
 DAVICOM FAST ETHERNET (DMFE) NETWORK DRIVER
 P:	Tobias Ringstrom
 M:	tori@unhappy.mine.nu
@@ -1173,6 +1183,14 @@ M:	khc@pm.waw.pl
 W:	http://www.kernel.org/pub/linux/utils/net/hdlc/
 S:	Maintained
 
+GFS2 FILE SYSTEM
+P:	Steven Whitehouse
+M:	swhiteho@redhat.com
+L:	cluster-devel@redhat.com
+W:	http://sources.redhat.com/cluster/
+T:	git kernel.org:/pub/scm/linux/kernel/git/steve/gfs-2.6.git
+S:	Supported
+
 GIGASET ISDN DRIVERS
 P:	Hansjoerg Lipp
 M:	hjlipp@web.de
fs/Kconfig

@@ -325,6 +325,7 @@ config FS_POSIX_ACL
 	default n
 
 source "fs/xfs/Kconfig"
+source "fs/gfs2/Kconfig"
 
 config OCFS2_FS
 	tristate "OCFS2 file system support"
@@ -1995,6 +1996,7 @@ endmenu
 endif
 
 source "fs/nls/Kconfig"
+source "fs/dlm/Kconfig"
 
 endmenu
fs/Makefile

@@ -57,6 +57,7 @@ obj-$(CONFIG_CONFIGFS_FS)	+= configfs/
 obj-y				+= devpts/
 
 obj-$(CONFIG_PROFILING)		+= dcookies.o
+obj-$(CONFIG_DLM)		+= dlm/
 
 # Do not add any filesystems before this line
 obj-$(CONFIG_REISERFS_FS)	+= reiserfs/
@@ -110,3 +111,4 @@ obj-$(CONFIG_HOSTFS)		+= hostfs/
 obj-$(CONFIG_HPPFS)		+= hppfs/
 obj-$(CONFIG_DEBUG_FS)		+= debugfs/
 obj-$(CONFIG_OCFS2_FS)		+= ocfs2/
+obj-$(CONFIG_GFS2_FS)		+= gfs2/
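As a quick, hedged sketch of how these build hooks are exercised (standard
kbuild targets; the exact menuconfig locations are not taken from this patch):

  $ make menuconfig        # enable CONFIG_GFS2_FS and CONFIG_DLM under "File systems"
  $ make fs/gfs2/ fs/dlm/  # kbuild now descends into the new directories
  $ make modules modules_install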
fs/configfs/item.c

@@ -224,4 +224,4 @@ EXPORT_SYMBOL(config_item_init);
 EXPORT_SYMBOL(config_group_init);
 EXPORT_SYMBOL(config_item_get);
 EXPORT_SYMBOL(config_item_put);
+EXPORT_SYMBOL(config_group_find_obj);
fs/dlm/Kconfig | 21 (new file)

@@ -0,0 +1,21 @@
menu "Distributed Lock Manager"
	depends on INET && EXPERIMENTAL

config DLM
	tristate "Distributed Lock Manager (DLM)"
	depends on IPV6 || IPV6=n
	depends on IP_SCTP
	select CONFIGFS_FS
	help
	  A general purpose distributed lock manager for kernel or userspace
	  applications.

config DLM_DEBUG
	bool "DLM debugging"
	depends on DLM
	help
	  Under the debugfs mount point, the name of each lockspace will
	  appear as a file in the "dlm" directory.  The output is the
	  list of resource and locks the local node knows about.

endmenu
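To illustrate the DLM_DEBUG help text above, a hedged example session; it
assumes debugfs is mounted at /sys/kernel/debug and that a lockspace named
"myfs" already exists (both are assumptions, not part of this patch):

  $ mount -t debugfs none /sys/kernel/debug
  $ ls /sys/kernel/debug/dlm/
  myfs  myfs_waiters
  $ cat /sys/kernel/debug/dlm/myfs            # rsb and lock dump for the lockspace
  $ cat /sys/kernel/debug/dlm/myfs_waiters    # lkbs waiting on remote replies

The <name> and <name>_waiters files correspond to the two debugfs entries
created in fs/dlm/debug_fs.c below.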
fs/dlm/Makefile | 19 (new file)

@@ -0,0 +1,19 @@
obj-$(CONFIG_DLM) +=		dlm.o
dlm-y :=			ast.o \
				config.o \
				dir.o \
				lock.o \
				lockspace.o \
				lowcomms.o \
				main.o \
				member.o \
				memory.o \
				midcomms.o \
				rcom.o \
				recover.o \
				recoverd.o \
				requestqueue.o \
				user.o \
				util.o
dlm-$(CONFIG_DLM_DEBUG) +=	debug_fs.o
fs/dlm/ast.c | 173 (new file)

@@ -0,0 +1,173 @@
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lock.h"
#include "user.h"

#define WAKE_ASTS  0

static struct list_head		ast_queue;
static spinlock_t		ast_queue_lock;
static struct task_struct *	astd_task;
static unsigned long		astd_wakeflags;
static struct mutex		astd_running;


void dlm_del_ast(struct dlm_lkb *lkb)
{
	spin_lock(&ast_queue_lock);
	if (lkb->lkb_ast_type & (AST_COMP | AST_BAST))
		list_del(&lkb->lkb_astqueue);
	spin_unlock(&ast_queue_lock);
}

void dlm_add_ast(struct dlm_lkb *lkb, int type)
{
	if (lkb->lkb_flags & DLM_IFL_USER) {
		dlm_user_add_ast(lkb, type);
		return;
	}
	DLM_ASSERT(lkb->lkb_astaddr != DLM_FAKE_USER_AST, dlm_print_lkb(lkb););

	spin_lock(&ast_queue_lock);
	if (!(lkb->lkb_ast_type & (AST_COMP | AST_BAST))) {
		kref_get(&lkb->lkb_ref);
		list_add_tail(&lkb->lkb_astqueue, &ast_queue);
	}
	lkb->lkb_ast_type |= type;
	spin_unlock(&ast_queue_lock);

	set_bit(WAKE_ASTS, &astd_wakeflags);
	wake_up_process(astd_task);
}

static void process_asts(void)
{
	struct dlm_ls *ls = NULL;
	struct dlm_rsb *r = NULL;
	struct dlm_lkb *lkb;
	void (*cast) (long param);
	void (*bast) (long param, int mode);
	int type = 0, found, bmode;

	for (;;) {
		found = 0;
		spin_lock(&ast_queue_lock);
		list_for_each_entry(lkb, &ast_queue, lkb_astqueue) {
			r = lkb->lkb_resource;
			ls = r->res_ls;

			if (dlm_locking_stopped(ls))
				continue;

			list_del(&lkb->lkb_astqueue);
			type = lkb->lkb_ast_type;
			lkb->lkb_ast_type = 0;
			found = 1;
			break;
		}
		spin_unlock(&ast_queue_lock);

		if (!found)
			break;

		cast = lkb->lkb_astaddr;
		bast = lkb->lkb_bastaddr;
		bmode = lkb->lkb_bastmode;

		if ((type & AST_COMP) && cast)
			cast(lkb->lkb_astparam);

		/* FIXME: Is it safe to look at lkb_grmode here
		   without doing a lock_rsb() ?
		   Look at other checks in v1 to avoid basts. */

		if ((type & AST_BAST) && bast)
			if (!dlm_modes_compat(lkb->lkb_grmode, bmode))
				bast(lkb->lkb_astparam, bmode);

		/* this removes the reference added by dlm_add_ast
		   and may result in the lkb being freed */
		dlm_put_lkb(lkb);

		schedule();
	}
}

static inline int no_asts(void)
{
	int ret;

	spin_lock(&ast_queue_lock);
	ret = list_empty(&ast_queue);
	spin_unlock(&ast_queue_lock);
	return ret;
}

static int dlm_astd(void *data)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!test_bit(WAKE_ASTS, &astd_wakeflags))
			schedule();
		set_current_state(TASK_RUNNING);

		mutex_lock(&astd_running);
		if (test_and_clear_bit(WAKE_ASTS, &astd_wakeflags))
			process_asts();
		mutex_unlock(&astd_running);
	}
	return 0;
}

void dlm_astd_wake(void)
{
	if (!no_asts()) {
		set_bit(WAKE_ASTS, &astd_wakeflags);
		wake_up_process(astd_task);
	}
}

int dlm_astd_start(void)
{
	struct task_struct *p;
	int error = 0;

	INIT_LIST_HEAD(&ast_queue);
	spin_lock_init(&ast_queue_lock);
	mutex_init(&astd_running);

	p = kthread_run(dlm_astd, NULL, "dlm_astd");
	if (IS_ERR(p))
		error = PTR_ERR(p);
	else
		astd_task = p;
	return error;
}

void dlm_astd_stop(void)
{
	kthread_stop(astd_task);
}

void dlm_astd_suspend(void)
{
	mutex_lock(&astd_running);
}

void dlm_astd_resume(void)
{
	mutex_unlock(&astd_running);
}
fs/dlm/ast.h | 26 (new file)

@@ -0,0 +1,26 @@
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) 2005 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#ifndef __ASTD_DOT_H__
#define __ASTD_DOT_H__

void dlm_add_ast(struct dlm_lkb *lkb, int type);
void dlm_del_ast(struct dlm_lkb *lkb);

void dlm_astd_wake(void);
int dlm_astd_start(void);
void dlm_astd_stop(void);
void dlm_astd_suspend(void);
void dlm_astd_resume(void);

#endif
fs/dlm/config.c | 789 (new file)

@@ -0,0 +1,789 @@
|
|||
/******************************************************************************
|
||||
*******************************************************************************
|
||||
**
|
||||
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
|
||||
**
|
||||
** This copyrighted material is made available to anyone wishing to use,
|
||||
** modify, copy, or redistribute it subject to the terms and conditions
|
||||
** of the GNU General Public License v.2.
|
||||
**
|
||||
*******************************************************************************
|
||||
******************************************************************************/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/configfs.h>
|
||||
#include <net/sock.h>
|
||||
|
||||
#include "config.h"
|
||||
#include "lowcomms.h"
|
||||
|
||||
/*
|
||||
* /config/dlm/<cluster>/spaces/<space>/nodes/<node>/nodeid
|
||||
* /config/dlm/<cluster>/spaces/<space>/nodes/<node>/weight
|
||||
* /config/dlm/<cluster>/comms/<comm>/nodeid
|
||||
* /config/dlm/<cluster>/comms/<comm>/local
|
||||
* /config/dlm/<cluster>/comms/<comm>/addr
|
||||
* The <cluster> level is useless, but I haven't figured out how to avoid it.
|
||||
*/
|
||||
|
||||
static struct config_group *space_list;
|
||||
static struct config_group *comm_list;
|
||||
static struct comm *local_comm;
|
||||
|
||||
struct clusters;
|
||||
struct cluster;
|
||||
struct spaces;
|
||||
struct space;
|
||||
struct comms;
|
||||
struct comm;
|
||||
struct nodes;
|
||||
struct node;
|
||||
|
||||
static struct config_group *make_cluster(struct config_group *, const char *);
|
||||
static void drop_cluster(struct config_group *, struct config_item *);
|
||||
static void release_cluster(struct config_item *);
|
||||
static struct config_group *make_space(struct config_group *, const char *);
|
||||
static void drop_space(struct config_group *, struct config_item *);
|
||||
static void release_space(struct config_item *);
|
||||
static struct config_item *make_comm(struct config_group *, const char *);
|
||||
static void drop_comm(struct config_group *, struct config_item *);
|
||||
static void release_comm(struct config_item *);
|
||||
static struct config_item *make_node(struct config_group *, const char *);
|
||||
static void drop_node(struct config_group *, struct config_item *);
|
||||
static void release_node(struct config_item *);
|
||||
|
||||
static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a,
|
||||
char *buf);
|
||||
static ssize_t store_comm(struct config_item *i, struct configfs_attribute *a,
|
||||
const char *buf, size_t len);
|
||||
static ssize_t show_node(struct config_item *i, struct configfs_attribute *a,
|
||||
char *buf);
|
||||
static ssize_t store_node(struct config_item *i, struct configfs_attribute *a,
|
||||
const char *buf, size_t len);
|
||||
|
||||
static ssize_t comm_nodeid_read(struct comm *cm, char *buf);
|
||||
static ssize_t comm_nodeid_write(struct comm *cm, const char *buf, size_t len);
|
||||
static ssize_t comm_local_read(struct comm *cm, char *buf);
|
||||
static ssize_t comm_local_write(struct comm *cm, const char *buf, size_t len);
|
||||
static ssize_t comm_addr_write(struct comm *cm, const char *buf, size_t len);
|
||||
static ssize_t node_nodeid_read(struct node *nd, char *buf);
|
||||
static ssize_t node_nodeid_write(struct node *nd, const char *buf, size_t len);
|
||||
static ssize_t node_weight_read(struct node *nd, char *buf);
|
||||
static ssize_t node_weight_write(struct node *nd, const char *buf, size_t len);
|
||||
|
||||
enum {
|
||||
COMM_ATTR_NODEID = 0,
|
||||
COMM_ATTR_LOCAL,
|
||||
COMM_ATTR_ADDR,
|
||||
};
|
||||
|
||||
struct comm_attribute {
|
||||
struct configfs_attribute attr;
|
||||
ssize_t (*show)(struct comm *, char *);
|
||||
ssize_t (*store)(struct comm *, const char *, size_t);
|
||||
};
|
||||
|
||||
static struct comm_attribute comm_attr_nodeid = {
|
||||
.attr = { .ca_owner = THIS_MODULE,
|
||||
.ca_name = "nodeid",
|
||||
.ca_mode = S_IRUGO | S_IWUSR },
|
||||
.show = comm_nodeid_read,
|
||||
.store = comm_nodeid_write,
|
||||
};
|
||||
|
||||
static struct comm_attribute comm_attr_local = {
|
||||
.attr = { .ca_owner = THIS_MODULE,
|
||||
.ca_name = "local",
|
||||
.ca_mode = S_IRUGO | S_IWUSR },
|
||||
.show = comm_local_read,
|
||||
.store = comm_local_write,
|
||||
};
|
||||
|
||||
static struct comm_attribute comm_attr_addr = {
|
||||
.attr = { .ca_owner = THIS_MODULE,
|
||||
.ca_name = "addr",
|
||||
.ca_mode = S_IRUGO | S_IWUSR },
|
||||
.store = comm_addr_write,
|
||||
};
|
||||
|
||||
static struct configfs_attribute *comm_attrs[] = {
|
||||
[COMM_ATTR_NODEID] = &comm_attr_nodeid.attr,
|
||||
[COMM_ATTR_LOCAL] = &comm_attr_local.attr,
|
||||
[COMM_ATTR_ADDR] = &comm_attr_addr.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
enum {
|
||||
NODE_ATTR_NODEID = 0,
|
||||
NODE_ATTR_WEIGHT,
|
||||
};
|
||||
|
||||
struct node_attribute {
|
||||
struct configfs_attribute attr;
|
||||
ssize_t (*show)(struct node *, char *);
|
||||
ssize_t (*store)(struct node *, const char *, size_t);
|
||||
};
|
||||
|
||||
static struct node_attribute node_attr_nodeid = {
|
||||
.attr = { .ca_owner = THIS_MODULE,
|
||||
.ca_name = "nodeid",
|
||||
.ca_mode = S_IRUGO | S_IWUSR },
|
||||
.show = node_nodeid_read,
|
||||
.store = node_nodeid_write,
|
||||
};
|
||||
|
||||
static struct node_attribute node_attr_weight = {
|
||||
.attr = { .ca_owner = THIS_MODULE,
|
||||
.ca_name = "weight",
|
||||
.ca_mode = S_IRUGO | S_IWUSR },
|
||||
.show = node_weight_read,
|
||||
.store = node_weight_write,
|
||||
};
|
||||
|
||||
static struct configfs_attribute *node_attrs[] = {
|
||||
[NODE_ATTR_NODEID] = &node_attr_nodeid.attr,
|
||||
[NODE_ATTR_WEIGHT] = &node_attr_weight.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
struct clusters {
|
||||
struct configfs_subsystem subsys;
|
||||
};
|
||||
|
||||
struct cluster {
|
||||
struct config_group group;
|
||||
};
|
||||
|
||||
struct spaces {
|
||||
struct config_group ss_group;
|
||||
};
|
||||
|
||||
struct space {
|
||||
struct config_group group;
|
||||
struct list_head members;
|
||||
struct mutex members_lock;
|
||||
int members_count;
|
||||
};
|
||||
|
||||
struct comms {
|
||||
struct config_group cs_group;
|
||||
};
|
||||
|
||||
struct comm {
|
||||
struct config_item item;
|
||||
int nodeid;
|
||||
int local;
|
||||
int addr_count;
|
||||
struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
|
||||
};
|
||||
|
||||
struct nodes {
|
||||
struct config_group ns_group;
|
||||
};
|
||||
|
||||
struct node {
|
||||
struct config_item item;
|
||||
struct list_head list; /* space->members */
|
||||
int nodeid;
|
||||
int weight;
|
||||
};
|
||||
|
||||
static struct configfs_group_operations clusters_ops = {
|
||||
.make_group = make_cluster,
|
||||
.drop_item = drop_cluster,
|
||||
};
|
||||
|
||||
static struct configfs_item_operations cluster_ops = {
|
||||
.release = release_cluster,
|
||||
};
|
||||
|
||||
static struct configfs_group_operations spaces_ops = {
|
||||
.make_group = make_space,
|
||||
.drop_item = drop_space,
|
||||
};
|
||||
|
||||
static struct configfs_item_operations space_ops = {
|
||||
.release = release_space,
|
||||
};
|
||||
|
||||
static struct configfs_group_operations comms_ops = {
|
||||
.make_item = make_comm,
|
||||
.drop_item = drop_comm,
|
||||
};
|
||||
|
||||
static struct configfs_item_operations comm_ops = {
|
||||
.release = release_comm,
|
||||
.show_attribute = show_comm,
|
||||
.store_attribute = store_comm,
|
||||
};
|
||||
|
||||
static struct configfs_group_operations nodes_ops = {
|
||||
.make_item = make_node,
|
||||
.drop_item = drop_node,
|
||||
};
|
||||
|
||||
static struct configfs_item_operations node_ops = {
|
||||
.release = release_node,
|
||||
.show_attribute = show_node,
|
||||
.store_attribute = store_node,
|
||||
};
|
||||
|
||||
static struct config_item_type clusters_type = {
|
||||
.ct_group_ops = &clusters_ops,
|
||||
.ct_owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
static struct config_item_type cluster_type = {
|
||||
.ct_item_ops = &cluster_ops,
|
||||
.ct_owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
static struct config_item_type spaces_type = {
|
||||
.ct_group_ops = &spaces_ops,
|
||||
.ct_owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
static struct config_item_type space_type = {
|
||||
.ct_item_ops = &space_ops,
|
||||
.ct_owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
static struct config_item_type comms_type = {
|
||||
.ct_group_ops = &comms_ops,
|
||||
.ct_owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
static struct config_item_type comm_type = {
|
||||
.ct_item_ops = &comm_ops,
|
||||
.ct_attrs = comm_attrs,
|
||||
.ct_owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
static struct config_item_type nodes_type = {
|
||||
.ct_group_ops = &nodes_ops,
|
||||
.ct_owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
static struct config_item_type node_type = {
|
||||
.ct_item_ops = &node_ops,
|
||||
.ct_attrs = node_attrs,
|
||||
.ct_owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
static struct cluster *to_cluster(struct config_item *i)
|
||||
{
|
||||
return i ? container_of(to_config_group(i), struct cluster, group):NULL;
|
||||
}
|
||||
|
||||
static struct space *to_space(struct config_item *i)
|
||||
{
|
||||
return i ? container_of(to_config_group(i), struct space, group) : NULL;
|
||||
}
|
||||
|
||||
static struct comm *to_comm(struct config_item *i)
|
||||
{
|
||||
return i ? container_of(i, struct comm, item) : NULL;
|
||||
}
|
||||
|
||||
static struct node *to_node(struct config_item *i)
|
||||
{
|
||||
return i ? container_of(i, struct node, item) : NULL;
|
||||
}
|
||||
|
||||
static struct config_group *make_cluster(struct config_group *g,
|
||||
const char *name)
|
||||
{
|
||||
struct cluster *cl = NULL;
|
||||
struct spaces *sps = NULL;
|
||||
struct comms *cms = NULL;
|
||||
void *gps = NULL;
|
||||
|
||||
cl = kzalloc(sizeof(struct cluster), GFP_KERNEL);
|
||||
gps = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL);
|
||||
sps = kzalloc(sizeof(struct spaces), GFP_KERNEL);
|
||||
cms = kzalloc(sizeof(struct comms), GFP_KERNEL);
|
||||
|
||||
if (!cl || !gps || !sps || !cms)
|
||||
goto fail;
|
||||
|
||||
config_group_init_type_name(&cl->group, name, &cluster_type);
|
||||
config_group_init_type_name(&sps->ss_group, "spaces", &spaces_type);
|
||||
config_group_init_type_name(&cms->cs_group, "comms", &comms_type);
|
||||
|
||||
cl->group.default_groups = gps;
|
||||
cl->group.default_groups[0] = &sps->ss_group;
|
||||
cl->group.default_groups[1] = &cms->cs_group;
|
||||
cl->group.default_groups[2] = NULL;
|
||||
|
||||
space_list = &sps->ss_group;
|
||||
comm_list = &cms->cs_group;
|
||||
return &cl->group;
|
||||
|
||||
fail:
|
||||
kfree(cl);
|
||||
kfree(gps);
|
||||
kfree(sps);
|
||||
kfree(cms);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void drop_cluster(struct config_group *g, struct config_item *i)
|
||||
{
|
||||
struct cluster *cl = to_cluster(i);
|
||||
struct config_item *tmp;
|
||||
int j;
|
||||
|
||||
for (j = 0; cl->group.default_groups[j]; j++) {
|
||||
tmp = &cl->group.default_groups[j]->cg_item;
|
||||
cl->group.default_groups[j] = NULL;
|
||||
config_item_put(tmp);
|
||||
}
|
||||
|
||||
space_list = NULL;
|
||||
comm_list = NULL;
|
||||
|
||||
config_item_put(i);
|
||||
}
|
||||
|
||||
static void release_cluster(struct config_item *i)
|
||||
{
|
||||
struct cluster *cl = to_cluster(i);
|
||||
kfree(cl->group.default_groups);
|
||||
kfree(cl);
|
||||
}
|
||||
|
||||
static struct config_group *make_space(struct config_group *g, const char *name)
|
||||
{
|
||||
struct space *sp = NULL;
|
||||
struct nodes *nds = NULL;
|
||||
void *gps = NULL;
|
||||
|
||||
sp = kzalloc(sizeof(struct space), GFP_KERNEL);
|
||||
gps = kcalloc(2, sizeof(struct config_group *), GFP_KERNEL);
|
||||
nds = kzalloc(sizeof(struct nodes), GFP_KERNEL);
|
||||
|
||||
if (!sp || !gps || !nds)
|
||||
goto fail;
|
||||
|
||||
config_group_init_type_name(&sp->group, name, &space_type);
|
||||
config_group_init_type_name(&nds->ns_group, "nodes", &nodes_type);
|
||||
|
||||
sp->group.default_groups = gps;
|
||||
sp->group.default_groups[0] = &nds->ns_group;
|
||||
sp->group.default_groups[1] = NULL;
|
||||
|
||||
INIT_LIST_HEAD(&sp->members);
|
||||
mutex_init(&sp->members_lock);
|
||||
sp->members_count = 0;
|
||||
return &sp->group;
|
||||
|
||||
fail:
|
||||
kfree(sp);
|
||||
kfree(gps);
|
||||
kfree(nds);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void drop_space(struct config_group *g, struct config_item *i)
|
||||
{
|
||||
struct space *sp = to_space(i);
|
||||
struct config_item *tmp;
|
||||
int j;
|
||||
|
||||
/* assert list_empty(&sp->members) */
|
||||
|
||||
for (j = 0; sp->group.default_groups[j]; j++) {
|
||||
tmp = &sp->group.default_groups[j]->cg_item;
|
||||
sp->group.default_groups[j] = NULL;
|
||||
config_item_put(tmp);
|
||||
}
|
||||
|
||||
config_item_put(i);
|
||||
}
|
||||
|
||||
static void release_space(struct config_item *i)
|
||||
{
|
||||
struct space *sp = to_space(i);
|
||||
kfree(sp->group.default_groups);
|
||||
kfree(sp);
|
||||
}
|
||||
|
||||
static struct config_item *make_comm(struct config_group *g, const char *name)
|
||||
{
|
||||
struct comm *cm;
|
||||
|
||||
cm = kzalloc(sizeof(struct comm), GFP_KERNEL);
|
||||
if (!cm)
|
||||
return NULL;
|
||||
|
||||
config_item_init_type_name(&cm->item, name, &comm_type);
|
||||
cm->nodeid = -1;
|
||||
cm->local = 0;
|
||||
cm->addr_count = 0;
|
||||
return &cm->item;
|
||||
}
|
||||
|
||||
static void drop_comm(struct config_group *g, struct config_item *i)
|
||||
{
|
||||
struct comm *cm = to_comm(i);
|
||||
if (local_comm == cm)
|
||||
local_comm = NULL;
|
||||
dlm_lowcomms_close(cm->nodeid);
|
||||
while (cm->addr_count--)
|
||||
kfree(cm->addr[cm->addr_count]);
|
||||
config_item_put(i);
|
||||
}
|
||||
|
||||
static void release_comm(struct config_item *i)
|
||||
{
|
||||
struct comm *cm = to_comm(i);
|
||||
kfree(cm);
|
||||
}
|
||||
|
||||
static struct config_item *make_node(struct config_group *g, const char *name)
|
||||
{
|
||||
struct space *sp = to_space(g->cg_item.ci_parent);
|
||||
struct node *nd;
|
||||
|
||||
nd = kzalloc(sizeof(struct node), GFP_KERNEL);
|
||||
if (!nd)
|
||||
return NULL;
|
||||
|
||||
config_item_init_type_name(&nd->item, name, &node_type);
|
||||
nd->nodeid = -1;
|
||||
nd->weight = 1; /* default weight of 1 if none is set */
|
||||
|
||||
mutex_lock(&sp->members_lock);
|
||||
list_add(&nd->list, &sp->members);
|
||||
sp->members_count++;
|
||||
mutex_unlock(&sp->members_lock);
|
||||
|
||||
return &nd->item;
|
||||
}
|
||||
|
||||
static void drop_node(struct config_group *g, struct config_item *i)
|
||||
{
|
||||
struct space *sp = to_space(g->cg_item.ci_parent);
|
||||
struct node *nd = to_node(i);
|
||||
|
||||
mutex_lock(&sp->members_lock);
|
||||
list_del(&nd->list);
|
||||
sp->members_count--;
|
||||
mutex_unlock(&sp->members_lock);
|
||||
|
||||
config_item_put(i);
|
||||
}
|
||||
|
||||
static void release_node(struct config_item *i)
|
||||
{
|
||||
struct node *nd = to_node(i);
|
||||
kfree(nd);
|
||||
}
|
||||
|
||||
static struct clusters clusters_root = {
|
||||
.subsys = {
|
||||
.su_group = {
|
||||
.cg_item = {
|
||||
.ci_namebuf = "dlm",
|
||||
.ci_type = &clusters_type,
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
int dlm_config_init(void)
|
||||
{
|
||||
config_group_init(&clusters_root.subsys.su_group);
|
||||
init_MUTEX(&clusters_root.subsys.su_sem);
|
||||
return configfs_register_subsystem(&clusters_root.subsys);
|
||||
}
|
||||
|
||||
void dlm_config_exit(void)
|
||||
{
|
||||
configfs_unregister_subsystem(&clusters_root.subsys);
|
||||
}
|
||||
|
||||
/*
|
||||
* Functions for user space to read/write attributes
|
||||
*/
|
||||
|
||||
static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a,
|
||||
char *buf)
|
||||
{
|
||||
struct comm *cm = to_comm(i);
|
||||
struct comm_attribute *cma =
|
||||
container_of(a, struct comm_attribute, attr);
|
||||
return cma->show ? cma->show(cm, buf) : 0;
|
||||
}
|
||||
|
||||
static ssize_t store_comm(struct config_item *i, struct configfs_attribute *a,
|
||||
const char *buf, size_t len)
|
||||
{
|
||||
struct comm *cm = to_comm(i);
|
||||
struct comm_attribute *cma =
|
||||
container_of(a, struct comm_attribute, attr);
|
||||
return cma->store ? cma->store(cm, buf, len) : -EINVAL;
|
||||
}
|
||||
|
||||
static ssize_t comm_nodeid_read(struct comm *cm, char *buf)
|
||||
{
|
||||
return sprintf(buf, "%d\n", cm->nodeid);
|
||||
}
|
||||
|
||||
static ssize_t comm_nodeid_write(struct comm *cm, const char *buf, size_t len)
|
||||
{
|
||||
cm->nodeid = simple_strtol(buf, NULL, 0);
|
||||
return len;
|
||||
}
|
||||
|
||||
static ssize_t comm_local_read(struct comm *cm, char *buf)
|
||||
{
|
||||
return sprintf(buf, "%d\n", cm->local);
|
||||
}
|
||||
|
||||
static ssize_t comm_local_write(struct comm *cm, const char *buf, size_t len)
|
||||
{
|
||||
cm->local= simple_strtol(buf, NULL, 0);
|
||||
if (cm->local && !local_comm)
|
||||
local_comm = cm;
|
||||
return len;
|
||||
}
|
||||
|
||||
static ssize_t comm_addr_write(struct comm *cm, const char *buf, size_t len)
|
||||
{
|
||||
struct sockaddr_storage *addr;
|
||||
|
||||
if (len != sizeof(struct sockaddr_storage))
|
||||
return -EINVAL;
|
||||
|
||||
if (cm->addr_count >= DLM_MAX_ADDR_COUNT)
|
||||
return -ENOSPC;
|
||||
|
||||
addr = kzalloc(sizeof(*addr), GFP_KERNEL);
|
||||
if (!addr)
|
||||
return -ENOMEM;
|
||||
|
||||
memcpy(addr, buf, len);
|
||||
cm->addr[cm->addr_count++] = addr;
|
||||
return len;
|
||||
}
|
||||
|
||||
static ssize_t show_node(struct config_item *i, struct configfs_attribute *a,
|
||||
char *buf)
|
||||
{
|
||||
struct node *nd = to_node(i);
|
||||
struct node_attribute *nda =
|
||||
container_of(a, struct node_attribute, attr);
|
||||
return nda->show ? nda->show(nd, buf) : 0;
|
||||
}
|
||||
|
||||
static ssize_t store_node(struct config_item *i, struct configfs_attribute *a,
|
||||
const char *buf, size_t len)
|
||||
{
|
||||
struct node *nd = to_node(i);
|
||||
struct node_attribute *nda =
|
||||
container_of(a, struct node_attribute, attr);
|
||||
return nda->store ? nda->store(nd, buf, len) : -EINVAL;
|
||||
}
|
||||
|
||||
static ssize_t node_nodeid_read(struct node *nd, char *buf)
|
||||
{
|
||||
return sprintf(buf, "%d\n", nd->nodeid);
|
||||
}
|
||||
|
||||
static ssize_t node_nodeid_write(struct node *nd, const char *buf, size_t len)
|
||||
{
|
||||
nd->nodeid = simple_strtol(buf, NULL, 0);
|
||||
return len;
|
||||
}
|
||||
|
||||
static ssize_t node_weight_read(struct node *nd, char *buf)
|
||||
{
|
||||
return sprintf(buf, "%d\n", nd->weight);
|
||||
}
|
||||
|
||||
static ssize_t node_weight_write(struct node *nd, const char *buf, size_t len)
|
||||
{
|
||||
nd->weight = simple_strtol(buf, NULL, 0);
|
||||
return len;
|
||||
}
|
||||
|
||||
/*
|
||||
* Functions for the dlm to get the info that's been configured
|
||||
*/
|
||||
|
||||
static struct space *get_space(char *name)
|
||||
{
|
||||
if (!space_list)
|
||||
return NULL;
|
||||
return to_space(config_group_find_obj(space_list, name));
|
||||
}
|
||||
|
||||
static void put_space(struct space *sp)
|
||||
{
|
||||
config_item_put(&sp->group.cg_item);
|
||||
}
|
||||
|
||||
static struct comm *get_comm(int nodeid, struct sockaddr_storage *addr)
|
||||
{
|
||||
struct config_item *i;
|
||||
struct comm *cm = NULL;
|
||||
int found = 0;
|
||||
|
||||
if (!comm_list)
|
||||
return NULL;
|
||||
|
||||
down(&clusters_root.subsys.su_sem);
|
||||
|
||||
list_for_each_entry(i, &comm_list->cg_children, ci_entry) {
|
||||
cm = to_comm(i);
|
||||
|
||||
if (nodeid) {
|
||||
if (cm->nodeid != nodeid)
|
||||
continue;
|
||||
found = 1;
|
||||
break;
|
||||
} else {
|
||||
if (!cm->addr_count ||
|
||||
memcmp(cm->addr[0], addr, sizeof(*addr)))
|
||||
continue;
|
||||
found = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
up(&clusters_root.subsys.su_sem);
|
||||
|
||||
if (found)
|
||||
config_item_get(i);
|
||||
else
|
||||
cm = NULL;
|
||||
return cm;
|
||||
}
|
||||
|
||||
static void put_comm(struct comm *cm)
|
||||
{
|
||||
config_item_put(&cm->item);
|
||||
}
|
||||
|
||||
/* caller must free mem */
|
||||
int dlm_nodeid_list(char *lsname, int **ids_out)
|
||||
{
|
||||
struct space *sp;
|
||||
struct node *nd;
|
||||
int i = 0, rv = 0;
|
||||
int *ids;
|
||||
|
||||
sp = get_space(lsname);
|
||||
if (!sp)
|
||||
return -EEXIST;
|
||||
|
||||
mutex_lock(&sp->members_lock);
|
||||
if (!sp->members_count) {
|
||||
rv = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ids = kcalloc(sp->members_count, sizeof(int), GFP_KERNEL);
|
||||
if (!ids) {
|
||||
rv = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
rv = sp->members_count;
|
||||
list_for_each_entry(nd, &sp->members, list)
|
||||
ids[i++] = nd->nodeid;
|
||||
|
||||
if (rv != i)
|
||||
printk("bad nodeid count %d %d\n", rv, i);
|
||||
|
||||
*ids_out = ids;
|
||||
out:
|
||||
mutex_unlock(&sp->members_lock);
|
||||
put_space(sp);
|
||||
return rv;
|
||||
}
|
||||
|
||||
int dlm_node_weight(char *lsname, int nodeid)
|
||||
{
|
||||
struct space *sp;
|
||||
struct node *nd;
|
||||
int w = -EEXIST;
|
||||
|
||||
sp = get_space(lsname);
|
||||
if (!sp)
|
||||
goto out;
|
||||
|
||||
mutex_lock(&sp->members_lock);
|
||||
list_for_each_entry(nd, &sp->members, list) {
|
||||
if (nd->nodeid != nodeid)
|
||||
continue;
|
||||
w = nd->weight;
|
||||
break;
|
||||
}
|
||||
mutex_unlock(&sp->members_lock);
|
||||
put_space(sp);
|
||||
out:
|
||||
return w;
|
||||
}
|
||||
|
||||
int dlm_nodeid_to_addr(int nodeid, struct sockaddr_storage *addr)
|
||||
{
|
||||
struct comm *cm = get_comm(nodeid, NULL);
|
||||
if (!cm)
|
||||
return -EEXIST;
|
||||
if (!cm->addr_count)
|
||||
return -ENOENT;
|
||||
memcpy(addr, cm->addr[0], sizeof(*addr));
|
||||
put_comm(cm);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int dlm_addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid)
|
||||
{
|
||||
struct comm *cm = get_comm(0, addr);
|
||||
if (!cm)
|
||||
return -EEXIST;
|
||||
*nodeid = cm->nodeid;
|
||||
put_comm(cm);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int dlm_our_nodeid(void)
|
||||
{
|
||||
return local_comm ? local_comm->nodeid : 0;
|
||||
}
|
||||
|
||||
/* num 0 is first addr, num 1 is second addr */
|
||||
int dlm_our_addr(struct sockaddr_storage *addr, int num)
|
||||
{
|
||||
if (!local_comm)
|
||||
return -1;
|
||||
if (num + 1 > local_comm->addr_count)
|
||||
return -1;
|
||||
memcpy(addr, local_comm->addr[num], sizeof(*addr));
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Config file defaults */
|
||||
#define DEFAULT_TCP_PORT 21064
|
||||
#define DEFAULT_BUFFER_SIZE 4096
|
||||
#define DEFAULT_RSBTBL_SIZE 256
|
||||
#define DEFAULT_LKBTBL_SIZE 1024
|
||||
#define DEFAULT_DIRTBL_SIZE 512
|
||||
#define DEFAULT_RECOVER_TIMER 5
|
||||
#define DEFAULT_TOSS_SECS 10
|
||||
#define DEFAULT_SCAN_SECS 5
|
||||
|
||||
struct dlm_config_info dlm_config = {
|
||||
.tcp_port = DEFAULT_TCP_PORT,
|
||||
.buffer_size = DEFAULT_BUFFER_SIZE,
|
||||
.rsbtbl_size = DEFAULT_RSBTBL_SIZE,
|
||||
.lkbtbl_size = DEFAULT_LKBTBL_SIZE,
|
||||
.dirtbl_size = DEFAULT_DIRTBL_SIZE,
|
||||
.recover_timer = DEFAULT_RECOVER_TIMER,
|
||||
.toss_secs = DEFAULT_TOSS_SECS,
|
||||
.scan_secs = DEFAULT_SCAN_SECS
|
||||
};
|
||||
|
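The comment block at the top of config.c lists the configfs paths that the
DLM expects user space to populate.  A minimal sketch of that flow, assuming
configfs is mounted at /sys/kernel/config and using illustrative cluster,
lockspace and node names (in practice the cluster management daemons do
this, not an administrator by hand):

  $ mount -t configfs none /sys/kernel/config
  $ mkdir -p /sys/kernel/config/dlm/mycluster/spaces/myfs/nodes/1
  $ echo 1 > /sys/kernel/config/dlm/mycluster/spaces/myfs/nodes/1/nodeid
  $ echo 1 > /sys/kernel/config/dlm/mycluster/spaces/myfs/nodes/1/weight
  $ mkdir /sys/kernel/config/dlm/mycluster/comms/1
  $ echo 1 > /sys/kernel/config/dlm/mycluster/comms/1/nodeid
  $ echo 1 > /sys/kernel/config/dlm/mycluster/comms/1/local

  (comms/<comm>/addr expects a raw struct sockaddr_storage, as comm_addr_write
  above shows, so it is normally written by the cluster daemons rather than
  with echo.)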
fs/dlm/config.h | 42 (new file)

@@ -0,0 +1,42 @@
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#ifndef __CONFIG_DOT_H__
#define __CONFIG_DOT_H__

#define DLM_MAX_ADDR_COUNT 3

struct dlm_config_info {
	int tcp_port;
	int buffer_size;
	int rsbtbl_size;
	int lkbtbl_size;
	int dirtbl_size;
	int recover_timer;
	int toss_secs;
	int scan_secs;
};

extern struct dlm_config_info dlm_config;

int dlm_config_init(void);
void dlm_config_exit(void);
int dlm_node_weight(char *lsname, int nodeid);
int dlm_nodeid_list(char *lsname, int **ids_out);
int dlm_nodeid_to_addr(int nodeid, struct sockaddr_storage *addr);
int dlm_addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid);
int dlm_our_nodeid(void);
int dlm_our_addr(struct sockaddr_storage *addr, int num);

#endif  /* __CONFIG_DOT_H__ */
fs/dlm/debug_fs.c | 387 (new file)

@@ -0,0 +1,387 @@
|
|||
/******************************************************************************
|
||||
*******************************************************************************
|
||||
**
|
||||
** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
|
||||
**
|
||||
** This copyrighted material is made available to anyone wishing to use,
|
||||
** modify, copy, or redistribute it subject to the terms and conditions
|
||||
** of the GNU General Public License v.2.
|
||||
**
|
||||
*******************************************************************************
|
||||
******************************************************************************/
|
||||
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/debugfs.h>
|
||||
|
||||
#include "dlm_internal.h"
|
||||
|
||||
#define DLM_DEBUG_BUF_LEN 4096
|
||||
static char debug_buf[DLM_DEBUG_BUF_LEN];
|
||||
static struct mutex debug_buf_lock;
|
||||
|
||||
static struct dentry *dlm_root;
|
||||
|
||||
struct rsb_iter {
|
||||
int entry;
|
||||
struct dlm_ls *ls;
|
||||
struct list_head *next;
|
||||
struct dlm_rsb *rsb;
|
||||
};
|
||||
|
||||
/*
|
||||
* dump all rsb's in the lockspace hash table
|
||||
*/
|
||||
|
||||
static char *print_lockmode(int mode)
|
||||
{
|
||||
switch (mode) {
|
||||
case DLM_LOCK_IV:
|
||||
return "--";
|
||||
case DLM_LOCK_NL:
|
||||
return "NL";
|
||||
case DLM_LOCK_CR:
|
||||
return "CR";
|
||||
case DLM_LOCK_CW:
|
||||
return "CW";
|
||||
case DLM_LOCK_PR:
|
||||
return "PR";
|
||||
case DLM_LOCK_PW:
|
||||
return "PW";
|
||||
case DLM_LOCK_EX:
|
||||
return "EX";
|
||||
default:
|
||||
return "??";
|
||||
}
|
||||
}
|
||||
|
||||
static void print_lock(struct seq_file *s, struct dlm_lkb *lkb,
|
||||
struct dlm_rsb *res)
|
||||
{
|
||||
seq_printf(s, "%08x %s", lkb->lkb_id, print_lockmode(lkb->lkb_grmode));
|
||||
|
||||
if (lkb->lkb_status == DLM_LKSTS_CONVERT
|
||||
|| lkb->lkb_status == DLM_LKSTS_WAITING)
|
||||
seq_printf(s, " (%s)", print_lockmode(lkb->lkb_rqmode));
|
||||
|
||||
if (lkb->lkb_nodeid) {
|
||||
if (lkb->lkb_nodeid != res->res_nodeid)
|
||||
seq_printf(s, " Remote: %3d %08x", lkb->lkb_nodeid,
|
||||
lkb->lkb_remid);
|
||||
else
|
||||
seq_printf(s, " Master: %08x", lkb->lkb_remid);
|
||||
}
|
||||
|
||||
if (lkb->lkb_wait_type)
|
||||
seq_printf(s, " wait_type: %d", lkb->lkb_wait_type);
|
||||
|
||||
seq_printf(s, "\n");
|
||||
}
|
||||
|
||||
static int print_resource(struct dlm_rsb *res, struct seq_file *s)
|
||||
{
|
||||
struct dlm_lkb *lkb;
|
||||
int i, lvblen = res->res_ls->ls_lvblen, recover_list, root_list;
|
||||
|
||||
seq_printf(s, "\nResource %p Name (len=%d) \"", res, res->res_length);
|
||||
for (i = 0; i < res->res_length; i++) {
|
||||
if (isprint(res->res_name[i]))
|
||||
seq_printf(s, "%c", res->res_name[i]);
|
||||
else
|
||||
seq_printf(s, "%c", '.');
|
||||
}
|
||||
if (res->res_nodeid > 0)
|
||||
seq_printf(s, "\" \nLocal Copy, Master is node %d\n",
|
||||
res->res_nodeid);
|
||||
else if (res->res_nodeid == 0)
|
||||
seq_printf(s, "\" \nMaster Copy\n");
|
||||
else if (res->res_nodeid == -1)
|
||||
seq_printf(s, "\" \nLooking up master (lkid %x)\n",
|
||||
res->res_first_lkid);
|
||||
else
|
||||
seq_printf(s, "\" \nInvalid master %d\n", res->res_nodeid);
|
||||
|
||||
/* Print the LVB: */
|
||||
if (res->res_lvbptr) {
|
||||
seq_printf(s, "LVB: ");
|
||||
for (i = 0; i < lvblen; i++) {
|
||||
if (i == lvblen / 2)
|
||||
seq_printf(s, "\n ");
|
||||
seq_printf(s, "%02x ",
|
||||
(unsigned char) res->res_lvbptr[i]);
|
||||
}
|
||||
if (rsb_flag(res, RSB_VALNOTVALID))
|
||||
seq_printf(s, " (INVALID)");
|
||||
seq_printf(s, "\n");
|
||||
}
|
||||
|
||||
root_list = !list_empty(&res->res_root_list);
|
||||
recover_list = !list_empty(&res->res_recover_list);
|
||||
|
||||
if (root_list || recover_list) {
|
||||
seq_printf(s, "Recovery: root %d recover %d flags %lx "
|
||||
"count %d\n", root_list, recover_list,
|
||||
res->res_flags, res->res_recover_locks_count);
|
||||
}
|
||||
|
||||
/* Print the locks attached to this resource */
|
||||
seq_printf(s, "Granted Queue\n");
|
||||
list_for_each_entry(lkb, &res->res_grantqueue, lkb_statequeue)
|
||||
print_lock(s, lkb, res);
|
||||
|
||||
seq_printf(s, "Conversion Queue\n");
|
||||
list_for_each_entry(lkb, &res->res_convertqueue, lkb_statequeue)
|
||||
print_lock(s, lkb, res);
|
||||
|
||||
seq_printf(s, "Waiting Queue\n");
|
||||
list_for_each_entry(lkb, &res->res_waitqueue, lkb_statequeue)
|
||||
print_lock(s, lkb, res);
|
||||
|
||||
if (list_empty(&res->res_lookup))
|
||||
goto out;
|
||||
|
||||
seq_printf(s, "Lookup Queue\n");
|
||||
list_for_each_entry(lkb, &res->res_lookup, lkb_rsb_lookup) {
|
||||
seq_printf(s, "%08x %s", lkb->lkb_id,
|
||||
print_lockmode(lkb->lkb_rqmode));
|
||||
if (lkb->lkb_wait_type)
|
||||
seq_printf(s, " wait_type: %d", lkb->lkb_wait_type);
|
||||
seq_printf(s, "\n");
|
||||
}
|
||||
out:
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int rsb_iter_next(struct rsb_iter *ri)
|
||||
{
|
||||
struct dlm_ls *ls = ri->ls;
|
||||
int i;
|
||||
|
||||
if (!ri->next) {
|
||||
top:
|
||||
/* Find the next non-empty hash bucket */
|
||||
for (i = ri->entry; i < ls->ls_rsbtbl_size; i++) {
|
||||
read_lock(&ls->ls_rsbtbl[i].lock);
|
||||
if (!list_empty(&ls->ls_rsbtbl[i].list)) {
|
||||
ri->next = ls->ls_rsbtbl[i].list.next;
|
||||
read_unlock(&ls->ls_rsbtbl[i].lock);
|
||||
break;
|
||||
}
|
||||
read_unlock(&ls->ls_rsbtbl[i].lock);
|
||||
}
|
||||
ri->entry = i;
|
||||
|
||||
if (ri->entry >= ls->ls_rsbtbl_size)
|
||||
return 1;
|
||||
} else {
|
||||
i = ri->entry;
|
||||
read_lock(&ls->ls_rsbtbl[i].lock);
|
||||
ri->next = ri->next->next;
|
||||
if (ri->next->next == ls->ls_rsbtbl[i].list.next) {
|
||||
/* End of list - move to next bucket */
|
||||
ri->next = NULL;
|
||||
ri->entry++;
|
||||
read_unlock(&ls->ls_rsbtbl[i].lock);
|
||||
goto top;
|
||||
}
|
||||
read_unlock(&ls->ls_rsbtbl[i].lock);
|
||||
}
|
||||
ri->rsb = list_entry(ri->next, struct dlm_rsb, res_hashchain);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void rsb_iter_free(struct rsb_iter *ri)
|
||||
{
|
||||
kfree(ri);
|
||||
}
|
||||
|
||||
static struct rsb_iter *rsb_iter_init(struct dlm_ls *ls)
|
||||
{
|
||||
struct rsb_iter *ri;
|
||||
|
||||
ri = kmalloc(sizeof *ri, GFP_KERNEL);
|
||||
if (!ri)
|
||||
return NULL;
|
||||
|
||||
ri->ls = ls;
|
||||
ri->entry = 0;
|
||||
ri->next = NULL;
|
||||
|
||||
if (rsb_iter_next(ri)) {
|
||||
rsb_iter_free(ri);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return ri;
|
||||
}
|
||||
|
||||
static void *rsb_seq_start(struct seq_file *file, loff_t *pos)
|
||||
{
|
||||
struct rsb_iter *ri;
|
||||
loff_t n = *pos;
|
||||
|
||||
ri = rsb_iter_init(file->private);
|
||||
if (!ri)
|
||||
return NULL;
|
||||
|
||||
while (n--) {
|
||||
if (rsb_iter_next(ri)) {
|
||||
rsb_iter_free(ri);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
return ri;
|
||||
}
|
||||
|
||||
static void *rsb_seq_next(struct seq_file *file, void *iter_ptr, loff_t *pos)
|
||||
{
|
||||
struct rsb_iter *ri = iter_ptr;
|
||||
|
||||
(*pos)++;
|
||||
|
||||
if (rsb_iter_next(ri)) {
|
||||
rsb_iter_free(ri);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return ri;
|
||||
}
|
||||
|
||||
static void rsb_seq_stop(struct seq_file *file, void *iter_ptr)
|
||||
{
|
||||
/* nothing for now */
|
||||
}
|
||||
|
||||
static int rsb_seq_show(struct seq_file *file, void *iter_ptr)
|
||||
{
|
||||
struct rsb_iter *ri = iter_ptr;
|
||||
|
||||
print_resource(ri->rsb, file);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct seq_operations rsb_seq_ops = {
|
||||
.start = rsb_seq_start,
|
||||
.next = rsb_seq_next,
|
||||
.stop = rsb_seq_stop,
|
||||
.show = rsb_seq_show,
|
||||
};
|
||||
|
||||
static int rsb_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct seq_file *seq;
|
||||
int ret;
|
||||
|
||||
ret = seq_open(file, &rsb_seq_ops);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
seq = file->private_data;
|
||||
seq->private = inode->i_private;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct file_operations rsb_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = rsb_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = seq_release
|
||||
};
|
||||
|
||||
/*
|
||||
* dump lkb's on the ls_waiters list
|
||||
*/
|
||||
|
||||
static int waiters_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
file->private_data = inode->i_private;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static ssize_t waiters_read(struct file *file, char __user *userbuf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct dlm_ls *ls = file->private_data;
|
||||
struct dlm_lkb *lkb;
|
||||
size_t len = DLM_DEBUG_BUF_LEN, pos = 0, ret, rv;
|
||||
|
||||
mutex_lock(&debug_buf_lock);
|
||||
mutex_lock(&ls->ls_waiters_mutex);
|
||||
memset(debug_buf, 0, sizeof(debug_buf));
|
||||
|
||||
list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
|
||||
ret = snprintf(debug_buf + pos, len - pos, "%x %d %d %s\n",
|
||||
lkb->lkb_id, lkb->lkb_wait_type,
|
||||
lkb->lkb_nodeid, lkb->lkb_resource->res_name);
|
||||
if (ret >= len - pos)
|
||||
break;
|
||||
pos += ret;
|
||||
}
|
||||
mutex_unlock(&ls->ls_waiters_mutex);
|
||||
|
||||
rv = simple_read_from_buffer(userbuf, count, ppos, debug_buf, pos);
|
||||
mutex_unlock(&debug_buf_lock);
|
||||
return rv;
|
||||
}
|
||||
|
||||
static struct file_operations waiters_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = waiters_open,
|
||||
.read = waiters_read
|
||||
};
|
||||
|
||||
int dlm_create_debug_file(struct dlm_ls *ls)
|
||||
{
|
||||
char name[DLM_LOCKSPACE_LEN+8];
|
||||
|
||||
ls->ls_debug_rsb_dentry = debugfs_create_file(ls->ls_name,
|
||||
S_IFREG | S_IRUGO,
|
||||
dlm_root,
|
||||
ls,
|
||||
&rsb_fops);
|
||||
if (!ls->ls_debug_rsb_dentry)
|
||||
return -ENOMEM;
|
||||
|
||||
memset(name, 0, sizeof(name));
|
||||
snprintf(name, DLM_LOCKSPACE_LEN+8, "%s_waiters", ls->ls_name);
|
||||
|
||||
ls->ls_debug_waiters_dentry = debugfs_create_file(name,
|
||||
S_IFREG | S_IRUGO,
|
||||
dlm_root,
|
||||
ls,
|
||||
&waiters_fops);
|
||||
if (!ls->ls_debug_waiters_dentry) {
|
||||
debugfs_remove(ls->ls_debug_rsb_dentry);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void dlm_delete_debug_file(struct dlm_ls *ls)
|
||||
{
|
||||
if (ls->ls_debug_rsb_dentry)
|
||||
debugfs_remove(ls->ls_debug_rsb_dentry);
|
||||
if (ls->ls_debug_waiters_dentry)
|
||||
debugfs_remove(ls->ls_debug_waiters_dentry);
|
||||
}
|
||||
|
||||
int dlm_register_debugfs(void)
|
||||
{
|
||||
mutex_init(&debug_buf_lock);
|
||||
dlm_root = debugfs_create_dir("dlm", NULL);
|
||||
return dlm_root ? 0 : -ENOMEM;
|
||||
}
|
||||
|
||||
void dlm_unregister_debugfs(void)
|
||||
{
|
||||
debugfs_remove(dlm_root);
|
||||
}
|
||||
|
fs/dlm/dir.c | 423 (new file)

@@ -0,0 +1,423 @@
|
|||
/******************************************************************************
|
||||
*******************************************************************************
|
||||
**
|
||||
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
|
||||
**
|
||||
** This copyrighted material is made available to anyone wishing to use,
|
||||
** modify, copy, or redistribute it subject to the terms and conditions
|
||||
** of the GNU General Public License v.2.
|
||||
**
|
||||
*******************************************************************************
|
||||
******************************************************************************/
|
||||
|
||||
#include "dlm_internal.h"
|
||||
#include "lockspace.h"
|
||||
#include "member.h"
|
||||
#include "lowcomms.h"
|
||||
#include "rcom.h"
|
||||
#include "config.h"
|
||||
#include "memory.h"
|
||||
#include "recover.h"
|
||||
#include "util.h"
|
||||
#include "lock.h"
|
||||
#include "dir.h"
|
||||
|
||||
|
||||
static void put_free_de(struct dlm_ls *ls, struct dlm_direntry *de)
|
||||
{
|
||||
spin_lock(&ls->ls_recover_list_lock);
|
||||
list_add(&de->list, &ls->ls_recover_list);
|
||||
spin_unlock(&ls->ls_recover_list_lock);
|
||||
}
|
||||
|
||||
static struct dlm_direntry *get_free_de(struct dlm_ls *ls, int len)
|
||||
{
|
||||
int found = 0;
|
||||
struct dlm_direntry *de;
|
||||
|
||||
spin_lock(&ls->ls_recover_list_lock);
|
||||
list_for_each_entry(de, &ls->ls_recover_list, list) {
|
||||
if (de->length == len) {
|
||||
list_del(&de->list);
|
||||
de->master_nodeid = 0;
|
||||
memset(de->name, 0, len);
|
||||
found = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock(&ls->ls_recover_list_lock);
|
||||
|
||||
if (!found)
|
||||
de = allocate_direntry(ls, len);
|
||||
return de;
|
||||
}
|
||||
|
||||
void dlm_clear_free_entries(struct dlm_ls *ls)
|
||||
{
|
||||
struct dlm_direntry *de;
|
||||
|
||||
spin_lock(&ls->ls_recover_list_lock);
|
||||
while (!list_empty(&ls->ls_recover_list)) {
|
||||
de = list_entry(ls->ls_recover_list.next, struct dlm_direntry,
|
||||
list);
|
||||
list_del(&de->list);
|
||||
free_direntry(de);
|
||||
}
|
||||
spin_unlock(&ls->ls_recover_list_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* We use the upper 16 bits of the hash value to select the directory node.
|
||||
* Low bits are used for distribution of rsb's among hash buckets on each node.
|
||||
*
|
||||
* To give the exact range wanted (0 to num_nodes-1), we apply a modulus of
|
||||
* num_nodes to the hash value. This value in the desired range is used as an
|
||||
* offset into the sorted list of nodeid's to give the particular nodeid.
|
||||
*/
|
||||
|
||||
int dlm_hash2nodeid(struct dlm_ls *ls, uint32_t hash)
|
||||
{
|
||||
struct list_head *tmp;
|
||||
struct dlm_member *memb = NULL;
|
||||
uint32_t node, n = 0;
|
||||
int nodeid;
|
||||
|
||||
if (ls->ls_num_nodes == 1) {
|
||||
nodeid = dlm_our_nodeid();
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (ls->ls_node_array) {
|
||||
node = (hash >> 16) % ls->ls_total_weight;
|
||||
nodeid = ls->ls_node_array[node];
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* make_member_array() failed to kmalloc ls_node_array... */
|
||||
|
||||
node = (hash >> 16) % ls->ls_num_nodes;
|
||||
|
||||
list_for_each(tmp, &ls->ls_nodes) {
|
||||
if (n++ != node)
|
||||
continue;
|
||||
memb = list_entry(tmp, struct dlm_member, list);
|
||||
break;
|
||||
}
|
||||
|
||||
DLM_ASSERT(memb , printk("num_nodes=%u n=%u node=%u\n",
|
||||
ls->ls_num_nodes, n, node););
|
||||
nodeid = memb->nodeid;
|
||||
out:
|
||||
return nodeid;
|
||||
}
|
||||
|
||||
int dlm_dir_nodeid(struct dlm_rsb *r)
|
||||
{
|
||||
return dlm_hash2nodeid(r->res_ls, r->res_hash);
|
||||
}
|
||||
|
||||
static inline uint32_t dir_hash(struct dlm_ls *ls, char *name, int len)
|
||||
{
|
||||
uint32_t val;
|
||||
|
||||
val = jhash(name, len, 0);
|
||||
val &= (ls->ls_dirtbl_size - 1);
|
||||
|
||||
return val;
|
||||
}
|
||||
|
||||
static void add_entry_to_hash(struct dlm_ls *ls, struct dlm_direntry *de)
|
||||
{
|
||||
uint32_t bucket;
|
||||
|
||||
bucket = dir_hash(ls, de->name, de->length);
|
||||
list_add_tail(&de->list, &ls->ls_dirtbl[bucket].list);
|
||||
}
|
||||
|
||||
static struct dlm_direntry *search_bucket(struct dlm_ls *ls, char *name,
|
||||
int namelen, uint32_t bucket)
|
||||
{
|
||||
struct dlm_direntry *de;
|
||||
|
||||
list_for_each_entry(de, &ls->ls_dirtbl[bucket].list, list) {
|
||||
if (de->length == namelen && !memcmp(name, de->name, namelen))
|
||||
goto out;
|
||||
}
|
||||
de = NULL;
|
||||
out:
|
||||
return de;
|
||||
}
|
||||
|
||||
void dlm_dir_remove_entry(struct dlm_ls *ls, int nodeid, char *name, int namelen)
|
||||
{
|
||||
struct dlm_direntry *de;
|
||||
uint32_t bucket;
|
||||
|
||||
bucket = dir_hash(ls, name, namelen);
|
||||
|
||||
write_lock(&ls->ls_dirtbl[bucket].lock);
|
||||
|
||||
de = search_bucket(ls, name, namelen, bucket);
|
||||
|
||||
if (!de) {
|
||||
log_error(ls, "remove fr %u none", nodeid);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (de->master_nodeid != nodeid) {
|
||||
log_error(ls, "remove fr %u ID %u", nodeid, de->master_nodeid);
|
||||
goto out;
|
||||
}
|
||||
|
||||
list_del(&de->list);
|
||||
free_direntry(de);
|
||||
out:
|
||||
write_unlock(&ls->ls_dirtbl[bucket].lock);
|
||||
}
|
||||
|
||||
void dlm_dir_clear(struct dlm_ls *ls)
|
||||
{
|
||||
struct list_head *head;
|
||||
struct dlm_direntry *de;
|
||||
int i;
|
||||
|
||||
DLM_ASSERT(list_empty(&ls->ls_recover_list), );
|
||||
|
||||
for (i = 0; i < ls->ls_dirtbl_size; i++) {
|
||||
write_lock(&ls->ls_dirtbl[i].lock);
|
||||
head = &ls->ls_dirtbl[i].list;
|
||||
while (!list_empty(head)) {
|
||||
de = list_entry(head->next, struct dlm_direntry, list);
|
||||
list_del(&de->list);
|
||||
put_free_de(ls, de);
|
||||
}
|
||||
write_unlock(&ls->ls_dirtbl[i].lock);
|
||||
}
|
||||
}
|
||||
|
||||
int dlm_recover_directory(struct dlm_ls *ls)
|
||||
{
|
||||
struct dlm_member *memb;
|
||||
struct dlm_direntry *de;
|
||||
char *b, *last_name = NULL;
|
||||
int error = -ENOMEM, last_len, count = 0;
|
||||
uint16_t namelen;
|
||||
|
||||
log_debug(ls, "dlm_recover_directory");
|
||||
|
||||
if (dlm_no_directory(ls))
|
||||
goto out_status;
|
||||
|
||||
dlm_dir_clear(ls);
|
||||
|
||||
last_name = kmalloc(DLM_RESNAME_MAXLEN, GFP_KERNEL);
|
||||
if (!last_name)
|
||||
goto out;
|
||||
|
||||
list_for_each_entry(memb, &ls->ls_nodes, list) {
|
||||
memset(last_name, 0, DLM_RESNAME_MAXLEN);
|
||||
last_len = 0;
|
||||
|
||||
for (;;) {
|
||||
error = dlm_recovery_stopped(ls);
|
||||
if (error)
|
||||
goto out_free;
|
||||
|
||||
error = dlm_rcom_names(ls, memb->nodeid,
|
||||
last_name, last_len);
|
||||
if (error)
|
||||
goto out_free;
|
||||
|
||||
schedule();
|
||||
|
||||
/*
|
||||
* pick namelen/name pairs out of received buffer
|
||||
*/
|
||||
|
||||
b = ls->ls_recover_buf + sizeof(struct dlm_rcom);
|
||||
|
||||
for (;;) {
|
||||
memcpy(&namelen, b, sizeof(uint16_t));
|
||||
namelen = be16_to_cpu(namelen);
|
||||
b += sizeof(uint16_t);
|
||||
|
||||
/* namelen of 0xFFFFF marks end of names for
|
||||
this node; namelen of 0 marks end of the
|
||||
buffer */
|
||||
|
||||
if (namelen == 0xFFFF)
|
||||
goto done;
|
||||
if (!namelen)
|
||||
break;
|
||||
|
||||
error = -ENOMEM;
|
||||
de = get_free_de(ls, namelen);
|
||||
if (!de)
|
||||
goto out_free;
|
||||
|
||||
de->master_nodeid = memb->nodeid;
|
||||
de->length = namelen;
|
||||
last_len = namelen;
|
||||
memcpy(de->name, b, namelen);
|
||||
memcpy(last_name, b, namelen);
|
||||
b += namelen;
|
||||
|
||||
add_entry_to_hash(ls, de);
|
||||
count++;
|
||||
}
|
||||
}
|
||||
done:
|
||||
;
|
||||
}
|
||||
|
||||
out_status:
|
||||
error = 0;
|
||||
dlm_set_recover_status(ls, DLM_RS_DIR);
|
||||
log_debug(ls, "dlm_recover_directory %d entries", count);
|
||||
out_free:
|
||||
kfree(last_name);
|
||||
out:
|
||||
dlm_clear_free_entries(ls);
|
||||
return error;
|
||||
}
|
||||
|
||||
static int get_entry(struct dlm_ls *ls, int nodeid, char *name,
|
||||
int namelen, int *r_nodeid)
|
||||
{
|
||||
struct dlm_direntry *de, *tmp;
|
||||
uint32_t bucket;
|
||||
|
||||
bucket = dir_hash(ls, name, namelen);
|
||||
|
||||
write_lock(&ls->ls_dirtbl[bucket].lock);
|
||||
de = search_bucket(ls, name, namelen, bucket);
|
||||
if (de) {
|
||||
*r_nodeid = de->master_nodeid;
|
||||
write_unlock(&ls->ls_dirtbl[bucket].lock);
|
||||
if (*r_nodeid == nodeid)
|
||||
return -EEXIST;
|
||||
return 0;
|
||||
}
|
||||
|
||||
write_unlock(&ls->ls_dirtbl[bucket].lock);
|
||||
|
||||
de = allocate_direntry(ls, namelen);
|
||||
if (!de)
|
||||
return -ENOMEM;
|
||||
|
||||
de->master_nodeid = nodeid;
|
||||
de->length = namelen;
|
||||
memcpy(de->name, name, namelen);
|
||||
|
||||
write_lock(&ls->ls_dirtbl[bucket].lock);
|
||||
tmp = search_bucket(ls, name, namelen, bucket);
|
||||
if (tmp) {
|
||||
free_direntry(de);
|
||||
de = tmp;
|
||||
} else {
|
||||
list_add_tail(&de->list, &ls->ls_dirtbl[bucket].list);
|
||||
}
|
||||
*r_nodeid = de->master_nodeid;
|
||||
write_unlock(&ls->ls_dirtbl[bucket].lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int dlm_dir_lookup(struct dlm_ls *ls, int nodeid, char *name, int namelen,
|
||||
int *r_nodeid)
|
||||
{
|
||||
return get_entry(ls, nodeid, name, namelen, r_nodeid);
|
||||
}
|
||||
|
||||
/* Copy the names of master rsb's into the buffer provided.
|
||||
Only select names whose dir node is the given nodeid. */
|
||||
|
||||
void dlm_copy_master_names(struct dlm_ls *ls, char *inbuf, int inlen,
|
||||
char *outbuf, int outlen, int nodeid)
|
||||
{
|
||||
struct list_head *list;
|
||||
struct dlm_rsb *start_r = NULL, *r = NULL;
|
||||
int offset = 0, start_namelen, error, dir_nodeid;
|
||||
char *start_name;
|
||||
uint16_t be_namelen;
|
||||
|
||||
/*
|
||||
* Find the rsb where we left off (or start again)
|
||||
*/
|
||||
|
||||
start_namelen = inlen;
|
||||
start_name = inbuf;
|
||||
|
||||
if (start_namelen > 1) {
|
||||
/*
|
||||
* We could also use a find_rsb_root() function here that
|
||||
* searched the ls_root_list.
|
||||
*/
|
||||
error = dlm_find_rsb(ls, start_name, start_namelen, R_MASTER,
|
||||
&start_r);
|
||||
DLM_ASSERT(!error && start_r,
|
||||
printk("error %d\n", error););
|
||||
DLM_ASSERT(!list_empty(&start_r->res_root_list),
|
||||
dlm_print_rsb(start_r););
|
||||
dlm_put_rsb(start_r);
|
||||
}
|
||||
|
||||
/*
|
||||
* Send rsb names for rsb's we're master of and whose directory node
|
||||
* matches the requesting node.
|
||||
*/
|
||||
|
||||
down_read(&ls->ls_root_sem);
|
||||
if (start_r)
|
||||
list = start_r->res_root_list.next;
|
||||
else
|
||||
list = ls->ls_root_list.next;
|
||||
|
||||
for (offset = 0; list != &ls->ls_root_list; list = list->next) {
|
||||
r = list_entry(list, struct dlm_rsb, res_root_list);
|
||||
if (r->res_nodeid)
|
||||
continue;
|
||||
|
||||
dir_nodeid = dlm_dir_nodeid(r);
|
||||
if (dir_nodeid != nodeid)
|
||||
continue;
|
||||
|
||||
/*
|
||||
* The block ends when we can't fit the following in the
|
||||
* remaining buffer space:
|
||||
* namelen (uint16_t) +
|
||||
* name (r->res_length) +
|
||||
* end-of-block record 0x0000 (uint16_t)
|
||||
*/
|
||||
|
||||
if (offset + sizeof(uint16_t)*2 + r->res_length > outlen) {
|
||||
/* Write end-of-block record */
|
||||
be_namelen = 0;
|
||||
memcpy(outbuf + offset, &be_namelen, sizeof(uint16_t));
|
||||
offset += sizeof(uint16_t);
|
||||
goto out;
|
||||
}
|
||||
|
||||
be_namelen = cpu_to_be16(r->res_length);
|
||||
memcpy(outbuf + offset, &be_namelen, sizeof(uint16_t));
|
||||
offset += sizeof(uint16_t);
|
||||
memcpy(outbuf + offset, r->res_name, r->res_length);
|
||||
offset += r->res_length;
|
||||
}
|
||||
|
||||
/*
|
||||
* If we've reached the end of the list (and there's room) write a
|
||||
* terminating record.
|
||||
*/
|
||||
|
||||
if ((list == &ls->ls_root_list) &&
|
||||
(offset + sizeof(uint16_t) <= outlen)) {
|
||||
be_namelen = 0xFFFF;
|
||||
memcpy(outbuf + offset, &be_namelen, sizeof(uint16_t));
|
||||
offset += sizeof(uint16_t);
|
||||
}
|
||||
|
||||
out:
|
||||
up_read(&ls->ls_root_sem);
|
||||
}
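
The block layout walked by dlm_recover_directory() and produced by dlm_copy_master_names() above (a big-endian uint16 length followed by the name bytes, with 0x0000 ending a buffer and 0xFFFF ending the list for a node) is easy to misread, so a minimal, userspace-only parsing sketch follows. It is illustrative only; parse_names_block() and its bounds handling are not part of the DLM sources.

/* Illustrative userspace sketch of walking one received names block. */
#include <stdint.h>
#include <stdio.h>

/* Returns 1 once 0xFFFF (no more names from this node) is seen,
   0 when the block ends with 0x0000 and another request is needed. */
static int parse_names_block(const unsigned char *buf, int len)
{
        int pos = 0;

        while (pos + 2 <= len) {
                /* the length is sent big-endian (cpu_to_be16 on the sender) */
                uint16_t namelen = (buf[pos] << 8) | buf[pos + 1];
                pos += 2;

                if (namelen == 0xFFFF)
                        return 1;       /* end of names for this node */
                if (namelen == 0 || pos + namelen > len)
                        return 0;       /* end of this buffer, ask for more */

                printf("name of %d bytes\n", namelen);
                pos += namelen;
        }
        return 0;
}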

30  fs/dlm/dir.h  Normal file
@@ -0,0 +1,30 @@
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#ifndef __DIR_DOT_H__
#define __DIR_DOT_H__

int dlm_dir_nodeid(struct dlm_rsb *rsb);
int dlm_hash2nodeid(struct dlm_ls *ls, uint32_t hash);
void dlm_dir_remove_entry(struct dlm_ls *ls, int nodeid, char *name, int len);
void dlm_dir_clear(struct dlm_ls *ls);
void dlm_clear_free_entries(struct dlm_ls *ls);
int dlm_recover_directory(struct dlm_ls *ls);
int dlm_dir_lookup(struct dlm_ls *ls, int nodeid, char *name, int namelen,
                   int *r_nodeid);
void dlm_copy_master_names(struct dlm_ls *ls, char *inbuf, int inlen,
                           char *outbuf, int outlen, int nodeid);

#endif /* __DIR_DOT_H__ */

543  fs/dlm/dlm_internal.h  Normal file
@@ -0,0 +1,543 @@
/******************************************************************************
|
||||
*******************************************************************************
|
||||
**
|
||||
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
|
||||
**
|
||||
** This copyrighted material is made available to anyone wishing to use,
|
||||
** modify, copy, or redistribute it subject to the terms and conditions
|
||||
** of the GNU General Public License v.2.
|
||||
**
|
||||
*******************************************************************************
|
||||
******************************************************************************/
|
||||
|
||||
#ifndef __DLM_INTERNAL_DOT_H__
|
||||
#define __DLM_INTERNAL_DOT_H__
|
||||
|
||||
/*
|
||||
* This is the main header file to be included in each DLM source file.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/random.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/socket.h>
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/kobject.h>
|
||||
#include <linux/kref.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/jhash.h>
|
||||
#include <linux/miscdevice.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <asm/semaphore.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
#include <linux/dlm.h>
|
||||
|
||||
#define DLM_LOCKSPACE_LEN 64
|
||||
|
||||
/* Size of the temp buffer midcomms allocates on the stack.
|
||||
We try to make this large enough so most messages fit.
|
||||
FIXME: should sctp make this unnecessary? */
|
||||
|
||||
#define DLM_INBUF_LEN 148
|
||||
|
||||
struct dlm_ls;
|
||||
struct dlm_lkb;
|
||||
struct dlm_rsb;
|
||||
struct dlm_member;
|
||||
struct dlm_lkbtable;
|
||||
struct dlm_rsbtable;
|
||||
struct dlm_dirtable;
|
||||
struct dlm_direntry;
|
||||
struct dlm_recover;
|
||||
struct dlm_header;
|
||||
struct dlm_message;
|
||||
struct dlm_rcom;
|
||||
struct dlm_mhandle;
|
||||
|
||||
#define log_print(fmt, args...) \
|
||||
printk(KERN_ERR "dlm: "fmt"\n" , ##args)
|
||||
#define log_error(ls, fmt, args...) \
|
||||
printk(KERN_ERR "dlm: %s: " fmt "\n", (ls)->ls_name , ##args)
|
||||
|
||||
#define DLM_LOG_DEBUG
|
||||
#ifdef DLM_LOG_DEBUG
|
||||
#define log_debug(ls, fmt, args...) log_error(ls, fmt, ##args)
|
||||
#else
|
||||
#define log_debug(ls, fmt, args...)
|
||||
#endif
|
||||
|
||||
#define DLM_ASSERT(x, do) \
|
||||
{ \
|
||||
if (!(x)) \
|
||||
{ \
|
||||
printk(KERN_ERR "\nDLM: Assertion failed on line %d of file %s\n" \
|
||||
"DLM: assertion: \"%s\"\n" \
|
||||
"DLM: time = %lu\n", \
|
||||
__LINE__, __FILE__, #x, jiffies); \
|
||||
{do} \
|
||||
printk("\n"); \
|
||||
BUG(); \
|
||||
panic("DLM: Record message above and reboot.\n"); \
|
||||
} \
|
||||
}
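
/* Usage note (illustrative): the second DLM_ASSERT() argument is a statement
   run before BUG(), usually a printk() of the values involved, e.g.
   DLM_ASSERT(x < total, printk("total %d x %d\n", total, x);); and it may be
   left empty, as in DLM_ASSERT(list_empty(&ls->ls_recover_list), );. */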
|
||||
|
||||
#define DLM_FAKE_USER_AST ERR_PTR(-EINVAL)
|
||||
|
||||
|
||||
struct dlm_direntry {
|
||||
struct list_head list;
|
||||
uint32_t master_nodeid;
|
||||
uint16_t length;
|
||||
char name[1];
|
||||
};
|
||||
|
||||
struct dlm_dirtable {
|
||||
struct list_head list;
|
||||
rwlock_t lock;
|
||||
};
|
||||
|
||||
struct dlm_rsbtable {
|
||||
struct list_head list;
|
||||
struct list_head toss;
|
||||
rwlock_t lock;
|
||||
};
|
||||
|
||||
struct dlm_lkbtable {
|
||||
struct list_head list;
|
||||
rwlock_t lock;
|
||||
uint16_t counter;
|
||||
};
|
||||
|
||||
/*
|
||||
* Lockspace member (per node in a ls)
|
||||
*/
|
||||
|
||||
struct dlm_member {
|
||||
struct list_head list;
|
||||
int nodeid;
|
||||
int weight;
|
||||
};
|
||||
|
||||
/*
|
||||
* Save and manage recovery state for a lockspace.
|
||||
*/
|
||||
|
||||
struct dlm_recover {
|
||||
struct list_head list;
|
||||
int *nodeids;
|
||||
int node_count;
|
||||
uint64_t seq;
|
||||
};
|
||||
|
||||
/*
|
||||
* Pass input args to second stage locking function.
|
||||
*/
|
||||
|
||||
struct dlm_args {
|
||||
uint32_t flags;
|
||||
void *astaddr;
|
||||
long astparam;
|
||||
void *bastaddr;
|
||||
int mode;
|
||||
struct dlm_lksb *lksb;
|
||||
};
|
||||
|
||||
|
||||
/*
|
||||
* Lock block
|
||||
*
|
||||
* A lock can be one of three types:
|
||||
*
|
||||
* local copy lock is mastered locally
|
||||
* (lkb_nodeid is zero and DLM_LKF_MSTCPY is not set)
|
||||
* process copy lock is mastered on a remote node
|
||||
* (lkb_nodeid is non-zero and DLM_LKF_MSTCPY is not set)
|
||||
* master copy master node's copy of a lock owned by remote node
|
||||
* (lkb_nodeid is non-zero and DLM_LKF_MSTCPY is set)
|
||||
*
|
||||
* lkb_exflags: a copy of the most recent flags arg provided to dlm_lock or
|
||||
* dlm_unlock. The dlm does not modify these or use any private flags in
|
||||
* this field; it only contains DLM_LKF_ flags from dlm.h. These flags
|
||||
* are sent as-is to the remote master when the lock is remote.
|
||||
*
|
||||
* lkb_flags: internal dlm flags (DLM_IFL_ prefix) from dlm_internal.h.
|
||||
* Some internal flags are shared between the master and process nodes;
|
||||
* these shared flags are kept in the lower two bytes. One of these
|
||||
* flags set on the master copy will be propagated to the process copy
|
||||
* and v.v. Other internal flags are private to the master or process
|
||||
* node (e.g. DLM_IFL_MSTCPY). These are kept in the high two bytes.
|
||||
*
|
||||
* lkb_sbflags: status block flags. These flags are copied directly into
|
||||
* the caller's lksb.sb_flags prior to the dlm_lock/dlm_unlock completion
|
||||
* ast. All defined in dlm.h with DLM_SBF_ prefix.
|
||||
*
|
||||
* lkb_status: the lock status indicates which rsb queue the lock is
|
||||
* on, grant, convert, or wait. DLM_LKSTS_ WAITING/GRANTED/CONVERT
|
||||
*
|
||||
* lkb_wait_type: the dlm message type (DLM_MSG_ prefix) for which a
|
||||
* reply is needed. Only set when the lkb is on the lockspace waiters
|
||||
* list awaiting a reply from a remote node.
|
||||
*
|
||||
* lkb_nodeid: when the lkb is a local copy, nodeid is 0; when the lkb
|
||||
* is a master copy, nodeid specifies the remote lock holder, when the
|
||||
* lkb is a process copy, the nodeid specifies the lock master.
|
||||
*/
|
||||
|
||||
/* lkb_ast_type */
|
||||
|
||||
#define AST_COMP 1
|
||||
#define AST_BAST 2
|
||||
|
||||
/* lkb_status */
|
||||
|
||||
#define DLM_LKSTS_WAITING 1
|
||||
#define DLM_LKSTS_GRANTED 2
|
||||
#define DLM_LKSTS_CONVERT 3
|
||||
|
||||
/* lkb_flags */
|
||||
|
||||
#define DLM_IFL_MSTCPY 0x00010000
|
||||
#define DLM_IFL_RESEND 0x00020000
|
||||
#define DLM_IFL_DEAD 0x00040000
|
||||
#define DLM_IFL_USER 0x00000001
|
||||
#define DLM_IFL_ORPHAN 0x00000002
|
||||
|
||||
struct dlm_lkb {
|
||||
struct dlm_rsb *lkb_resource; /* the rsb */
|
||||
struct kref lkb_ref;
|
||||
int lkb_nodeid; /* copied from rsb */
|
||||
int lkb_ownpid; /* pid of lock owner */
|
||||
uint32_t lkb_id; /* our lock ID */
|
||||
uint32_t lkb_remid; /* lock ID on remote partner */
|
||||
uint32_t lkb_exflags; /* external flags from caller */
|
||||
uint32_t lkb_sbflags; /* lksb flags */
|
||||
uint32_t lkb_flags; /* internal flags */
|
||||
uint32_t lkb_lvbseq; /* lvb sequence number */
|
||||
|
||||
int8_t lkb_status; /* granted, waiting, convert */
|
||||
int8_t lkb_rqmode; /* requested lock mode */
|
||||
int8_t lkb_grmode; /* granted lock mode */
|
||||
int8_t lkb_bastmode; /* requested mode */
|
||||
int8_t lkb_highbast; /* highest mode bast sent for */
|
||||
|
||||
int8_t lkb_wait_type; /* type of reply waiting for */
|
||||
int8_t lkb_ast_type; /* type of ast queued for */
|
||||
|
||||
struct list_head lkb_idtbl_list; /* lockspace lkbtbl */
|
||||
struct list_head lkb_statequeue; /* rsb g/c/w list */
|
||||
struct list_head lkb_rsb_lookup; /* waiting for rsb lookup */
|
||||
struct list_head lkb_wait_reply; /* waiting for remote reply */
|
||||
struct list_head lkb_astqueue; /* need ast to be sent */
|
||||
struct list_head lkb_ownqueue; /* list of locks for a process */
|
||||
|
||||
char *lkb_lvbptr;
|
||||
struct dlm_lksb *lkb_lksb; /* caller's status block */
|
||||
void *lkb_astaddr; /* caller's ast function */
|
||||
void *lkb_bastaddr; /* caller's bast function */
|
||||
long lkb_astparam; /* caller's ast arg */
|
||||
};
|
||||
|
||||
|
||||
struct dlm_rsb {
|
||||
struct dlm_ls *res_ls; /* the lockspace */
|
||||
struct kref res_ref;
|
||||
struct mutex res_mutex;
|
||||
unsigned long res_flags;
|
||||
int res_length; /* length of rsb name */
|
||||
int res_nodeid;
|
||||
uint32_t res_lvbseq;
|
||||
uint32_t res_hash;
|
||||
uint32_t res_bucket; /* rsbtbl */
|
||||
unsigned long res_toss_time;
|
||||
uint32_t res_first_lkid;
|
||||
struct list_head res_lookup; /* lkbs waiting on first */
|
||||
struct list_head res_hashchain; /* rsbtbl */
|
||||
struct list_head res_grantqueue;
|
||||
struct list_head res_convertqueue;
|
||||
struct list_head res_waitqueue;
|
||||
|
||||
struct list_head res_root_list; /* used for recovery */
|
||||
struct list_head res_recover_list; /* used for recovery */
|
||||
int res_recover_locks_count;
|
||||
|
||||
char *res_lvbptr;
|
||||
char res_name[1];
|
||||
};
|
||||
|
||||
/* find_rsb() flags */
|
||||
|
||||
#define R_MASTER 1 /* only return rsb if it's a master */
|
||||
#define R_CREATE 2 /* create/add rsb if not found */
|
||||
|
||||
/* rsb_flags */
|
||||
|
||||
enum rsb_flags {
|
||||
RSB_MASTER_UNCERTAIN,
|
||||
RSB_VALNOTVALID,
|
||||
RSB_VALNOTVALID_PREV,
|
||||
RSB_NEW_MASTER,
|
||||
RSB_NEW_MASTER2,
|
||||
RSB_RECOVER_CONVERT,
|
||||
RSB_LOCKS_PURGED,
|
||||
};
|
||||
|
||||
static inline void rsb_set_flag(struct dlm_rsb *r, enum rsb_flags flag)
|
||||
{
|
||||
__set_bit(flag, &r->res_flags);
|
||||
}
|
||||
|
||||
static inline void rsb_clear_flag(struct dlm_rsb *r, enum rsb_flags flag)
|
||||
{
|
||||
__clear_bit(flag, &r->res_flags);
|
||||
}
|
||||
|
||||
static inline int rsb_flag(struct dlm_rsb *r, enum rsb_flags flag)
|
||||
{
|
||||
return test_bit(flag, &r->res_flags);
|
||||
}
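
/* Usage sketch (illustrative): these helpers wrap the non-atomic __set_bit/
   __clear_bit on res_flags, so callers presumably serialise through the rsb,
   e.g.
        if (!rsb_flag(r, RSB_MASTER_UNCERTAIN))
                rsb_set_flag(r, RSB_NEW_MASTER);
 */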
|
||||
|
||||
|
||||
/* dlm_header is first element of all structs sent between nodes */
|
||||
|
||||
#define DLM_HEADER_MAJOR 0x00020000
|
||||
#define DLM_HEADER_MINOR 0x00000001
|
||||
|
||||
#define DLM_MSG 1
|
||||
#define DLM_RCOM 2
|
||||
|
||||
struct dlm_header {
|
||||
uint32_t h_version;
|
||||
uint32_t h_lockspace;
|
||||
uint32_t h_nodeid; /* nodeid of sender */
|
||||
uint16_t h_length;
|
||||
uint8_t h_cmd; /* DLM_MSG, DLM_RCOM */
|
||||
uint8_t h_pad;
|
||||
};
|
||||
|
||||
|
||||
#define DLM_MSG_REQUEST 1
|
||||
#define DLM_MSG_CONVERT 2
|
||||
#define DLM_MSG_UNLOCK 3
|
||||
#define DLM_MSG_CANCEL 4
|
||||
#define DLM_MSG_REQUEST_REPLY 5
|
||||
#define DLM_MSG_CONVERT_REPLY 6
|
||||
#define DLM_MSG_UNLOCK_REPLY 7
|
||||
#define DLM_MSG_CANCEL_REPLY 8
|
||||
#define DLM_MSG_GRANT 9
|
||||
#define DLM_MSG_BAST 10
|
||||
#define DLM_MSG_LOOKUP 11
|
||||
#define DLM_MSG_REMOVE 12
|
||||
#define DLM_MSG_LOOKUP_REPLY 13
|
||||
|
||||
struct dlm_message {
|
||||
struct dlm_header m_header;
|
||||
uint32_t m_type; /* DLM_MSG_ */
|
||||
uint32_t m_nodeid;
|
||||
uint32_t m_pid;
|
||||
uint32_t m_lkid; /* lkid on sender */
|
||||
uint32_t m_remid; /* lkid on receiver */
|
||||
uint32_t m_parent_lkid;
|
||||
uint32_t m_parent_remid;
|
||||
uint32_t m_exflags;
|
||||
uint32_t m_sbflags;
|
||||
uint32_t m_flags;
|
||||
uint32_t m_lvbseq;
|
||||
uint32_t m_hash;
|
||||
int m_status;
|
||||
int m_grmode;
|
||||
int m_rqmode;
|
||||
int m_bastmode;
|
||||
int m_asts;
|
||||
int m_result; /* 0 or -EXXX */
|
||||
char m_extra[0]; /* name or lvb */
|
||||
};
|
||||
|
||||
|
||||
#define DLM_RS_NODES 0x00000001
|
||||
#define DLM_RS_NODES_ALL 0x00000002
|
||||
#define DLM_RS_DIR 0x00000004
|
||||
#define DLM_RS_DIR_ALL 0x00000008
|
||||
#define DLM_RS_LOCKS 0x00000010
|
||||
#define DLM_RS_LOCKS_ALL 0x00000020
|
||||
#define DLM_RS_DONE 0x00000040
|
||||
#define DLM_RS_DONE_ALL 0x00000080
|
||||
|
||||
#define DLM_RCOM_STATUS 1
|
||||
#define DLM_RCOM_NAMES 2
|
||||
#define DLM_RCOM_LOOKUP 3
|
||||
#define DLM_RCOM_LOCK 4
|
||||
#define DLM_RCOM_STATUS_REPLY 5
|
||||
#define DLM_RCOM_NAMES_REPLY 6
|
||||
#define DLM_RCOM_LOOKUP_REPLY 7
|
||||
#define DLM_RCOM_LOCK_REPLY 8
|
||||
|
||||
struct dlm_rcom {
|
||||
struct dlm_header rc_header;
|
||||
uint32_t rc_type; /* DLM_RCOM_ */
|
||||
int rc_result; /* multi-purpose */
|
||||
uint64_t rc_id; /* match reply with request */
|
||||
char rc_buf[0];
|
||||
};
|
||||
|
||||
struct rcom_config {
|
||||
uint32_t rf_lvblen;
|
||||
uint32_t rf_lsflags;
|
||||
uint64_t rf_unused;
|
||||
};
|
||||
|
||||
struct rcom_lock {
|
||||
uint32_t rl_ownpid;
|
||||
uint32_t rl_lkid;
|
||||
uint32_t rl_remid;
|
||||
uint32_t rl_parent_lkid;
|
||||
uint32_t rl_parent_remid;
|
||||
uint32_t rl_exflags;
|
||||
uint32_t rl_flags;
|
||||
uint32_t rl_lvbseq;
|
||||
int rl_result;
|
||||
int8_t rl_rqmode;
|
||||
int8_t rl_grmode;
|
||||
int8_t rl_status;
|
||||
int8_t rl_asts;
|
||||
uint16_t rl_wait_type;
|
||||
uint16_t rl_namelen;
|
||||
char rl_name[DLM_RESNAME_MAXLEN];
|
||||
char rl_lvb[0];
|
||||
};
|
||||
|
||||
struct dlm_ls {
|
||||
struct list_head ls_list; /* list of lockspaces */
|
||||
dlm_lockspace_t *ls_local_handle;
|
||||
uint32_t ls_global_id; /* global unique lockspace ID */
|
||||
uint32_t ls_exflags;
|
||||
int ls_lvblen;
|
||||
int ls_count; /* reference count */
|
||||
unsigned long ls_flags; /* LSFL_ */
|
||||
struct kobject ls_kobj;
|
||||
|
||||
struct dlm_rsbtable *ls_rsbtbl;
|
||||
uint32_t ls_rsbtbl_size;
|
||||
|
||||
struct dlm_lkbtable *ls_lkbtbl;
|
||||
uint32_t ls_lkbtbl_size;
|
||||
|
||||
struct dlm_dirtable *ls_dirtbl;
|
||||
uint32_t ls_dirtbl_size;
|
||||
|
||||
struct mutex ls_waiters_mutex;
|
||||
struct list_head ls_waiters; /* lkbs needing a reply */
|
||||
|
||||
struct list_head ls_nodes; /* current nodes in ls */
|
||||
struct list_head ls_nodes_gone; /* dead node list, recovery */
|
||||
int ls_num_nodes; /* number of nodes in ls */
|
||||
int ls_low_nodeid;
|
||||
int ls_total_weight;
|
||||
int *ls_node_array;
|
||||
|
||||
struct dlm_rsb ls_stub_rsb; /* for returning errors */
|
||||
struct dlm_lkb ls_stub_lkb; /* for returning errors */
|
||||
struct dlm_message ls_stub_ms; /* for faking a reply */
|
||||
|
||||
struct dentry *ls_debug_rsb_dentry; /* debugfs */
|
||||
struct dentry *ls_debug_waiters_dentry; /* debugfs */
|
||||
|
||||
wait_queue_head_t ls_uevent_wait; /* user part of join/leave */
|
||||
int ls_uevent_result;
|
||||
|
||||
struct miscdevice ls_device;
|
||||
|
||||
/* recovery related */
|
||||
|
||||
struct timer_list ls_timer;
|
||||
struct task_struct *ls_recoverd_task;
|
||||
struct mutex ls_recoverd_active;
|
||||
spinlock_t ls_recover_lock;
|
||||
uint32_t ls_recover_status; /* DLM_RS_ */
|
||||
uint64_t ls_recover_seq;
|
||||
struct dlm_recover *ls_recover_args;
|
||||
struct rw_semaphore ls_in_recovery; /* block local requests */
|
||||
struct list_head ls_requestqueue;/* queue remote requests */
|
||||
struct mutex ls_requestqueue_mutex;
|
||||
char *ls_recover_buf;
|
||||
int ls_recover_nodeid; /* for debugging */
|
||||
uint64_t ls_rcom_seq;
|
||||
struct list_head ls_recover_list;
|
||||
spinlock_t ls_recover_list_lock;
|
||||
int ls_recover_list_count;
|
||||
wait_queue_head_t ls_wait_general;
|
||||
struct mutex ls_clear_proc_locks;
|
||||
|
||||
struct list_head ls_root_list; /* root resources */
|
||||
struct rw_semaphore ls_root_sem; /* protect root_list */
|
||||
|
||||
int ls_namelen;
|
||||
char ls_name[1];
|
||||
};
|
||||
|
||||
#define LSFL_WORK 0
|
||||
#define LSFL_RUNNING 1
|
||||
#define LSFL_RECOVERY_STOP 2
|
||||
#define LSFL_RCOM_READY 3
|
||||
#define LSFL_UEVENT_WAIT 4
|
||||
|
||||
/* much of this is just saving user space pointers associated with the
|
||||
lock that we pass back to the user lib with an ast */
|
||||
|
||||
struct dlm_user_args {
|
||||
struct dlm_user_proc *proc; /* each process that opens the lockspace
|
||||
device has private data
|
||||
(dlm_user_proc) on the struct file,
|
||||
the process's locks point back to it*/
|
||||
struct dlm_lksb lksb;
|
||||
int old_mode;
|
||||
int update_user_lvb;
|
||||
struct dlm_lksb __user *user_lksb;
|
||||
void __user *castparam;
|
||||
void __user *castaddr;
|
||||
void __user *bastparam;
|
||||
void __user *bastaddr;
|
||||
};
|
||||
|
||||
#define DLM_PROC_FLAGS_CLOSING 1
|
||||
#define DLM_PROC_FLAGS_COMPAT 2
|
||||
|
||||
/* locks list is kept so we can remove all a process's locks when it
|
||||
exits (or orphan those that are persistent) */
|
||||
|
||||
struct dlm_user_proc {
|
||||
dlm_lockspace_t *lockspace;
|
||||
unsigned long flags; /* DLM_PROC_FLAGS */
|
||||
struct list_head asts;
|
||||
spinlock_t asts_spin;
|
||||
struct list_head locks;
|
||||
spinlock_t locks_spin;
|
||||
wait_queue_head_t wait;
|
||||
};
|
||||
|
||||
static inline int dlm_locking_stopped(struct dlm_ls *ls)
|
||||
{
|
||||
return !test_bit(LSFL_RUNNING, &ls->ls_flags);
|
||||
}
|
||||
|
||||
static inline int dlm_recovery_stopped(struct dlm_ls *ls)
|
||||
{
|
||||
return test_bit(LSFL_RECOVERY_STOP, &ls->ls_flags);
|
||||
}
|
||||
|
||||
static inline int dlm_no_directory(struct dlm_ls *ls)
|
||||
{
|
||||
return (ls->ls_exflags & DLM_LSFL_NODIR) ? 1 : 0;
|
||||
}
|
||||
|
||||
#endif /* __DLM_INTERNAL_DOT_H__ */

3871  fs/dlm/lock.c  Normal file
File diff suppressed because it is too large
62  fs/dlm/lock.h  Normal file
@@ -0,0 +1,62 @@
/******************************************************************************
|
||||
*******************************************************************************
|
||||
**
|
||||
** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
|
||||
**
|
||||
** This copyrighted material is made available to anyone wishing to use,
|
||||
** modify, copy, or redistribute it subject to the terms and conditions
|
||||
** of the GNU General Public License v.2.
|
||||
**
|
||||
*******************************************************************************
|
||||
******************************************************************************/
|
||||
|
||||
#ifndef __LOCK_DOT_H__
|
||||
#define __LOCK_DOT_H__
|
||||
|
||||
void dlm_print_rsb(struct dlm_rsb *r);
|
||||
void dlm_dump_rsb(struct dlm_rsb *r);
|
||||
void dlm_print_lkb(struct dlm_lkb *lkb);
|
||||
int dlm_receive_message(struct dlm_header *hd, int nodeid, int recovery);
|
||||
int dlm_modes_compat(int mode1, int mode2);
|
||||
int dlm_find_rsb(struct dlm_ls *ls, char *name, int namelen,
|
||||
unsigned int flags, struct dlm_rsb **r_ret);
|
||||
void dlm_put_rsb(struct dlm_rsb *r);
|
||||
void dlm_hold_rsb(struct dlm_rsb *r);
|
||||
int dlm_put_lkb(struct dlm_lkb *lkb);
|
||||
void dlm_scan_rsbs(struct dlm_ls *ls);
|
||||
|
||||
int dlm_purge_locks(struct dlm_ls *ls);
|
||||
void dlm_purge_mstcpy_locks(struct dlm_rsb *r);
|
||||
void dlm_grant_after_purge(struct dlm_ls *ls);
|
||||
int dlm_recover_waiters_post(struct dlm_ls *ls);
|
||||
void dlm_recover_waiters_pre(struct dlm_ls *ls);
|
||||
int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc);
|
||||
int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc);
|
||||
|
||||
int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua, int mode,
|
||||
uint32_t flags, void *name, unsigned int namelen, uint32_t parent_lkid);
|
||||
int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
|
||||
int mode, uint32_t flags, uint32_t lkid, char *lvb_in);
|
||||
int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
|
||||
uint32_t flags, uint32_t lkid, char *lvb_in);
|
||||
int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
|
||||
uint32_t flags, uint32_t lkid);
|
||||
void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc);
|
||||
|
||||
static inline int is_master(struct dlm_rsb *r)
|
||||
{
|
||||
return !r->res_nodeid;
|
||||
}
|
||||
|
||||
static inline void lock_rsb(struct dlm_rsb *r)
|
||||
{
|
||||
mutex_lock(&r->res_mutex);
|
||||
}
|
||||
|
||||
static inline void unlock_rsb(struct dlm_rsb *r)
|
||||
{
|
||||
mutex_unlock(&r->res_mutex);
|
||||
}
|
||||
|
||||
#endif
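
As a rough sketch of how the inline helpers above are meant to be combined (take a reference via dlm_find_rsb(), serialise through the per-rsb mutex, then drop the reference), here is a hypothetical caller. The function name and the R_MASTER-only lookup are illustrative assumptions, not code from this series.

/* Hypothetical caller, for illustration only. */
static void example_touch_rsb(struct dlm_ls *ls, char *name, int len)
{
        struct dlm_rsb *r;
        int error;

        error = dlm_find_rsb(ls, name, len, R_MASTER, &r);
        if (error)
                return;

        lock_rsb(r);            /* serialise against other users of r */
        /* ... inspect or update r->res_* fields here ... */
        unlock_rsb(r);

        dlm_put_rsb(r);         /* drop the reference from dlm_find_rsb() */
}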

717  fs/dlm/lockspace.c  Normal file
@@ -0,0 +1,717 @@
/******************************************************************************
|
||||
*******************************************************************************
|
||||
**
|
||||
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
|
||||
**
|
||||
** This copyrighted material is made available to anyone wishing to use,
|
||||
** modify, copy, or redistribute it subject to the terms and conditions
|
||||
** of the GNU General Public License v.2.
|
||||
**
|
||||
*******************************************************************************
|
||||
******************************************************************************/
|
||||
|
||||
#include "dlm_internal.h"
|
||||
#include "lockspace.h"
|
||||
#include "member.h"
|
||||
#include "recoverd.h"
|
||||
#include "ast.h"
|
||||
#include "dir.h"
|
||||
#include "lowcomms.h"
|
||||
#include "config.h"
|
||||
#include "memory.h"
|
||||
#include "lock.h"
|
||||
#include "recover.h"
|
||||
|
||||
#ifdef CONFIG_DLM_DEBUG
|
||||
int dlm_create_debug_file(struct dlm_ls *ls);
|
||||
void dlm_delete_debug_file(struct dlm_ls *ls);
|
||||
#else
|
||||
static inline int dlm_create_debug_file(struct dlm_ls *ls) { return 0; }
|
||||
static inline void dlm_delete_debug_file(struct dlm_ls *ls) { }
|
||||
#endif
|
||||
|
||||
static int ls_count;
|
||||
static struct mutex ls_lock;
|
||||
static struct list_head lslist;
|
||||
static spinlock_t lslist_lock;
|
||||
static struct task_struct * scand_task;
|
||||
|
||||
|
||||
static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len)
|
||||
{
|
||||
ssize_t ret = len;
|
||||
int n = simple_strtol(buf, NULL, 0);
|
||||
|
||||
switch (n) {
|
||||
case 0:
|
||||
dlm_ls_stop(ls);
|
||||
break;
|
||||
case 1:
|
||||
dlm_ls_start(ls);
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t dlm_event_store(struct dlm_ls *ls, const char *buf, size_t len)
|
||||
{
|
||||
ls->ls_uevent_result = simple_strtol(buf, NULL, 0);
|
||||
set_bit(LSFL_UEVENT_WAIT, &ls->ls_flags);
|
||||
wake_up(&ls->ls_uevent_wait);
|
||||
return len;
|
||||
}
|
||||
|
||||
static ssize_t dlm_id_show(struct dlm_ls *ls, char *buf)
|
||||
{
|
||||
return snprintf(buf, PAGE_SIZE, "%u\n", ls->ls_global_id);
|
||||
}
|
||||
|
||||
static ssize_t dlm_id_store(struct dlm_ls *ls, const char *buf, size_t len)
|
||||
{
|
||||
ls->ls_global_id = simple_strtoul(buf, NULL, 0);
|
||||
return len;
|
||||
}
|
||||
|
||||
static ssize_t dlm_recover_status_show(struct dlm_ls *ls, char *buf)
|
||||
{
|
||||
uint32_t status = dlm_recover_status(ls);
|
||||
return snprintf(buf, PAGE_SIZE, "%x\n", status);
|
||||
}
|
||||
|
||||
static ssize_t dlm_recover_nodeid_show(struct dlm_ls *ls, char *buf)
|
||||
{
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", ls->ls_recover_nodeid);
|
||||
}
|
||||
|
||||
struct dlm_attr {
|
||||
struct attribute attr;
|
||||
ssize_t (*show)(struct dlm_ls *, char *);
|
||||
ssize_t (*store)(struct dlm_ls *, const char *, size_t);
|
||||
};
|
||||
|
||||
static struct dlm_attr dlm_attr_control = {
|
||||
.attr = {.name = "control", .mode = S_IWUSR},
|
||||
.store = dlm_control_store
|
||||
};
|
||||
|
||||
static struct dlm_attr dlm_attr_event = {
|
||||
.attr = {.name = "event_done", .mode = S_IWUSR},
|
||||
.store = dlm_event_store
|
||||
};
|
||||
|
||||
static struct dlm_attr dlm_attr_id = {
|
||||
.attr = {.name = "id", .mode = S_IRUGO | S_IWUSR},
|
||||
.show = dlm_id_show,
|
||||
.store = dlm_id_store
|
||||
};
|
||||
|
||||
static struct dlm_attr dlm_attr_recover_status = {
|
||||
.attr = {.name = "recover_status", .mode = S_IRUGO},
|
||||
.show = dlm_recover_status_show
|
||||
};
|
||||
|
||||
static struct dlm_attr dlm_attr_recover_nodeid = {
|
||||
.attr = {.name = "recover_nodeid", .mode = S_IRUGO},
|
||||
.show = dlm_recover_nodeid_show
|
||||
};
|
||||
|
||||
static struct attribute *dlm_attrs[] = {
|
||||
&dlm_attr_control.attr,
|
||||
&dlm_attr_event.attr,
|
||||
&dlm_attr_id.attr,
|
||||
&dlm_attr_recover_status.attr,
|
||||
&dlm_attr_recover_nodeid.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static ssize_t dlm_attr_show(struct kobject *kobj, struct attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
|
||||
struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
|
||||
return a->show ? a->show(ls, buf) : 0;
|
||||
}
|
||||
|
||||
static ssize_t dlm_attr_store(struct kobject *kobj, struct attribute *attr,
|
||||
const char *buf, size_t len)
|
||||
{
|
||||
struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
|
||||
struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
|
||||
return a->store ? a->store(ls, buf, len) : len;
|
||||
}
|
||||
|
||||
static struct sysfs_ops dlm_attr_ops = {
|
||||
.show = dlm_attr_show,
|
||||
.store = dlm_attr_store,
|
||||
};
|
||||
|
||||
static struct kobj_type dlm_ktype = {
|
||||
.default_attrs = dlm_attrs,
|
||||
.sysfs_ops = &dlm_attr_ops,
|
||||
};
|
||||
|
||||
static struct kset dlm_kset = {
|
||||
.subsys = &kernel_subsys,
|
||||
.kobj = {.name = "dlm",},
|
||||
.ktype = &dlm_ktype,
|
||||
};
|
||||
|
||||
static int kobject_setup(struct dlm_ls *ls)
|
||||
{
|
||||
char lsname[DLM_LOCKSPACE_LEN];
|
||||
int error;
|
||||
|
||||
memset(lsname, 0, DLM_LOCKSPACE_LEN);
|
||||
snprintf(lsname, DLM_LOCKSPACE_LEN, "%s", ls->ls_name);
|
||||
|
||||
error = kobject_set_name(&ls->ls_kobj, "%s", lsname);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
ls->ls_kobj.kset = &dlm_kset;
|
||||
ls->ls_kobj.ktype = &dlm_ktype;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int do_uevent(struct dlm_ls *ls, int in)
|
||||
{
|
||||
int error;
|
||||
|
||||
if (in)
|
||||
kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE);
|
||||
else
|
||||
kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE);
|
||||
|
||||
error = wait_event_interruptible(ls->ls_uevent_wait,
|
||||
test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags));
|
||||
if (error)
|
||||
goto out;
|
||||
|
||||
error = ls->ls_uevent_result;
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
||||
|
||||
int dlm_lockspace_init(void)
|
||||
{
|
||||
int error;
|
||||
|
||||
ls_count = 0;
|
||||
mutex_init(&ls_lock);
|
||||
INIT_LIST_HEAD(&lslist);
|
||||
spin_lock_init(&lslist_lock);
|
||||
|
||||
error = kset_register(&dlm_kset);
|
||||
if (error)
|
||||
printk("dlm_lockspace_init: cannot register kset %d\n", error);
|
||||
return error;
|
||||
}
|
||||
|
||||
void dlm_lockspace_exit(void)
|
||||
{
|
||||
kset_unregister(&dlm_kset);
|
||||
}
|
||||
|
||||
static int dlm_scand(void *data)
|
||||
{
|
||||
struct dlm_ls *ls;
|
||||
|
||||
while (!kthread_should_stop()) {
|
||||
list_for_each_entry(ls, &lslist, ls_list)
|
||||
dlm_scan_rsbs(ls);
|
||||
schedule_timeout_interruptible(dlm_config.scan_secs * HZ);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dlm_scand_start(void)
|
||||
{
|
||||
struct task_struct *p;
|
||||
int error = 0;
|
||||
|
||||
p = kthread_run(dlm_scand, NULL, "dlm_scand");
|
||||
if (IS_ERR(p))
|
||||
error = PTR_ERR(p);
|
||||
else
|
||||
scand_task = p;
|
||||
return error;
|
||||
}
|
||||
|
||||
static void dlm_scand_stop(void)
|
||||
{
|
||||
kthread_stop(scand_task);
|
||||
}
|
||||
|
||||
static struct dlm_ls *dlm_find_lockspace_name(char *name, int namelen)
|
||||
{
|
||||
struct dlm_ls *ls;
|
||||
|
||||
spin_lock(&lslist_lock);
|
||||
|
||||
list_for_each_entry(ls, &lslist, ls_list) {
|
||||
if (ls->ls_namelen == namelen &&
|
||||
memcmp(ls->ls_name, name, namelen) == 0)
|
||||
goto out;
|
||||
}
|
||||
ls = NULL;
|
||||
out:
|
||||
spin_unlock(&lslist_lock);
|
||||
return ls;
|
||||
}
|
||||
|
||||
struct dlm_ls *dlm_find_lockspace_global(uint32_t id)
|
||||
{
|
||||
struct dlm_ls *ls;
|
||||
|
||||
spin_lock(&lslist_lock);
|
||||
|
||||
list_for_each_entry(ls, &lslist, ls_list) {
|
||||
if (ls->ls_global_id == id) {
|
||||
ls->ls_count++;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
ls = NULL;
|
||||
out:
|
||||
spin_unlock(&lslist_lock);
|
||||
return ls;
|
||||
}
|
||||
|
||||
struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace)
|
||||
{
|
||||
struct dlm_ls *ls;
|
||||
|
||||
spin_lock(&lslist_lock);
|
||||
list_for_each_entry(ls, &lslist, ls_list) {
|
||||
if (ls->ls_local_handle == lockspace) {
|
||||
ls->ls_count++;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
ls = NULL;
|
||||
out:
|
||||
spin_unlock(&lslist_lock);
|
||||
return ls;
|
||||
}
|
||||
|
||||
struct dlm_ls *dlm_find_lockspace_device(int minor)
|
||||
{
|
||||
struct dlm_ls *ls;
|
||||
|
||||
spin_lock(&lslist_lock);
|
||||
list_for_each_entry(ls, &lslist, ls_list) {
|
||||
if (ls->ls_device.minor == minor) {
|
||||
ls->ls_count++;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
ls = NULL;
|
||||
out:
|
||||
spin_unlock(&lslist_lock);
|
||||
return ls;
|
||||
}
|
||||
|
||||
void dlm_put_lockspace(struct dlm_ls *ls)
|
||||
{
|
||||
spin_lock(&lslist_lock);
|
||||
ls->ls_count--;
|
||||
spin_unlock(&lslist_lock);
|
||||
}
|
||||
|
||||
static void remove_lockspace(struct dlm_ls *ls)
|
||||
{
|
||||
for (;;) {
|
||||
spin_lock(&lslist_lock);
|
||||
if (ls->ls_count == 0) {
|
||||
list_del(&ls->ls_list);
|
||||
spin_unlock(&lslist_lock);
|
||||
return;
|
||||
}
|
||||
spin_unlock(&lslist_lock);
|
||||
ssleep(1);
|
||||
}
|
||||
}
|
||||
|
||||
static int threads_start(void)
|
||||
{
|
||||
int error;
|
||||
|
||||
/* Thread which processes lock requests for all lockspaces */
|
||||
error = dlm_astd_start();
|
||||
if (error) {
|
||||
log_print("cannot start dlm_astd thread %d", error);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
error = dlm_scand_start();
|
||||
if (error) {
|
||||
log_print("cannot start dlm_scand thread %d", error);
|
||||
goto astd_fail;
|
||||
}
|
||||
|
||||
/* Thread for sending/receiving messages for all lockspaces */
|
||||
error = dlm_lowcomms_start();
|
||||
if (error) {
|
||||
log_print("cannot start dlm lowcomms %d", error);
|
||||
goto scand_fail;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
scand_fail:
|
||||
dlm_scand_stop();
|
||||
astd_fail:
|
||||
dlm_astd_stop();
|
||||
fail:
|
||||
return error;
|
||||
}
|
||||
|
||||
static void threads_stop(void)
|
||||
{
|
||||
dlm_scand_stop();
|
||||
dlm_lowcomms_stop();
|
||||
dlm_astd_stop();
|
||||
}
|
||||
|
||||
static int new_lockspace(char *name, int namelen, void **lockspace,
|
||||
uint32_t flags, int lvblen)
|
||||
{
|
||||
struct dlm_ls *ls;
|
||||
int i, size, error = -ENOMEM;
|
||||
|
||||
if (namelen > DLM_LOCKSPACE_LEN)
|
||||
return -EINVAL;
|
||||
|
||||
if (!lvblen || (lvblen % 8))
|
||||
return -EINVAL;
|
||||
|
||||
if (!try_module_get(THIS_MODULE))
|
||||
return -EINVAL;
|
||||
|
||||
ls = dlm_find_lockspace_name(name, namelen);
|
||||
if (ls) {
|
||||
*lockspace = ls;
|
||||
module_put(THIS_MODULE);
|
||||
return -EEXIST;
|
||||
}
|
||||
|
||||
ls = kzalloc(sizeof(struct dlm_ls) + namelen, GFP_KERNEL);
|
||||
if (!ls)
|
||||
goto out;
|
||||
memcpy(ls->ls_name, name, namelen);
|
||||
ls->ls_namelen = namelen;
|
||||
ls->ls_exflags = flags;
|
||||
ls->ls_lvblen = lvblen;
|
||||
ls->ls_count = 0;
|
||||
ls->ls_flags = 0;
|
||||
|
||||
size = dlm_config.rsbtbl_size;
|
||||
ls->ls_rsbtbl_size = size;
|
||||
|
||||
ls->ls_rsbtbl = kmalloc(sizeof(struct dlm_rsbtable) * size, GFP_KERNEL);
|
||||
if (!ls->ls_rsbtbl)
|
||||
goto out_lsfree;
|
||||
for (i = 0; i < size; i++) {
|
||||
INIT_LIST_HEAD(&ls->ls_rsbtbl[i].list);
|
||||
INIT_LIST_HEAD(&ls->ls_rsbtbl[i].toss);
|
||||
rwlock_init(&ls->ls_rsbtbl[i].lock);
|
||||
}
|
||||
|
||||
size = dlm_config.lkbtbl_size;
|
||||
ls->ls_lkbtbl_size = size;
|
||||
|
||||
ls->ls_lkbtbl = kmalloc(sizeof(struct dlm_lkbtable) * size, GFP_KERNEL);
|
||||
if (!ls->ls_lkbtbl)
|
||||
goto out_rsbfree;
|
||||
for (i = 0; i < size; i++) {
|
||||
INIT_LIST_HEAD(&ls->ls_lkbtbl[i].list);
|
||||
rwlock_init(&ls->ls_lkbtbl[i].lock);
|
||||
ls->ls_lkbtbl[i].counter = 1;
|
||||
}
|
||||
|
||||
size = dlm_config.dirtbl_size;
|
||||
ls->ls_dirtbl_size = size;
|
||||
|
||||
ls->ls_dirtbl = kmalloc(sizeof(struct dlm_dirtable) * size, GFP_KERNEL);
|
||||
if (!ls->ls_dirtbl)
|
||||
goto out_lkbfree;
|
||||
for (i = 0; i < size; i++) {
|
||||
INIT_LIST_HEAD(&ls->ls_dirtbl[i].list);
|
||||
rwlock_init(&ls->ls_dirtbl[i].lock);
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&ls->ls_waiters);
|
||||
mutex_init(&ls->ls_waiters_mutex);
|
||||
|
||||
INIT_LIST_HEAD(&ls->ls_nodes);
|
||||
INIT_LIST_HEAD(&ls->ls_nodes_gone);
|
||||
ls->ls_num_nodes = 0;
|
||||
ls->ls_low_nodeid = 0;
|
||||
ls->ls_total_weight = 0;
|
||||
ls->ls_node_array = NULL;
|
||||
|
||||
memset(&ls->ls_stub_rsb, 0, sizeof(struct dlm_rsb));
|
||||
ls->ls_stub_rsb.res_ls = ls;
|
||||
|
||||
ls->ls_debug_rsb_dentry = NULL;
|
||||
ls->ls_debug_waiters_dentry = NULL;
|
||||
|
||||
init_waitqueue_head(&ls->ls_uevent_wait);
|
||||
ls->ls_uevent_result = 0;
|
||||
|
||||
ls->ls_recoverd_task = NULL;
|
||||
mutex_init(&ls->ls_recoverd_active);
|
||||
spin_lock_init(&ls->ls_recover_lock);
|
||||
ls->ls_recover_status = 0;
|
||||
ls->ls_recover_seq = 0;
|
||||
ls->ls_recover_args = NULL;
|
||||
init_rwsem(&ls->ls_in_recovery);
|
||||
INIT_LIST_HEAD(&ls->ls_requestqueue);
|
||||
mutex_init(&ls->ls_requestqueue_mutex);
|
||||
mutex_init(&ls->ls_clear_proc_locks);
|
||||
|
||||
ls->ls_recover_buf = kmalloc(dlm_config.buffer_size, GFP_KERNEL);
|
||||
if (!ls->ls_recover_buf)
|
||||
goto out_dirfree;
|
||||
|
||||
INIT_LIST_HEAD(&ls->ls_recover_list);
|
||||
spin_lock_init(&ls->ls_recover_list_lock);
|
||||
ls->ls_recover_list_count = 0;
|
||||
ls->ls_local_handle = ls;
|
||||
init_waitqueue_head(&ls->ls_wait_general);
|
||||
INIT_LIST_HEAD(&ls->ls_root_list);
|
||||
init_rwsem(&ls->ls_root_sem);
|
||||
|
||||
down_write(&ls->ls_in_recovery);
|
||||
|
||||
spin_lock(&lslist_lock);
|
||||
list_add(&ls->ls_list, &lslist);
|
||||
spin_unlock(&lslist_lock);
|
||||
|
||||
/* needs to find ls in lslist */
|
||||
error = dlm_recoverd_start(ls);
|
||||
if (error) {
|
||||
log_error(ls, "can't start dlm_recoverd %d", error);
|
||||
goto out_rcomfree;
|
||||
}
|
||||
|
||||
dlm_create_debug_file(ls);
|
||||
|
||||
error = kobject_setup(ls);
|
||||
if (error)
|
||||
goto out_del;
|
||||
|
||||
error = kobject_register(&ls->ls_kobj);
|
||||
if (error)
|
||||
goto out_del;
|
||||
|
||||
error = do_uevent(ls, 1);
|
||||
if (error)
|
||||
goto out_unreg;
|
||||
|
||||
*lockspace = ls;
|
||||
return 0;
|
||||
|
||||
out_unreg:
|
||||
kobject_unregister(&ls->ls_kobj);
|
||||
out_del:
|
||||
dlm_delete_debug_file(ls);
|
||||
dlm_recoverd_stop(ls);
|
||||
out_rcomfree:
|
||||
spin_lock(&lslist_lock);
|
||||
list_del(&ls->ls_list);
|
||||
spin_unlock(&lslist_lock);
|
||||
kfree(ls->ls_recover_buf);
|
||||
out_dirfree:
|
||||
kfree(ls->ls_dirtbl);
|
||||
out_lkbfree:
|
||||
kfree(ls->ls_lkbtbl);
|
||||
out_rsbfree:
|
||||
kfree(ls->ls_rsbtbl);
|
||||
out_lsfree:
|
||||
kfree(ls);
|
||||
out:
|
||||
module_put(THIS_MODULE);
|
||||
return error;
|
||||
}
|
||||
|
||||
int dlm_new_lockspace(char *name, int namelen, void **lockspace,
|
||||
uint32_t flags, int lvblen)
|
||||
{
|
||||
int error = 0;
|
||||
|
||||
mutex_lock(&ls_lock);
|
||||
if (!ls_count)
|
||||
error = threads_start();
|
||||
if (error)
|
||||
goto out;
|
||||
|
||||
error = new_lockspace(name, namelen, lockspace, flags, lvblen);
|
||||
if (!error)
|
||||
ls_count++;
|
||||
out:
|
||||
mutex_unlock(&ls_lock);
|
||||
return error;
|
||||
}
|
||||
|
||||
/* Return 1 if the lockspace still has active remote locks,
|
||||
* 2 if the lockspace still has active local locks.
|
||||
*/
|
||||
static int lockspace_busy(struct dlm_ls *ls)
|
||||
{
|
||||
int i, lkb_found = 0;
|
||||
struct dlm_lkb *lkb;
|
||||
|
||||
/* NOTE: We check the lockidtbl here rather than the resource table.
|
||||
This is because there may be LKBs queued as ASTs that have been
|
||||
unlinked from their RSBs and are pending deletion once the AST has
|
||||
been delivered */
|
||||
|
||||
for (i = 0; i < ls->ls_lkbtbl_size; i++) {
|
||||
read_lock(&ls->ls_lkbtbl[i].lock);
|
||||
if (!list_empty(&ls->ls_lkbtbl[i].list)) {
|
||||
lkb_found = 1;
|
||||
list_for_each_entry(lkb, &ls->ls_lkbtbl[i].list,
|
||||
lkb_idtbl_list) {
|
||||
if (!lkb->lkb_nodeid) {
|
||||
read_unlock(&ls->ls_lkbtbl[i].lock);
|
||||
return 2;
|
||||
}
|
||||
}
|
||||
}
|
||||
read_unlock(&ls->ls_lkbtbl[i].lock);
|
||||
}
|
||||
return lkb_found;
|
||||
}
|
||||
|
||||
static int release_lockspace(struct dlm_ls *ls, int force)
|
||||
{
|
||||
struct dlm_lkb *lkb;
|
||||
struct dlm_rsb *rsb;
|
||||
struct list_head *head;
|
||||
int i;
|
||||
int busy = lockspace_busy(ls);
|
||||
|
||||
if (busy > force)
|
||||
return -EBUSY;
|
||||
|
||||
if (force < 3)
|
||||
do_uevent(ls, 0);
|
||||
|
||||
dlm_recoverd_stop(ls);
|
||||
|
||||
remove_lockspace(ls);
|
||||
|
||||
dlm_delete_debug_file(ls);
|
||||
|
||||
dlm_astd_suspend();
|
||||
|
||||
kfree(ls->ls_recover_buf);
|
||||
|
||||
/*
|
||||
* Free direntry structs.
|
||||
*/
|
||||
|
||||
dlm_dir_clear(ls);
|
||||
kfree(ls->ls_dirtbl);
|
||||
|
||||
/*
|
||||
* Free all lkb's on lkbtbl[] lists.
|
||||
*/
|
||||
|
||||
for (i = 0; i < ls->ls_lkbtbl_size; i++) {
|
||||
head = &ls->ls_lkbtbl[i].list;
|
||||
while (!list_empty(head)) {
|
||||
lkb = list_entry(head->next, struct dlm_lkb,
|
||||
lkb_idtbl_list);
|
||||
|
||||
list_del(&lkb->lkb_idtbl_list);
|
||||
|
||||
dlm_del_ast(lkb);
|
||||
|
||||
if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY)
|
||||
free_lvb(lkb->lkb_lvbptr);
|
||||
|
||||
free_lkb(lkb);
|
||||
}
|
||||
}
|
||||
dlm_astd_resume();
|
||||
|
||||
kfree(ls->ls_lkbtbl);
|
||||
|
||||
/*
|
||||
* Free all rsb's on rsbtbl[] lists
|
||||
*/
|
||||
|
||||
for (i = 0; i < ls->ls_rsbtbl_size; i++) {
|
||||
head = &ls->ls_rsbtbl[i].list;
|
||||
while (!list_empty(head)) {
|
||||
rsb = list_entry(head->next, struct dlm_rsb,
|
||||
res_hashchain);
|
||||
|
||||
list_del(&rsb->res_hashchain);
|
||||
free_rsb(rsb);
|
||||
}
|
||||
|
||||
head = &ls->ls_rsbtbl[i].toss;
|
||||
while (!list_empty(head)) {
|
||||
rsb = list_entry(head->next, struct dlm_rsb,
|
||||
res_hashchain);
|
||||
list_del(&rsb->res_hashchain);
|
||||
free_rsb(rsb);
|
||||
}
|
||||
}
|
||||
|
||||
kfree(ls->ls_rsbtbl);
|
||||
|
||||
/*
|
||||
* Free structures on any other lists
|
||||
*/
|
||||
|
||||
kfree(ls->ls_recover_args);
|
||||
dlm_clear_free_entries(ls);
|
||||
dlm_clear_members(ls);
|
||||
dlm_clear_members_gone(ls);
|
||||
kfree(ls->ls_node_array);
|
||||
kobject_unregister(&ls->ls_kobj);
|
||||
kfree(ls);
|
||||
|
||||
mutex_lock(&ls_lock);
|
||||
ls_count--;
|
||||
if (!ls_count)
|
||||
threads_stop();
|
||||
mutex_unlock(&ls_lock);
|
||||
|
||||
module_put(THIS_MODULE);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Called when a system has released all its locks and is not going to use the
|
||||
* lockspace any longer. We free everything we're managing for this lockspace.
|
||||
* Remaining nodes will go through the recovery process as if we'd died. The
|
||||
* lockspace must continue to function as usual, participating in recoveries,
|
||||
* until this returns.
|
||||
*
|
||||
* Force has 4 possible values:
|
||||
* 0 - don't destroy lockspace if it has any LKBs
|
||||
* 1 - destroy lockspace if it has remote LKBs but not if it has local LKBs
|
||||
* 2 - destroy lockspace regardless of LKBs
|
||||
* 3 - destroy lockspace as part of a forced shutdown
|
||||
*/
|
||||
|
||||
int dlm_release_lockspace(void *lockspace, int force)
|
||||
{
|
||||
struct dlm_ls *ls;
|
||||
|
||||
ls = dlm_find_lockspace_local(lockspace);
|
||||
if (!ls)
|
||||
return -EINVAL;
|
||||
dlm_put_lockspace(ls);
|
||||
return release_lockspace(ls, force);
|
||||
}
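
For readers unfamiliar with the exported interface, a minimal kernel-side usage sketch follows. The lockspace name, LVB length and helper names are made-up example values; only dlm_new_lockspace() and dlm_release_lockspace() from this file are assumed.

/* Minimal illustrative caller of the lockspace API (not part of the DLM). */
#include <linux/dlm.h>

static dlm_lockspace_t *example_ls;

static int example_setup(void)
{
        /* lvblen must be a non-zero multiple of 8; flags 0 = default behaviour */
        return dlm_new_lockspace("example", 7, &example_ls, 0, 32);
}

static void example_teardown(void)
{
        /* force 0: fails with -EBUSY if the lockspace still holds any LKBs */
        dlm_release_lockspace(example_ls, 0);
}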

25  fs/dlm/lockspace.h  Normal file
@@ -0,0 +1,25 @@
/******************************************************************************
|
||||
*******************************************************************************
|
||||
**
|
||||
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
|
||||
**
|
||||
** This copyrighted material is made available to anyone wishing to use,
|
||||
** modify, copy, or redistribute it subject to the terms and conditions
|
||||
** of the GNU General Public License v.2.
|
||||
**
|
||||
*******************************************************************************
|
||||
******************************************************************************/
|
||||
|
||||
#ifndef __LOCKSPACE_DOT_H__
|
||||
#define __LOCKSPACE_DOT_H__
|
||||
|
||||
int dlm_lockspace_init(void);
|
||||
void dlm_lockspace_exit(void);
|
||||
struct dlm_ls *dlm_find_lockspace_global(uint32_t id);
|
||||
struct dlm_ls *dlm_find_lockspace_local(void *id);
|
||||
struct dlm_ls *dlm_find_lockspace_device(int minor);
|
||||
void dlm_put_lockspace(struct dlm_ls *ls);
|
||||
|
||||
#endif /* __LOCKSPACE_DOT_H__ */

1238  fs/dlm/lowcomms.c  Normal file
File diff suppressed because it is too large
26  fs/dlm/lowcomms.h  Normal file
@@ -0,0 +1,26 @@
/******************************************************************************
|
||||
*******************************************************************************
|
||||
**
|
||||
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
|
||||
**
|
||||
** This copyrighted material is made available to anyone wishing to use,
|
||||
** modify, copy, or redistribute it subject to the terms and conditions
|
||||
** of the GNU General Public License v.2.
|
||||
**
|
||||
*******************************************************************************
|
||||
******************************************************************************/
|
||||
|
||||
#ifndef __LOWCOMMS_DOT_H__
|
||||
#define __LOWCOMMS_DOT_H__
|
||||
|
||||
int dlm_lowcomms_init(void);
|
||||
void dlm_lowcomms_exit(void);
|
||||
int dlm_lowcomms_start(void);
|
||||
void dlm_lowcomms_stop(void);
|
||||
int dlm_lowcomms_close(int nodeid);
|
||||
void *dlm_lowcomms_get_buffer(int nodeid, int len, int allocation, char **ppc);
|
||||
void dlm_lowcomms_commit_buffer(void *mh);
|
||||
|
||||
#endif /* __LOWCOMMS_DOT_H__ */

18  fs/dlm/lvb_table.h  Normal file
@@ -0,0 +1,18 @@
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) 2005 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#ifndef __LVB_TABLE_DOT_H__
#define __LVB_TABLE_DOT_H__

extern const int dlm_lvb_operations[8][8];

#endif

97  fs/dlm/main.c  Normal file
@@ -0,0 +1,97 @@
/******************************************************************************
|
||||
*******************************************************************************
|
||||
**
|
||||
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
|
||||
**
|
||||
** This copyrighted material is made available to anyone wishing to use,
|
||||
** modify, copy, or redistribute it subject to the terms and conditions
|
||||
** of the GNU General Public License v.2.
|
||||
**
|
||||
*******************************************************************************
|
||||
******************************************************************************/
|
||||
|
||||
#include "dlm_internal.h"
|
||||
#include "lockspace.h"
|
||||
#include "lock.h"
|
||||
#include "user.h"
|
||||
#include "memory.h"
|
||||
#include "lowcomms.h"
|
||||
#include "config.h"
|
||||
|
||||
#ifdef CONFIG_DLM_DEBUG
|
||||
int dlm_register_debugfs(void);
|
||||
void dlm_unregister_debugfs(void);
|
||||
#else
|
||||
static inline int dlm_register_debugfs(void) { return 0; }
|
||||
static inline void dlm_unregister_debugfs(void) { }
|
||||
#endif
|
||||
|
||||
static int __init init_dlm(void)
|
||||
{
|
||||
int error;
|
||||
|
||||
error = dlm_memory_init();
|
||||
if (error)
|
||||
goto out;
|
||||
|
||||
error = dlm_lockspace_init();
|
||||
if (error)
|
||||
goto out_mem;
|
||||
|
||||
error = dlm_config_init();
|
||||
if (error)
|
||||
goto out_lockspace;
|
||||
|
||||
error = dlm_register_debugfs();
|
||||
if (error)
|
||||
goto out_config;
|
||||
|
||||
error = dlm_lowcomms_init();
|
||||
if (error)
|
||||
goto out_debug;
|
||||
|
||||
error = dlm_user_init();
|
||||
if (error)
|
||||
goto out_lowcomms;
|
||||
|
||||
printk("DLM (built %s %s) installed\n", __DATE__, __TIME__);
|
||||
|
||||
return 0;
|
||||
|
||||
out_lowcomms:
|
||||
dlm_lowcomms_exit();
|
||||
out_debug:
|
||||
dlm_unregister_debugfs();
|
||||
out_config:
|
||||
dlm_config_exit();
|
||||
out_lockspace:
|
||||
dlm_lockspace_exit();
|
||||
out_mem:
|
||||
dlm_memory_exit();
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
||||
static void __exit exit_dlm(void)
|
||||
{
|
||||
dlm_user_exit();
|
||||
dlm_lowcomms_exit();
|
||||
dlm_config_exit();
|
||||
dlm_memory_exit();
|
||||
dlm_lockspace_exit();
|
||||
dlm_unregister_debugfs();
|
||||
}
|
||||
|
||||
module_init(init_dlm);
|
||||
module_exit(exit_dlm);
|
||||
|
||||
MODULE_DESCRIPTION("Distributed Lock Manager");
|
||||
MODULE_AUTHOR("Red Hat, Inc.");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
EXPORT_SYMBOL_GPL(dlm_new_lockspace);
|
||||
EXPORT_SYMBOL_GPL(dlm_release_lockspace);
|
||||
EXPORT_SYMBOL_GPL(dlm_lock);
|
||||
EXPORT_SYMBOL_GPL(dlm_unlock);

327  fs/dlm/member.c  Normal file
@@ -0,0 +1,327 @@
/******************************************************************************
*******************************************************************************
**
** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
** of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "member.h"
#include "recoverd.h"
#include "recover.h"
#include "rcom.h"
#include "config.h"

/*
 * Following called by dlm_recoverd thread
 */

static void add_ordered_member(struct dlm_ls *ls, struct dlm_member *new)
{
	struct dlm_member *memb = NULL;
	struct list_head *tmp;
	struct list_head *newlist = &new->list;
	struct list_head *head = &ls->ls_nodes;

	list_for_each(tmp, head) {
		memb = list_entry(tmp, struct dlm_member, list);
		if (new->nodeid < memb->nodeid)
			break;
	}

	if (!memb)
		list_add_tail(newlist, head);
	else {
		/* FIXME: can use list macro here */
		newlist->prev = tmp->prev;
		newlist->next = tmp;
		tmp->prev->next = newlist;
		tmp->prev = newlist;
	}
}

static int dlm_add_member(struct dlm_ls *ls, int nodeid)
{
	struct dlm_member *memb;
	int w;

	memb = kzalloc(sizeof(struct dlm_member), GFP_KERNEL);
	if (!memb)
		return -ENOMEM;

	w = dlm_node_weight(ls->ls_name, nodeid);
	if (w < 0)
		return w;

	memb->nodeid = nodeid;
	memb->weight = w;
	add_ordered_member(ls, memb);
	ls->ls_num_nodes++;
	return 0;
}

static void dlm_remove_member(struct dlm_ls *ls, struct dlm_member *memb)
{
	list_move(&memb->list, &ls->ls_nodes_gone);
	ls->ls_num_nodes--;
}

static int dlm_is_member(struct dlm_ls *ls, int nodeid)
{
	struct dlm_member *memb;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (memb->nodeid == nodeid)
			return 1;
	}
	return 0;
}

int dlm_is_removed(struct dlm_ls *ls, int nodeid)
{
	struct dlm_member *memb;

	list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
		if (memb->nodeid == nodeid)
			return 1;
	}
	return 0;
}

static void clear_memb_list(struct list_head *head)
{
	struct dlm_member *memb;

	while (!list_empty(head)) {
		memb = list_entry(head->next, struct dlm_member, list);
		list_del(&memb->list);
		kfree(memb);
	}
}

void dlm_clear_members(struct dlm_ls *ls)
{
	clear_memb_list(&ls->ls_nodes);
	ls->ls_num_nodes = 0;
}

void dlm_clear_members_gone(struct dlm_ls *ls)
{
	clear_memb_list(&ls->ls_nodes_gone);
}

static void make_member_array(struct dlm_ls *ls)
{
	struct dlm_member *memb;
	int i, w, x = 0, total = 0, all_zero = 0, *array;

	kfree(ls->ls_node_array);
	ls->ls_node_array = NULL;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (memb->weight)
			total += memb->weight;
	}

	/* all nodes revert to weight of 1 if all have weight 0 */

	if (!total) {
		total = ls->ls_num_nodes;
		all_zero = 1;
	}

	ls->ls_total_weight = total;

	array = kmalloc(sizeof(int) * total, GFP_KERNEL);
	if (!array)
		return;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (!all_zero && !memb->weight)
			continue;

		if (all_zero)
			w = 1;
		else
			w = memb->weight;

		DLM_ASSERT(x < total, printk("total %d x %d\n", total, x););

		for (i = 0; i < w; i++)
			array[x++] = memb->nodeid;
	}

	ls->ls_node_array = array;
}

/* send a status request to all members just to establish comms connections */

static int ping_members(struct dlm_ls *ls)
{
	struct dlm_member *memb;
	int error = 0;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		error = dlm_recovery_stopped(ls);
		if (error)
			break;
		error = dlm_rcom_status(ls, memb->nodeid);
		if (error)
			break;
	}
	if (error)
		log_debug(ls, "ping_members aborted %d last nodeid %d",
			  error, ls->ls_recover_nodeid);
	return error;
}

int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
{
	struct dlm_member *memb, *safe;
	int i, error, found, pos = 0, neg = 0, low = -1;

	/* move departed members from ls_nodes to ls_nodes_gone */

	list_for_each_entry_safe(memb, safe, &ls->ls_nodes, list) {
		found = 0;
		for (i = 0; i < rv->node_count; i++) {
			if (memb->nodeid == rv->nodeids[i]) {
				found = 1;
				break;
			}
		}

		if (!found) {
			neg++;
			dlm_remove_member(ls, memb);
			log_debug(ls, "remove member %d", memb->nodeid);
		}
	}

	/* add new members to ls_nodes */

	for (i = 0; i < rv->node_count; i++) {
		if (dlm_is_member(ls, rv->nodeids[i]))
			continue;
		dlm_add_member(ls, rv->nodeids[i]);
		pos++;
		log_debug(ls, "add member %d", rv->nodeids[i]);
	}

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (low == -1 || memb->nodeid < low)
			low = memb->nodeid;
	}
	ls->ls_low_nodeid = low;

	make_member_array(ls);
	dlm_set_recover_status(ls, DLM_RS_NODES);
	*neg_out = neg;

	error = ping_members(ls);
	if (error)
		goto out;

	error = dlm_recover_members_wait(ls);
 out:
	log_debug(ls, "total members %d error %d", ls->ls_num_nodes, error);
	return error;
}

/*
 * Following called from lockspace.c
 */

int dlm_ls_stop(struct dlm_ls *ls)
{
	int new;

	/*
	 * A stop cancels any recovery that's in progress (see RECOVERY_STOP,
	 * dlm_recovery_stopped()) and prevents any new locks from being
	 * processed (see RUNNING, dlm_locking_stopped()).
	 */

	spin_lock(&ls->ls_recover_lock);
	set_bit(LSFL_RECOVERY_STOP, &ls->ls_flags);
	new = test_and_clear_bit(LSFL_RUNNING, &ls->ls_flags);
	ls->ls_recover_seq++;
	spin_unlock(&ls->ls_recover_lock);

	/*
	 * This in_recovery lock does two things:
	 *
	 * 1) Keeps this function from returning until all threads are out
	 *    of locking routines and locking is truly stopped.
	 * 2) Keeps any new requests from being processed until it's unlocked
	 *    when recovery is complete.
	 */

	if (new)
		down_write(&ls->ls_in_recovery);

	/*
	 * The recoverd suspend/resume makes sure that dlm_recoverd (if
	 * running) has noticed the clearing of RUNNING above and quit
	 * processing the previous recovery. This will be true for all nodes
	 * before any nodes start the new recovery.
	 */

	dlm_recoverd_suspend(ls);
	ls->ls_recover_status = 0;
	dlm_recoverd_resume(ls);
	return 0;
}

int dlm_ls_start(struct dlm_ls *ls)
{
	struct dlm_recover *rv = NULL, *rv_old;
	int *ids = NULL;
	int error, count;

	rv = kzalloc(sizeof(struct dlm_recover), GFP_KERNEL);
	if (!rv)
		return -ENOMEM;

	error = count = dlm_nodeid_list(ls->ls_name, &ids);
	if (error <= 0)
		goto fail;

	spin_lock(&ls->ls_recover_lock);

	/* the lockspace needs to be stopped before it can be started */

	if (!dlm_locking_stopped(ls)) {
		spin_unlock(&ls->ls_recover_lock);
		log_error(ls, "start ignored: lockspace running");
		error = -EINVAL;
		goto fail;
	}

	rv->nodeids = ids;
	rv->node_count = count;
	rv->seq = ++ls->ls_recover_seq;
	rv_old = ls->ls_recover_args;
	ls->ls_recover_args = rv;
	spin_unlock(&ls->ls_recover_lock);

	if (rv_old) {
		kfree(rv_old->nodeids);
		kfree(rv_old);
	}

	dlm_recoverd_kick(ls);
	return 0;

 fail:
	kfree(rv);
	kfree(ids);
	return error;
}
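The FIXME in add_ordered_member() notes that the hand-rolled pointer splice duplicates what the generic list helper already does: list_add_tail(new, pos) links new immediately before pos. A minimal sketch of the simplification it suggests (illustrative only, not part of this commit; the function name is hypothetical and it assumes only the standard <linux/list.h> semantics and the dlm types above):

/* Illustrative sketch: ordered insert by nodeid using the list helper.
 * list_add_tail(&new->list, &memb->list) inserts the new entry just
 * before 'memb', which is what the open-coded splice above achieves. */
static void add_ordered_member_sketch(struct dlm_ls *ls, struct dlm_member *new)
{
	struct dlm_member *memb;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (new->nodeid < memb->nodeid) {
			/* insert before the first member with a larger nodeid */
			list_add_tail(&new->list, &memb->list);
			return;
		}
	}
	/* largest nodeid seen so far: append at the end of ls_nodes */
	list_add_tail(&new->list, &ls->ls_nodes);
}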
24
fs/dlm/member.h
Normal file

@@ -0,0 +1,24 @@
/******************************************************************************
*******************************************************************************
**
** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
** of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#ifndef __MEMBER_DOT_H__
#define __MEMBER_DOT_H__

int dlm_ls_stop(struct dlm_ls *ls);
int dlm_ls_start(struct dlm_ls *ls);
void dlm_clear_members(struct dlm_ls *ls);
void dlm_clear_members_gone(struct dlm_ls *ls);
int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv,int *neg_out);
int dlm_is_removed(struct dlm_ls *ls, int nodeid);

#endif	/* __MEMBER_DOT_H__ */
116
fs/dlm/memory.c
Normal file

@@ -0,0 +1,116 @@
/******************************************************************************
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
** of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "config.h"
#include "memory.h"

static kmem_cache_t *lkb_cache;


int dlm_memory_init(void)
{
	int ret = 0;

	lkb_cache = kmem_cache_create("dlm_lkb", sizeof(struct dlm_lkb),
				__alignof__(struct dlm_lkb), 0, NULL, NULL);
	if (!lkb_cache)
		ret = -ENOMEM;
	return ret;
}

void dlm_memory_exit(void)
{
	if (lkb_cache)
		kmem_cache_destroy(lkb_cache);
}

char *allocate_lvb(struct dlm_ls *ls)
{
	char *p;

	p = kmalloc(ls->ls_lvblen, GFP_KERNEL);
	if (p)
		memset(p, 0, ls->ls_lvblen);
	return p;
}

void free_lvb(char *p)
{
	kfree(p);
}

/* FIXME: have some minimal space built-in to rsb for the name and
   kmalloc a separate name if needed, like dentries are done */

struct dlm_rsb *allocate_rsb(struct dlm_ls *ls, int namelen)
{
	struct dlm_rsb *r;

	DLM_ASSERT(namelen <= DLM_RESNAME_MAXLEN,);

	r = kmalloc(sizeof(*r) + namelen, GFP_KERNEL);
	if (r)
		memset(r, 0, sizeof(*r) + namelen);
	return r;
}

void free_rsb(struct dlm_rsb *r)
{
	if (r->res_lvbptr)
		free_lvb(r->res_lvbptr);
	kfree(r);
}

struct dlm_lkb *allocate_lkb(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb;

	lkb = kmem_cache_alloc(lkb_cache, GFP_KERNEL);
	if (lkb)
		memset(lkb, 0, sizeof(*lkb));
	return lkb;
}

void free_lkb(struct dlm_lkb *lkb)
{
	if (lkb->lkb_flags & DLM_IFL_USER) {
		struct dlm_user_args *ua;
		ua = (struct dlm_user_args *)lkb->lkb_astparam;
		if (ua) {
			if (ua->lksb.sb_lvbptr)
				kfree(ua->lksb.sb_lvbptr);
			kfree(ua);
		}
	}
	kmem_cache_free(lkb_cache, lkb);
}

struct dlm_direntry *allocate_direntry(struct dlm_ls *ls, int namelen)
{
	struct dlm_direntry *de;

	DLM_ASSERT(namelen <= DLM_RESNAME_MAXLEN,
		   printk("namelen = %d\n", namelen););

	de = kmalloc(sizeof(*de) + namelen, GFP_KERNEL);
	if (de)
		memset(de, 0, sizeof(*de) + namelen);
	return de;
}

void free_direntry(struct dlm_direntry *de)
{
	kfree(de);
}
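Each allocate_* helper above pairs kmalloc() with a memset() to zero the result. The same effect can be had in one call with kzalloc(), which member.c already uses for dlm_member and dlm_recover allocations. A hedged sketch of two of the helpers written that way (illustrative only, not part of this commit; the _sketch names are hypothetical):

/* Illustrative sketch: zeroed allocations in a single kzalloc() call. */
char *allocate_lvb_sketch(struct dlm_ls *ls)
{
	return kzalloc(ls->ls_lvblen, GFP_KERNEL);
}

struct dlm_rsb *allocate_rsb_sketch(struct dlm_ls *ls, int namelen)
{
	/* the resource name is stored inline after the struct */
	return kzalloc(sizeof(struct dlm_rsb) + namelen, GFP_KERNEL);
}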
29
fs/dlm/memory.h
Normal file

@@ -0,0 +1,29 @@
/******************************************************************************
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
** of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#ifndef __MEMORY_DOT_H__
#define __MEMORY_DOT_H__

int dlm_memory_init(void);
void dlm_memory_exit(void);
struct dlm_rsb *allocate_rsb(struct dlm_ls *ls, int namelen);
void free_rsb(struct dlm_rsb *r);
struct dlm_lkb *allocate_lkb(struct dlm_ls *ls);
void free_lkb(struct dlm_lkb *l);
struct dlm_direntry *allocate_direntry(struct dlm_ls *ls, int namelen);
void free_direntry(struct dlm_direntry *de);
char *allocate_lvb(struct dlm_ls *ls);
void free_lvb(char *l);

#endif	/* __MEMORY_DOT_H__ */
140
fs/dlm/midcomms.c
Normal file

@@ -0,0 +1,140 @@
/******************************************************************************
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
** of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

/*
 * midcomms.c
 *
 * This is the appallingly named "mid-level" comms layer.
 *
 * Its purpose is to take packets from the "real" comms layer,
 * split them up into packets and pass them to the interested
 * part of the locking mechanism.
 *
 * It also takes messages from the locking layer, formats them
 * into packets and sends them to the comms layer.
 */

#include "dlm_internal.h"
#include "lowcomms.h"
#include "config.h"
#include "rcom.h"
#include "lock.h"
#include "midcomms.h"


static void copy_from_cb(void *dst, const void *base, unsigned offset,
			 unsigned len, unsigned limit)
{
	unsigned copy = len;

	if ((copy + offset) > limit)
		copy = limit - offset;
	memcpy(dst, base + offset, copy);
	len -= copy;
	if (len)
		memcpy(dst + copy, base, len);
}

/*
 * Called from the low-level comms layer to process a buffer of
 * commands.
 *
 * Only complete messages are processed here, any "spare" bytes from
 * the end of a buffer are saved and tacked onto the front of the next
 * message that comes in. I doubt this will happen very often but we
 * need to be able to cope with it and I don't want the task to be waiting
 * for packets to come in when there is useful work to be done.
 */

int dlm_process_incoming_buffer(int nodeid, const void *base,
				unsigned offset, unsigned len, unsigned limit)
{
	unsigned char __tmp[DLM_INBUF_LEN];
	struct dlm_header *msg = (struct dlm_header *) __tmp;
	int ret = 0;
	int err = 0;
	uint16_t msglen;
	uint32_t lockspace;

	while (len > sizeof(struct dlm_header)) {

		/* Copy just the header to check the total length. The
		   message may wrap around the end of the buffer back to the
		   start, so we need to use a temp buffer and copy_from_cb. */

		copy_from_cb(msg, base, offset, sizeof(struct dlm_header),
			     limit);

		msglen = le16_to_cpu(msg->h_length);
		lockspace = msg->h_lockspace;

		err = -EINVAL;
		if (msglen < sizeof(struct dlm_header))
			break;
		err = -E2BIG;
		if (msglen > dlm_config.buffer_size) {
			log_print("message size %d from %d too big, buf len %d",
				  msglen, nodeid, len);
			break;
		}
		err = 0;

		/* If only part of the full message is contained in this
		   buffer, then do nothing and wait for lowcomms to call
		   us again later with more data. We return 0 meaning
		   we've consumed none of the input buffer. */

		if (msglen > len)
			break;

		/* Allocate a larger temp buffer if the full message won't fit
		   in the buffer on the stack (which should work for most
		   ordinary messages). */

		if (msglen > sizeof(__tmp) &&
		    msg == (struct dlm_header *) __tmp) {
			msg = kmalloc(dlm_config.buffer_size, GFP_KERNEL);
			if (msg == NULL)
				return ret;
		}

		copy_from_cb(msg, base, offset, msglen, limit);

		BUG_ON(lockspace != msg->h_lockspace);

		ret += msglen;
		offset += msglen;
		offset &= (limit - 1);
		len -= msglen;

		switch (msg->h_cmd) {
		case DLM_MSG:
			dlm_receive_message(msg, nodeid, 0);
			break;

		case DLM_RCOM:
			dlm_receive_rcom(msg, nodeid);
			break;

		default:
			log_print("unknown msg type %x from %u: %u %u %u %u",
				  msg->h_cmd, nodeid, msglen, len, offset, ret);
		}
	}

	if (msg != (struct dlm_header *) __tmp)
		kfree(msg);

	return err ? err : ret;
}
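copy_from_cb() above reads from a circular receive buffer whose size is passed as 'limit', and dlm_process_incoming_buffer() relies on that size being a power of two when it advances the read position with "offset &= (limit - 1)". A small user-space sketch of the same wrap-around copy, runnable on its own (illustrative only, not part of this commit):

/* Illustrative sketch: copying 'len' bytes that may wrap around the end of a
 * power-of-two circular buffer, mirroring copy_from_cb() above. */
#include <assert.h>
#include <stdio.h>
#include <string.h>

static void copy_from_ring(void *dst, const unsigned char *base,
			   unsigned offset, unsigned len, unsigned limit)
{
	unsigned copy = len;

	if (copy + offset > limit)
		copy = limit - offset;		/* bytes up to the end of the ring */
	memcpy(dst, base + offset, copy);
	if (len - copy)
		memcpy((unsigned char *)dst + copy, base, len - copy);
}

int main(void)
{
	unsigned char ring[] = "ABCDEFGH";	/* 8 payload bytes, limit = 8 */
	char out[5] = "";

	copy_from_ring(out, ring, 6, 4, 8);	/* wraps: "GH" then "AB" */
	out[4] = '\0';
	assert(strcmp(out, "GHAB") == 0);
	printf("%s\n", out);
	return 0;
}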
21
fs/dlm/midcomms.h
Normal file

@@ -0,0 +1,21 @@
/******************************************************************************
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
** of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#ifndef __MIDCOMMS_DOT_H__
#define __MIDCOMMS_DOT_H__

int dlm_process_incoming_buffer(int nodeid, const void *base, unsigned offset,
				unsigned len, unsigned limit);

#endif	/* __MIDCOMMS_DOT_H__ */
472
fs/dlm/rcom.c
Normal file

@@ -0,0 +1,472 @@
/******************************************************************************
|
||||
*******************************************************************************
|
||||
**
|
||||
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
|
||||
**
|
||||
** This copyrighted material is made available to anyone wishing to use,
|
||||
** modify, copy, or redistribute it subject to the terms and conditions
|
||||
** of the GNU General Public License v.2.
|
||||
**
|
||||
*******************************************************************************
|
||||
******************************************************************************/
|
||||
|
||||
#include "dlm_internal.h"
|
||||
#include "lockspace.h"
|
||||
#include "member.h"
|
||||
#include "lowcomms.h"
|
||||
#include "midcomms.h"
|
||||
#include "rcom.h"
|
||||
#include "recover.h"
|
||||
#include "dir.h"
|
||||
#include "config.h"
|
||||
#include "memory.h"
|
||||
#include "lock.h"
|
||||
#include "util.h"
|
||||
|
||||
|
||||
static int rcom_response(struct dlm_ls *ls)
|
||||
{
|
||||
return test_bit(LSFL_RCOM_READY, &ls->ls_flags);
|
||||
}
|
||||
|
||||
static int create_rcom(struct dlm_ls *ls, int to_nodeid, int type, int len,
|
||||
struct dlm_rcom **rc_ret, struct dlm_mhandle **mh_ret)
|
||||
{
|
||||
struct dlm_rcom *rc;
|
||||
struct dlm_mhandle *mh;
|
||||
char *mb;
|
||||
int mb_len = sizeof(struct dlm_rcom) + len;
|
||||
|
||||
mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, GFP_KERNEL, &mb);
|
||||
if (!mh) {
|
||||
log_print("create_rcom to %d type %d len %d ENOBUFS",
|
||||
to_nodeid, type, len);
|
||||
return -ENOBUFS;
|
||||
}
|
||||
memset(mb, 0, mb_len);
|
||||
|
||||
rc = (struct dlm_rcom *) mb;
|
||||
|
||||
rc->rc_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
|
||||
rc->rc_header.h_lockspace = ls->ls_global_id;
|
||||
rc->rc_header.h_nodeid = dlm_our_nodeid();
|
||||
rc->rc_header.h_length = mb_len;
|
||||
rc->rc_header.h_cmd = DLM_RCOM;
|
||||
|
||||
rc->rc_type = type;
|
||||
|
||||
*mh_ret = mh;
|
||||
*rc_ret = rc;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void send_rcom(struct dlm_ls *ls, struct dlm_mhandle *mh,
|
||||
struct dlm_rcom *rc)
|
||||
{
|
||||
dlm_rcom_out(rc);
|
||||
dlm_lowcomms_commit_buffer(mh);
|
||||
}
|
||||
|
||||
/* When replying to a status request, a node also sends back its
|
||||
configuration values. The requesting node then checks that the remote
|
||||
node is configured the same way as itself. */
|
||||
|
||||
static void make_config(struct dlm_ls *ls, struct rcom_config *rf)
|
||||
{
|
||||
rf->rf_lvblen = ls->ls_lvblen;
|
||||
rf->rf_lsflags = ls->ls_exflags;
|
||||
}
|
||||
|
||||
static int check_config(struct dlm_ls *ls, struct rcom_config *rf, int nodeid)
|
||||
{
|
||||
if (rf->rf_lvblen != ls->ls_lvblen ||
|
||||
rf->rf_lsflags != ls->ls_exflags) {
|
||||
log_error(ls, "config mismatch: %d,%x nodeid %d: %d,%x",
|
||||
ls->ls_lvblen, ls->ls_exflags,
|
||||
nodeid, rf->rf_lvblen, rf->rf_lsflags);
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int dlm_rcom_status(struct dlm_ls *ls, int nodeid)
|
||||
{
|
||||
struct dlm_rcom *rc;
|
||||
struct dlm_mhandle *mh;
|
||||
int error = 0;
|
||||
|
||||
memset(ls->ls_recover_buf, 0, dlm_config.buffer_size);
|
||||
ls->ls_recover_nodeid = nodeid;
|
||||
|
||||
if (nodeid == dlm_our_nodeid()) {
|
||||
rc = (struct dlm_rcom *) ls->ls_recover_buf;
|
||||
rc->rc_result = dlm_recover_status(ls);
|
||||
goto out;
|
||||
}
|
||||
|
||||
error = create_rcom(ls, nodeid, DLM_RCOM_STATUS, 0, &rc, &mh);
|
||||
if (error)
|
||||
goto out;
|
||||
rc->rc_id = ++ls->ls_rcom_seq;
|
||||
|
||||
send_rcom(ls, mh, rc);
|
||||
|
||||
error = dlm_wait_function(ls, &rcom_response);
|
||||
clear_bit(LSFL_RCOM_READY, &ls->ls_flags);
|
||||
if (error)
|
||||
goto out;
|
||||
|
||||
rc = (struct dlm_rcom *) ls->ls_recover_buf;
|
||||
|
||||
if (rc->rc_result == -ESRCH) {
|
||||
/* we pretend the remote lockspace exists with 0 status */
|
||||
log_debug(ls, "remote node %d not ready", nodeid);
|
||||
rc->rc_result = 0;
|
||||
} else
|
||||
error = check_config(ls, (struct rcom_config *) rc->rc_buf,
|
||||
nodeid);
|
||||
/* the caller looks at rc_result for the remote recovery status */
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
||||
static void receive_rcom_status(struct dlm_ls *ls, struct dlm_rcom *rc_in)
|
||||
{
|
||||
struct dlm_rcom *rc;
|
||||
struct dlm_mhandle *mh;
|
||||
int error, nodeid = rc_in->rc_header.h_nodeid;
|
||||
|
||||
error = create_rcom(ls, nodeid, DLM_RCOM_STATUS_REPLY,
|
||||
sizeof(struct rcom_config), &rc, &mh);
|
||||
if (error)
|
||||
return;
|
||||
rc->rc_id = rc_in->rc_id;
|
||||
rc->rc_result = dlm_recover_status(ls);
|
||||
make_config(ls, (struct rcom_config *) rc->rc_buf);
|
||||
|
||||
send_rcom(ls, mh, rc);
|
||||
}
|
||||
|
||||
static void receive_sync_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
|
||||
{
|
||||
if (rc_in->rc_id != ls->ls_rcom_seq) {
|
||||
log_debug(ls, "reject old reply %d got %llx wanted %llx",
|
||||
rc_in->rc_type, rc_in->rc_id, ls->ls_rcom_seq);
|
||||
return;
|
||||
}
|
||||
memcpy(ls->ls_recover_buf, rc_in, rc_in->rc_header.h_length);
|
||||
set_bit(LSFL_RCOM_READY, &ls->ls_flags);
|
||||
wake_up(&ls->ls_wait_general);
|
||||
}
|
||||
|
||||
static void receive_rcom_status_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
|
||||
{
|
||||
receive_sync_reply(ls, rc_in);
|
||||
}
|
||||
|
||||
int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name, int last_len)
|
||||
{
|
||||
struct dlm_rcom *rc;
|
||||
struct dlm_mhandle *mh;
|
||||
int error = 0, len = sizeof(struct dlm_rcom);
|
||||
|
||||
memset(ls->ls_recover_buf, 0, dlm_config.buffer_size);
|
||||
ls->ls_recover_nodeid = nodeid;
|
||||
|
||||
if (nodeid == dlm_our_nodeid()) {
|
||||
dlm_copy_master_names(ls, last_name, last_len,
|
||||
ls->ls_recover_buf + len,
|
||||
dlm_config.buffer_size - len, nodeid);
|
||||
goto out;
|
||||
}
|
||||
|
||||
error = create_rcom(ls, nodeid, DLM_RCOM_NAMES, last_len, &rc, &mh);
|
||||
if (error)
|
||||
goto out;
|
||||
memcpy(rc->rc_buf, last_name, last_len);
|
||||
rc->rc_id = ++ls->ls_rcom_seq;
|
||||
|
||||
send_rcom(ls, mh, rc);
|
||||
|
||||
error = dlm_wait_function(ls, &rcom_response);
|
||||
clear_bit(LSFL_RCOM_READY, &ls->ls_flags);
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
||||
static void receive_rcom_names(struct dlm_ls *ls, struct dlm_rcom *rc_in)
|
||||
{
|
||||
struct dlm_rcom *rc;
|
||||
struct dlm_mhandle *mh;
|
||||
int error, inlen, outlen;
|
||||
int nodeid = rc_in->rc_header.h_nodeid;
|
||||
uint32_t status = dlm_recover_status(ls);
|
||||
|
||||
/*
|
||||
* We can't run dlm_dir_rebuild_send (which uses ls_nodes) while
|
||||
* dlm_recoverd is running ls_nodes_reconfig (which changes ls_nodes).
|
||||
* It could only happen in rare cases where we get a late NAMES
|
||||
* message from a previous instance of recovery.
|
||||
*/
|
||||
|
||||
if (!(status & DLM_RS_NODES)) {
|
||||
log_debug(ls, "ignoring RCOM_NAMES from %u", nodeid);
|
||||
return;
|
||||
}
|
||||
|
||||
nodeid = rc_in->rc_header.h_nodeid;
|
||||
inlen = rc_in->rc_header.h_length - sizeof(struct dlm_rcom);
|
||||
outlen = dlm_config.buffer_size - sizeof(struct dlm_rcom);
|
||||
|
||||
error = create_rcom(ls, nodeid, DLM_RCOM_NAMES_REPLY, outlen, &rc, &mh);
|
||||
if (error)
|
||||
return;
|
||||
rc->rc_id = rc_in->rc_id;
|
||||
|
||||
dlm_copy_master_names(ls, rc_in->rc_buf, inlen, rc->rc_buf, outlen,
|
||||
nodeid);
|
||||
send_rcom(ls, mh, rc);
|
||||
}
|
||||
|
||||
static void receive_rcom_names_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
|
||||
{
|
||||
receive_sync_reply(ls, rc_in);
|
||||
}
|
||||
|
||||
int dlm_send_rcom_lookup(struct dlm_rsb *r, int dir_nodeid)
|
||||
{
|
||||
struct dlm_rcom *rc;
|
||||
struct dlm_mhandle *mh;
|
||||
struct dlm_ls *ls = r->res_ls;
|
||||
int error;
|
||||
|
||||
error = create_rcom(ls, dir_nodeid, DLM_RCOM_LOOKUP, r->res_length,
|
||||
&rc, &mh);
|
||||
if (error)
|
||||
goto out;
|
||||
memcpy(rc->rc_buf, r->res_name, r->res_length);
|
||||
rc->rc_id = (unsigned long) r;
|
||||
|
||||
send_rcom(ls, mh, rc);
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
||||
static void receive_rcom_lookup(struct dlm_ls *ls, struct dlm_rcom *rc_in)
|
||||
{
|
||||
struct dlm_rcom *rc;
|
||||
struct dlm_mhandle *mh;
|
||||
int error, ret_nodeid, nodeid = rc_in->rc_header.h_nodeid;
|
||||
int len = rc_in->rc_header.h_length - sizeof(struct dlm_rcom);
|
||||
|
||||
error = create_rcom(ls, nodeid, DLM_RCOM_LOOKUP_REPLY, 0, &rc, &mh);
|
||||
if (error)
|
||||
return;
|
||||
|
||||
error = dlm_dir_lookup(ls, nodeid, rc_in->rc_buf, len, &ret_nodeid);
|
||||
if (error)
|
||||
ret_nodeid = error;
|
||||
rc->rc_result = ret_nodeid;
|
||||
rc->rc_id = rc_in->rc_id;
|
||||
|
||||
send_rcom(ls, mh, rc);
|
||||
}
|
||||
|
||||
static void receive_rcom_lookup_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
|
||||
{
|
||||
dlm_recover_master_reply(ls, rc_in);
|
||||
}
|
||||
|
||||
static void pack_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb,
|
||||
struct rcom_lock *rl)
|
||||
{
|
||||
memset(rl, 0, sizeof(*rl));
|
||||
|
||||
rl->rl_ownpid = lkb->lkb_ownpid;
|
||||
rl->rl_lkid = lkb->lkb_id;
|
||||
rl->rl_exflags = lkb->lkb_exflags;
|
||||
rl->rl_flags = lkb->lkb_flags;
|
||||
rl->rl_lvbseq = lkb->lkb_lvbseq;
|
||||
rl->rl_rqmode = lkb->lkb_rqmode;
|
||||
rl->rl_grmode = lkb->lkb_grmode;
|
||||
rl->rl_status = lkb->lkb_status;
|
||||
rl->rl_wait_type = lkb->lkb_wait_type;
|
||||
|
||||
if (lkb->lkb_bastaddr)
|
||||
rl->rl_asts |= AST_BAST;
|
||||
if (lkb->lkb_astaddr)
|
||||
rl->rl_asts |= AST_COMP;
|
||||
|
||||
rl->rl_namelen = r->res_length;
|
||||
memcpy(rl->rl_name, r->res_name, r->res_length);
|
||||
|
||||
/* FIXME: might we have an lvb without DLM_LKF_VALBLK set ?
|
||||
If so, receive_rcom_lock_args() won't take this copy. */
|
||||
|
||||
if (lkb->lkb_lvbptr)
|
||||
memcpy(rl->rl_lvb, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
|
||||
}
|
||||
|
||||
int dlm_send_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
|
||||
{
|
||||
struct dlm_ls *ls = r->res_ls;
|
||||
struct dlm_rcom *rc;
|
||||
struct dlm_mhandle *mh;
|
||||
struct rcom_lock *rl;
|
||||
int error, len = sizeof(struct rcom_lock);
|
||||
|
||||
if (lkb->lkb_lvbptr)
|
||||
len += ls->ls_lvblen;
|
||||
|
||||
error = create_rcom(ls, r->res_nodeid, DLM_RCOM_LOCK, len, &rc, &mh);
|
||||
if (error)
|
||||
goto out;
|
||||
|
||||
rl = (struct rcom_lock *) rc->rc_buf;
|
||||
pack_rcom_lock(r, lkb, rl);
|
||||
rc->rc_id = (unsigned long) r;
|
||||
|
||||
send_rcom(ls, mh, rc);
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
||||
static void receive_rcom_lock(struct dlm_ls *ls, struct dlm_rcom *rc_in)
|
||||
{
|
||||
struct dlm_rcom *rc;
|
||||
struct dlm_mhandle *mh;
|
||||
int error, nodeid = rc_in->rc_header.h_nodeid;
|
||||
|
||||
dlm_recover_master_copy(ls, rc_in);
|
||||
|
||||
error = create_rcom(ls, nodeid, DLM_RCOM_LOCK_REPLY,
|
||||
sizeof(struct rcom_lock), &rc, &mh);
|
||||
if (error)
|
||||
return;
|
||||
|
||||
/* We send back the same rcom_lock struct we received, but
|
||||
dlm_recover_master_copy() has filled in rl_remid and rl_result */
|
||||
|
||||
memcpy(rc->rc_buf, rc_in->rc_buf, sizeof(struct rcom_lock));
|
||||
rc->rc_id = rc_in->rc_id;
|
||||
|
||||
send_rcom(ls, mh, rc);
|
||||
}
|
||||
|
||||
static void receive_rcom_lock_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
|
||||
{
|
||||
uint32_t status = dlm_recover_status(ls);
|
||||
|
||||
if (!(status & DLM_RS_DIR)) {
|
||||
log_debug(ls, "ignoring RCOM_LOCK_REPLY from %u",
|
||||
rc_in->rc_header.h_nodeid);
|
||||
return;
|
||||
}
|
||||
|
||||
dlm_recover_process_copy(ls, rc_in);
|
||||
}
|
||||
|
||||
static int send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in)
|
||||
{
|
||||
struct dlm_rcom *rc;
|
||||
struct dlm_mhandle *mh;
|
||||
char *mb;
|
||||
int mb_len = sizeof(struct dlm_rcom);
|
||||
|
||||
mh = dlm_lowcomms_get_buffer(nodeid, mb_len, GFP_KERNEL, &mb);
|
||||
if (!mh)
|
||||
return -ENOBUFS;
|
||||
memset(mb, 0, mb_len);
|
||||
|
||||
rc = (struct dlm_rcom *) mb;
|
||||
|
||||
rc->rc_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
|
||||
rc->rc_header.h_lockspace = rc_in->rc_header.h_lockspace;
|
||||
rc->rc_header.h_nodeid = dlm_our_nodeid();
|
||||
rc->rc_header.h_length = mb_len;
|
||||
rc->rc_header.h_cmd = DLM_RCOM;
|
||||
|
||||
rc->rc_type = DLM_RCOM_STATUS_REPLY;
|
||||
rc->rc_id = rc_in->rc_id;
|
||||
rc->rc_result = -ESRCH;
|
||||
|
||||
dlm_rcom_out(rc);
|
||||
dlm_lowcomms_commit_buffer(mh);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Called by dlm_recvd; corresponds to dlm_receive_message() but special
|
||||
recovery-only comms are sent through here. */
|
||||
|
||||
void dlm_receive_rcom(struct dlm_header *hd, int nodeid)
|
||||
{
|
||||
struct dlm_rcom *rc = (struct dlm_rcom *) hd;
|
||||
struct dlm_ls *ls;
|
||||
|
||||
dlm_rcom_in(rc);
|
||||
|
||||
/* If the lockspace doesn't exist then still send a status message
|
||||
back; it's possible that it just doesn't have its global_id yet. */
|
||||
|
||||
ls = dlm_find_lockspace_global(hd->h_lockspace);
|
||||
if (!ls) {
|
||||
log_print("lockspace %x from %d not found",
|
||||
hd->h_lockspace, nodeid);
|
||||
send_ls_not_ready(nodeid, rc);
|
||||
return;
|
||||
}
|
||||
|
||||
if (dlm_recovery_stopped(ls) && (rc->rc_type != DLM_RCOM_STATUS)) {
|
||||
log_error(ls, "ignoring recovery message %x from %d",
|
||||
rc->rc_type, nodeid);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (nodeid != rc->rc_header.h_nodeid) {
|
||||
log_error(ls, "bad rcom nodeid %d from %d",
|
||||
rc->rc_header.h_nodeid, nodeid);
|
||||
goto out;
|
||||
}
|
||||
|
||||
switch (rc->rc_type) {
|
||||
case DLM_RCOM_STATUS:
|
||||
receive_rcom_status(ls, rc);
|
||||
break;
|
||||
|
||||
case DLM_RCOM_NAMES:
|
||||
receive_rcom_names(ls, rc);
|
||||
break;
|
||||
|
||||
case DLM_RCOM_LOOKUP:
|
||||
receive_rcom_lookup(ls, rc);
|
||||
break;
|
||||
|
||||
case DLM_RCOM_LOCK:
|
||||
receive_rcom_lock(ls, rc);
|
||||
break;
|
||||
|
||||
case DLM_RCOM_STATUS_REPLY:
|
||||
receive_rcom_status_reply(ls, rc);
|
||||
break;
|
||||
|
||||
case DLM_RCOM_NAMES_REPLY:
|
||||
receive_rcom_names_reply(ls, rc);
|
||||
break;
|
||||
|
||||
case DLM_RCOM_LOOKUP_REPLY:
|
||||
receive_rcom_lookup_reply(ls, rc);
|
||||
break;
|
||||
|
||||
case DLM_RCOM_LOCK_REPLY:
|
||||
receive_rcom_lock_reply(ls, rc);
|
||||
break;
|
||||
|
||||
default:
|
||||
DLM_ASSERT(0, printk("rc_type=%x\n", rc->rc_type););
|
||||
}
|
||||
out:
|
||||
dlm_put_lockspace(ls);
|
||||
}
|
||||
|
24
fs/dlm/rcom.h
Normal file

@@ -0,0 +1,24 @@
/******************************************************************************
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
** of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#ifndef __RCOM_DOT_H__
#define __RCOM_DOT_H__

int dlm_rcom_status(struct dlm_ls *ls, int nodeid);
int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name,int last_len);
int dlm_send_rcom_lookup(struct dlm_rsb *r, int dir_nodeid);
int dlm_send_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
void dlm_receive_rcom(struct dlm_header *hd, int nodeid);

#endif
765
fs/dlm/recover.c
Normal file

@@ -0,0 +1,765 @@
/******************************************************************************
|
||||
*******************************************************************************
|
||||
**
|
||||
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
|
||||
**
|
||||
** This copyrighted material is made available to anyone wishing to use,
|
||||
** modify, copy, or redistribute it subject to the terms and conditions
|
||||
** of the GNU General Public License v.2.
|
||||
**
|
||||
*******************************************************************************
|
||||
******************************************************************************/
|
||||
|
||||
#include "dlm_internal.h"
|
||||
#include "lockspace.h"
|
||||
#include "dir.h"
|
||||
#include "config.h"
|
||||
#include "ast.h"
|
||||
#include "memory.h"
|
||||
#include "rcom.h"
|
||||
#include "lock.h"
|
||||
#include "lowcomms.h"
|
||||
#include "member.h"
|
||||
#include "recover.h"
|
||||
|
||||
|
||||
/*
|
||||
* Recovery waiting routines: these functions wait for a particular reply from
|
||||
* a remote node, or for the remote node to report a certain status. They need
|
||||
* to abort if the lockspace is stopped indicating a node has failed (perhaps
|
||||
* the one being waited for).
|
||||
*/
|
||||
|
||||
/*
|
||||
* Wait until given function returns non-zero or lockspace is stopped
|
||||
* (LS_RECOVERY_STOP set due to failure of a node in ls_nodes). When another
|
||||
* function thinks it could have completed the waited-on task, they should wake
|
||||
* up ls_wait_general to get an immediate response rather than waiting for the
|
||||
* timer to detect the result. A timer wakes us up periodically while waiting
|
||||
* to see if we should abort due to a node failure. This should only be called
|
||||
* by the dlm_recoverd thread.
|
||||
*/
|
||||
|
||||
static void dlm_wait_timer_fn(unsigned long data)
|
||||
{
|
||||
struct dlm_ls *ls = (struct dlm_ls *) data;
|
||||
mod_timer(&ls->ls_timer, jiffies + (dlm_config.recover_timer * HZ));
|
||||
wake_up(&ls->ls_wait_general);
|
||||
}
|
||||
|
||||
int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls))
|
||||
{
|
||||
int error = 0;
|
||||
|
||||
init_timer(&ls->ls_timer);
|
||||
ls->ls_timer.function = dlm_wait_timer_fn;
|
||||
ls->ls_timer.data = (long) ls;
|
||||
ls->ls_timer.expires = jiffies + (dlm_config.recover_timer * HZ);
|
||||
add_timer(&ls->ls_timer);
|
||||
|
||||
wait_event(ls->ls_wait_general, testfn(ls) || dlm_recovery_stopped(ls));
|
||||
del_timer_sync(&ls->ls_timer);
|
||||
|
||||
if (dlm_recovery_stopped(ls)) {
|
||||
log_debug(ls, "dlm_wait_function aborted");
|
||||
error = -EINTR;
|
||||
}
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* An efficient way for all nodes to wait for all others to have a certain
|
||||
* status. The node with the lowest nodeid polls all the others for their
|
||||
* status (wait_status_all) and all the others poll the node with the low id
|
||||
* for its accumulated result (wait_status_low). When all nodes have set
|
||||
* status flag X, then status flag X_ALL will be set on the low nodeid.
|
||||
*/
|
||||
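/* Illustrative sketch, not part of this commit: wait_status() below derives
 * the "everyone has it" flag as (status << 1), so the scheme assumes each
 * DLM_RS_*_ALL bit is the matching DLM_RS_* bit shifted left by one.  The
 * real values live in dlm_internal.h (not shown here); the defines below are
 * assumptions used only to make that pairing concrete. */
#define SKETCH_RS_NODES		0x00000001
#define SKETCH_RS_NODES_ALL	0x00000002	/* == SKETCH_RS_NODES << 1 */
#define SKETCH_RS_DIR		0x00000004
#define SKETCH_RS_DIR_ALL	0x00000008	/* == SKETCH_RS_DIR << 1 */

/* e.g. for the NODES stage: the low nodeid runs wait_status_all(ls, X),
 * then publishes X_ALL via dlm_set_recover_status(ls, X << 1); every other
 * node polls the low nodeid in wait_status_low(ls, X << 1) until that bit
 * appears in the reply. */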
|
||||
uint32_t dlm_recover_status(struct dlm_ls *ls)
|
||||
{
|
||||
uint32_t status;
|
||||
spin_lock(&ls->ls_recover_lock);
|
||||
status = ls->ls_recover_status;
|
||||
spin_unlock(&ls->ls_recover_lock);
|
||||
return status;
|
||||
}
|
||||
|
||||
void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status)
|
||||
{
|
||||
spin_lock(&ls->ls_recover_lock);
|
||||
ls->ls_recover_status |= status;
|
||||
spin_unlock(&ls->ls_recover_lock);
|
||||
}
|
||||
|
||||
static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status)
|
||||
{
|
||||
struct dlm_rcom *rc = (struct dlm_rcom *) ls->ls_recover_buf;
|
||||
struct dlm_member *memb;
|
||||
int error = 0, delay;
|
||||
|
||||
list_for_each_entry(memb, &ls->ls_nodes, list) {
|
||||
delay = 0;
|
||||
for (;;) {
|
||||
if (dlm_recovery_stopped(ls)) {
|
||||
error = -EINTR;
|
||||
goto out;
|
||||
}
|
||||
|
||||
error = dlm_rcom_status(ls, memb->nodeid);
|
||||
if (error)
|
||||
goto out;
|
||||
|
||||
if (rc->rc_result & wait_status)
|
||||
break;
|
||||
if (delay < 1000)
|
||||
delay += 20;
|
||||
msleep(delay);
|
||||
}
|
||||
}
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
||||
static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status)
|
||||
{
|
||||
struct dlm_rcom *rc = (struct dlm_rcom *) ls->ls_recover_buf;
|
||||
int error = 0, delay = 0, nodeid = ls->ls_low_nodeid;
|
||||
|
||||
for (;;) {
|
||||
if (dlm_recovery_stopped(ls)) {
|
||||
error = -EINTR;
|
||||
goto out;
|
||||
}
|
||||
|
||||
error = dlm_rcom_status(ls, nodeid);
|
||||
if (error)
|
||||
break;
|
||||
|
||||
if (rc->rc_result & wait_status)
|
||||
break;
|
||||
if (delay < 1000)
|
||||
delay += 20;
|
||||
msleep(delay);
|
||||
}
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
||||
static int wait_status(struct dlm_ls *ls, uint32_t status)
|
||||
{
|
||||
uint32_t status_all = status << 1;
|
||||
int error;
|
||||
|
||||
if (ls->ls_low_nodeid == dlm_our_nodeid()) {
|
||||
error = wait_status_all(ls, status);
|
||||
if (!error)
|
||||
dlm_set_recover_status(ls, status_all);
|
||||
} else
|
||||
error = wait_status_low(ls, status_all);
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
int dlm_recover_members_wait(struct dlm_ls *ls)
|
||||
{
|
||||
return wait_status(ls, DLM_RS_NODES);
|
||||
}
|
||||
|
||||
int dlm_recover_directory_wait(struct dlm_ls *ls)
|
||||
{
|
||||
return wait_status(ls, DLM_RS_DIR);
|
||||
}
|
||||
|
||||
int dlm_recover_locks_wait(struct dlm_ls *ls)
|
||||
{
|
||||
return wait_status(ls, DLM_RS_LOCKS);
|
||||
}
|
||||
|
||||
int dlm_recover_done_wait(struct dlm_ls *ls)
|
||||
{
|
||||
return wait_status(ls, DLM_RS_DONE);
|
||||
}
|
||||
|
||||
/*
|
||||
* The recover_list contains all the rsb's for which we've requested the new
|
||||
* master nodeid. As replies are returned from the resource directories the
|
||||
* rsb's are removed from the list. When the list is empty we're done.
|
||||
*
|
||||
* The recover_list is later similarly used for all rsb's for which we've sent
|
||||
* new lkb's and need to receive new corresponding lkid's.
|
||||
*
|
||||
* We use the address of the rsb struct as a simple local identifier for the
|
||||
* rsb so we can match an rcom reply with the rsb it was sent for.
|
||||
*/
|
||||
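/* Illustrative sketch, not part of this commit: the rsb's address is reused
 * as the rcom id, so a directory-lookup reply can be matched back to the rsb
 * it was sent for without any extra lookup table.  The hypothetical helpers
 * below only restate the round-trip visible in dlm_send_rcom_lookup() and
 * recover_list_find(). */
static inline uint64_t sketch_rsb_to_id(struct dlm_rsb *r)
{
	return (unsigned long) r;		/* what dlm_send_rcom_lookup() stores in rc_id */
}

static inline int sketch_id_matches(struct dlm_rsb *r, uint64_t id)
{
	return id == (unsigned long) r;		/* what recover_list_find() compares */
}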
|
||||
static int recover_list_empty(struct dlm_ls *ls)
|
||||
{
|
||||
int empty;
|
||||
|
||||
spin_lock(&ls->ls_recover_list_lock);
|
||||
empty = list_empty(&ls->ls_recover_list);
|
||||
spin_unlock(&ls->ls_recover_list_lock);
|
||||
|
||||
return empty;
|
||||
}
|
||||
|
||||
static void recover_list_add(struct dlm_rsb *r)
|
||||
{
|
||||
struct dlm_ls *ls = r->res_ls;
|
||||
|
||||
spin_lock(&ls->ls_recover_list_lock);
|
||||
if (list_empty(&r->res_recover_list)) {
|
||||
list_add_tail(&r->res_recover_list, &ls->ls_recover_list);
|
||||
ls->ls_recover_list_count++;
|
||||
dlm_hold_rsb(r);
|
||||
}
|
||||
spin_unlock(&ls->ls_recover_list_lock);
|
||||
}
|
||||
|
||||
static void recover_list_del(struct dlm_rsb *r)
|
||||
{
|
||||
struct dlm_ls *ls = r->res_ls;
|
||||
|
||||
spin_lock(&ls->ls_recover_list_lock);
|
||||
list_del_init(&r->res_recover_list);
|
||||
ls->ls_recover_list_count--;
|
||||
spin_unlock(&ls->ls_recover_list_lock);
|
||||
|
||||
dlm_put_rsb(r);
|
||||
}
|
||||
|
||||
static struct dlm_rsb *recover_list_find(struct dlm_ls *ls, uint64_t id)
|
||||
{
|
||||
struct dlm_rsb *r = NULL;
|
||||
|
||||
spin_lock(&ls->ls_recover_list_lock);
|
||||
|
||||
list_for_each_entry(r, &ls->ls_recover_list, res_recover_list) {
|
||||
if (id == (unsigned long) r)
|
||||
goto out;
|
||||
}
|
||||
r = NULL;
|
||||
out:
|
||||
spin_unlock(&ls->ls_recover_list_lock);
|
||||
return r;
|
||||
}
|
||||
|
||||
static void recover_list_clear(struct dlm_ls *ls)
|
||||
{
|
||||
struct dlm_rsb *r, *s;
|
||||
|
||||
spin_lock(&ls->ls_recover_list_lock);
|
||||
list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) {
|
||||
list_del_init(&r->res_recover_list);
|
||||
dlm_put_rsb(r);
|
||||
ls->ls_recover_list_count--;
|
||||
}
|
||||
|
||||
if (ls->ls_recover_list_count != 0) {
|
||||
log_error(ls, "warning: recover_list_count %d",
|
||||
ls->ls_recover_list_count);
|
||||
ls->ls_recover_list_count = 0;
|
||||
}
|
||||
spin_unlock(&ls->ls_recover_list_lock);
|
||||
}
|
||||
|
||||
|
||||
/* Master recovery: find new master node for rsb's that were
|
||||
mastered on nodes that have been removed.
|
||||
|
||||
dlm_recover_masters
|
||||
recover_master
|
||||
dlm_send_rcom_lookup -> receive_rcom_lookup
|
||||
dlm_dir_lookup
|
||||
receive_rcom_lookup_reply <-
|
||||
dlm_recover_master_reply
|
||||
set_new_master
|
||||
set_master_lkbs
|
||||
set_lock_master
|
||||
*/
|
||||
|
||||
/*
|
||||
* Set the lock master for all LKBs in a lock queue
|
||||
* If we are the new master of the rsb, we may have received new
|
||||
* MSTCPY locks from other nodes already which we need to ignore
|
||||
* when setting the new nodeid.
|
||||
*/
|
||||
|
||||
static void set_lock_master(struct list_head *queue, int nodeid)
|
||||
{
|
||||
struct dlm_lkb *lkb;
|
||||
|
||||
list_for_each_entry(lkb, queue, lkb_statequeue)
|
||||
if (!(lkb->lkb_flags & DLM_IFL_MSTCPY))
|
||||
lkb->lkb_nodeid = nodeid;
|
||||
}
|
||||
|
||||
static void set_master_lkbs(struct dlm_rsb *r)
|
||||
{
|
||||
set_lock_master(&r->res_grantqueue, r->res_nodeid);
|
||||
set_lock_master(&r->res_convertqueue, r->res_nodeid);
|
||||
set_lock_master(&r->res_waitqueue, r->res_nodeid);
|
||||
}
|
||||
|
||||
/*
|
||||
* Propagate the new master nodeid to locks
|
||||
* The NEW_MASTER flag tells dlm_recover_locks() which rsb's to consider.
|
||||
* The NEW_MASTER2 flag tells recover_lvb() and set_locks_purged() which
|
||||
* rsb's to consider.
|
||||
*/
|
||||
|
||||
static void set_new_master(struct dlm_rsb *r, int nodeid)
|
||||
{
|
||||
lock_rsb(r);
|
||||
r->res_nodeid = nodeid;
|
||||
set_master_lkbs(r);
|
||||
rsb_set_flag(r, RSB_NEW_MASTER);
|
||||
rsb_set_flag(r, RSB_NEW_MASTER2);
|
||||
unlock_rsb(r);
|
||||
}
|
||||
|
||||
/*
|
||||
* We do async lookups on rsb's that need new masters. The rsb's
|
||||
* waiting for a lookup reply are kept on the recover_list.
|
||||
*/
|
||||
|
||||
static int recover_master(struct dlm_rsb *r)
|
||||
{
|
||||
struct dlm_ls *ls = r->res_ls;
|
||||
int error, dir_nodeid, ret_nodeid, our_nodeid = dlm_our_nodeid();
|
||||
|
||||
dir_nodeid = dlm_dir_nodeid(r);
|
||||
|
||||
if (dir_nodeid == our_nodeid) {
|
||||
error = dlm_dir_lookup(ls, our_nodeid, r->res_name,
|
||||
r->res_length, &ret_nodeid);
|
||||
if (error)
|
||||
log_error(ls, "recover dir lookup error %d", error);
|
||||
|
||||
if (ret_nodeid == our_nodeid)
|
||||
ret_nodeid = 0;
|
||||
set_new_master(r, ret_nodeid);
|
||||
} else {
|
||||
recover_list_add(r);
|
||||
error = dlm_send_rcom_lookup(r, dir_nodeid);
|
||||
}
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* When not using a directory, most resource names will hash to a new static
|
||||
* master nodeid and the resource will need to be remastered.
|
||||
*/
|
||||
|
||||
static int recover_master_static(struct dlm_rsb *r)
|
||||
{
|
||||
int master = dlm_dir_nodeid(r);
|
||||
|
||||
if (master == dlm_our_nodeid())
|
||||
master = 0;
|
||||
|
||||
if (r->res_nodeid != master) {
|
||||
if (is_master(r))
|
||||
dlm_purge_mstcpy_locks(r);
|
||||
set_new_master(r, master);
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Go through local root resources and for each rsb which has a master which
|
||||
* has departed, get the new master nodeid from the directory. The dir will
|
||||
* assign mastery to the first node to look up the new master. That means
|
||||
* we'll discover in this lookup if we're the new master of any rsb's.
|
||||
*
|
||||
* We fire off all the dir lookup requests individually and asynchronously to
|
||||
* the correct dir node.
|
||||
*/
|
||||
|
||||
int dlm_recover_masters(struct dlm_ls *ls)
|
||||
{
|
||||
struct dlm_rsb *r;
|
||||
int error = 0, count = 0;
|
||||
|
||||
log_debug(ls, "dlm_recover_masters");
|
||||
|
||||
down_read(&ls->ls_root_sem);
|
||||
list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
|
||||
if (dlm_recovery_stopped(ls)) {
|
||||
up_read(&ls->ls_root_sem);
|
||||
error = -EINTR;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (dlm_no_directory(ls))
|
||||
count += recover_master_static(r);
|
||||
else if (!is_master(r) && dlm_is_removed(ls, r->res_nodeid)) {
|
||||
recover_master(r);
|
||||
count++;
|
||||
}
|
||||
|
||||
schedule();
|
||||
}
|
||||
up_read(&ls->ls_root_sem);
|
||||
|
||||
log_debug(ls, "dlm_recover_masters %d resources", count);
|
||||
|
||||
error = dlm_wait_function(ls, &recover_list_empty);
|
||||
out:
|
||||
if (error)
|
||||
recover_list_clear(ls);
|
||||
return error;
|
||||
}
|
||||
|
||||
int dlm_recover_master_reply(struct dlm_ls *ls, struct dlm_rcom *rc)
|
||||
{
|
||||
struct dlm_rsb *r;
|
||||
int nodeid;
|
||||
|
||||
r = recover_list_find(ls, rc->rc_id);
|
||||
if (!r) {
|
||||
log_error(ls, "dlm_recover_master_reply no id %llx",
|
||||
(unsigned long long)rc->rc_id);
|
||||
goto out;
|
||||
}
|
||||
|
||||
nodeid = rc->rc_result;
|
||||
if (nodeid == dlm_our_nodeid())
|
||||
nodeid = 0;
|
||||
|
||||
set_new_master(r, nodeid);
|
||||
recover_list_del(r);
|
||||
|
||||
if (recover_list_empty(ls))
|
||||
wake_up(&ls->ls_wait_general);
|
||||
out:
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/* Lock recovery: rebuild the process-copy locks we hold on a
|
||||
remastered rsb on the new rsb master.
|
||||
|
||||
dlm_recover_locks
|
||||
recover_locks
|
||||
recover_locks_queue
|
||||
dlm_send_rcom_lock -> receive_rcom_lock
|
||||
dlm_recover_master_copy
|
||||
receive_rcom_lock_reply <-
|
||||
dlm_recover_process_copy
|
||||
*/
|
||||
|
||||
|
||||
/*
|
||||
* keep a count of the number of lkb's we send to the new master; when we get
|
||||
* an equal number of replies then recovery for the rsb is done
|
||||
*/
|
||||
|
||||
static int recover_locks_queue(struct dlm_rsb *r, struct list_head *head)
|
||||
{
|
||||
struct dlm_lkb *lkb;
|
||||
int error = 0;
|
||||
|
||||
list_for_each_entry(lkb, head, lkb_statequeue) {
|
||||
error = dlm_send_rcom_lock(r, lkb);
|
||||
if (error)
|
||||
break;
|
||||
r->res_recover_locks_count++;
|
||||
}
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
static int recover_locks(struct dlm_rsb *r)
|
||||
{
|
||||
int error = 0;
|
||||
|
||||
lock_rsb(r);
|
||||
|
||||
DLM_ASSERT(!r->res_recover_locks_count, dlm_dump_rsb(r););
|
||||
|
||||
error = recover_locks_queue(r, &r->res_grantqueue);
|
||||
if (error)
|
||||
goto out;
|
||||
error = recover_locks_queue(r, &r->res_convertqueue);
|
||||
if (error)
|
||||
goto out;
|
||||
error = recover_locks_queue(r, &r->res_waitqueue);
|
||||
if (error)
|
||||
goto out;
|
||||
|
||||
if (r->res_recover_locks_count)
|
||||
recover_list_add(r);
|
||||
else
|
||||
rsb_clear_flag(r, RSB_NEW_MASTER);
|
||||
out:
|
||||
unlock_rsb(r);
|
||||
return error;
|
||||
}
|
||||
|
||||
int dlm_recover_locks(struct dlm_ls *ls)
|
||||
{
|
||||
struct dlm_rsb *r;
|
||||
int error, count = 0;
|
||||
|
||||
log_debug(ls, "dlm_recover_locks");
|
||||
|
||||
down_read(&ls->ls_root_sem);
|
||||
list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
|
||||
if (is_master(r)) {
|
||||
rsb_clear_flag(r, RSB_NEW_MASTER);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!rsb_flag(r, RSB_NEW_MASTER))
|
||||
continue;
|
||||
|
||||
if (dlm_recovery_stopped(ls)) {
|
||||
error = -EINTR;
|
||||
up_read(&ls->ls_root_sem);
|
||||
goto out;
|
||||
}
|
||||
|
||||
error = recover_locks(r);
|
||||
if (error) {
|
||||
up_read(&ls->ls_root_sem);
|
||||
goto out;
|
||||
}
|
||||
|
||||
count += r->res_recover_locks_count;
|
||||
}
|
||||
up_read(&ls->ls_root_sem);
|
||||
|
||||
log_debug(ls, "dlm_recover_locks %d locks", count);
|
||||
|
||||
error = dlm_wait_function(ls, &recover_list_empty);
|
||||
out:
|
||||
if (error)
|
||||
recover_list_clear(ls);
|
||||
else
|
||||
dlm_set_recover_status(ls, DLM_RS_LOCKS);
|
||||
return error;
|
||||
}
|
||||
|
||||
void dlm_recovered_lock(struct dlm_rsb *r)
|
||||
{
|
||||
DLM_ASSERT(rsb_flag(r, RSB_NEW_MASTER), dlm_dump_rsb(r););
|
||||
|
||||
r->res_recover_locks_count--;
|
||||
if (!r->res_recover_locks_count) {
|
||||
rsb_clear_flag(r, RSB_NEW_MASTER);
|
||||
recover_list_del(r);
|
||||
}
|
||||
|
||||
if (recover_list_empty(r->res_ls))
|
||||
wake_up(&r->res_ls->ls_wait_general);
|
||||
}
|
||||
|
||||
/*
|
||||
* The lvb needs to be recovered on all master rsb's. This includes setting
|
||||
* the VALNOTVALID flag if necessary, and determining the correct lvb contents
|
||||
* based on the lvb's of the locks held on the rsb.
|
||||
*
|
||||
* RSB_VALNOTVALID is set if there are only NL/CR locks on the rsb. If it
|
||||
* was already set prior to recovery, it's not cleared, regardless of locks.
|
||||
*
|
||||
* The LVB contents are only considered for changing when this is a new master
|
||||
* of the rsb (NEW_MASTER2). Then, the rsb's lvb is taken from any lkb with
|
||||
* mode > CR. If no lkb's exist with mode above CR, the lvb contents are taken
|
||||
* from the lkb with the largest lvb sequence number.
|
||||
*/
|
||||
|
||||
static void recover_lvb(struct dlm_rsb *r)
|
||||
{
|
||||
struct dlm_lkb *lkb, *high_lkb = NULL;
|
||||
uint32_t high_seq = 0;
|
||||
int lock_lvb_exists = 0;
|
||||
int big_lock_exists = 0;
|
||||
int lvblen = r->res_ls->ls_lvblen;
|
||||
|
||||
list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
|
||||
if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
|
||||
continue;
|
||||
|
||||
lock_lvb_exists = 1;
|
||||
|
||||
if (lkb->lkb_grmode > DLM_LOCK_CR) {
|
||||
big_lock_exists = 1;
|
||||
goto setflag;
|
||||
}
|
||||
|
||||
if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
|
||||
high_lkb = lkb;
|
||||
high_seq = lkb->lkb_lvbseq;
|
||||
}
|
||||
}
|
||||
|
||||
list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
|
||||
if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
|
||||
continue;
|
||||
|
||||
lock_lvb_exists = 1;
|
||||
|
||||
if (lkb->lkb_grmode > DLM_LOCK_CR) {
|
||||
big_lock_exists = 1;
|
||||
goto setflag;
|
||||
}
|
||||
|
||||
if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
|
||||
high_lkb = lkb;
|
||||
high_seq = lkb->lkb_lvbseq;
|
||||
}
|
||||
}
|
||||
|
||||
setflag:
|
||||
if (!lock_lvb_exists)
|
||||
goto out;
|
||||
|
||||
if (!big_lock_exists)
|
||||
rsb_set_flag(r, RSB_VALNOTVALID);
|
||||
|
||||
/* don't mess with the lvb unless we're the new master */
|
||||
if (!rsb_flag(r, RSB_NEW_MASTER2))
|
||||
goto out;
|
||||
|
||||
if (!r->res_lvbptr) {
|
||||
r->res_lvbptr = allocate_lvb(r->res_ls);
|
||||
if (!r->res_lvbptr)
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (big_lock_exists) {
|
||||
r->res_lvbseq = lkb->lkb_lvbseq;
|
||||
memcpy(r->res_lvbptr, lkb->lkb_lvbptr, lvblen);
|
||||
} else if (high_lkb) {
|
||||
r->res_lvbseq = high_lkb->lkb_lvbseq;
|
||||
memcpy(r->res_lvbptr, high_lkb->lkb_lvbptr, lvblen);
|
||||
} else {
|
||||
r->res_lvbseq = 0;
|
||||
memset(r->res_lvbptr, 0, lvblen);
|
||||
}
|
||||
out:
|
||||
return;
|
||||
}
|
||||
|
||||
/* All master rsb's flagged RECOVER_CONVERT need to be looked at. The locks
|
||||
converting PR->CW or CW->PR need to have their lkb_grmode set. */
|
||||
|
||||
static void recover_conversion(struct dlm_rsb *r)
|
||||
{
|
||||
struct dlm_lkb *lkb;
|
||||
int grmode = -1;
|
||||
|
||||
list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
|
||||
if (lkb->lkb_grmode == DLM_LOCK_PR ||
|
||||
lkb->lkb_grmode == DLM_LOCK_CW) {
|
||||
grmode = lkb->lkb_grmode;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
|
||||
if (lkb->lkb_grmode != DLM_LOCK_IV)
|
||||
continue;
|
||||
if (grmode == -1)
|
||||
lkb->lkb_grmode = lkb->lkb_rqmode;
|
||||
else
|
||||
lkb->lkb_grmode = grmode;
|
||||
}
|
||||
}
|
||||
|
||||
/* We've become the new master for this rsb and waiting/converting locks may
|
||||
need to be granted in dlm_grant_after_purge() due to locks that may have
|
||||
existed from a removed node. */
|
||||
|
||||
static void set_locks_purged(struct dlm_rsb *r)
|
||||
{
|
||||
if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
|
||||
rsb_set_flag(r, RSB_LOCKS_PURGED);
|
||||
}
|
||||
|
||||
void dlm_recover_rsbs(struct dlm_ls *ls)
|
||||
{
|
||||
struct dlm_rsb *r;
|
||||
int count = 0;
|
||||
|
||||
log_debug(ls, "dlm_recover_rsbs");
|
||||
|
||||
down_read(&ls->ls_root_sem);
|
||||
list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
|
||||
lock_rsb(r);
|
||||
if (is_master(r)) {
|
||||
if (rsb_flag(r, RSB_RECOVER_CONVERT))
|
||||
recover_conversion(r);
|
||||
if (rsb_flag(r, RSB_NEW_MASTER2))
|
||||
set_locks_purged(r);
|
||||
recover_lvb(r);
|
||||
count++;
|
||||
}
|
||||
rsb_clear_flag(r, RSB_RECOVER_CONVERT);
|
||||
rsb_clear_flag(r, RSB_NEW_MASTER2);
|
||||
unlock_rsb(r);
|
||||
}
|
||||
up_read(&ls->ls_root_sem);
|
||||
|
||||
log_debug(ls, "dlm_recover_rsbs %d rsbs", count);
|
||||
}
|
||||
|
||||
/* Create a single list of all root rsb's to be used during recovery */
|
||||
|
||||
int dlm_create_root_list(struct dlm_ls *ls)
|
||||
{
|
||||
struct dlm_rsb *r;
|
||||
int i, error = 0;
|
||||
|
||||
down_write(&ls->ls_root_sem);
|
||||
if (!list_empty(&ls->ls_root_list)) {
|
||||
log_error(ls, "root list not empty");
|
||||
error = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
for (i = 0; i < ls->ls_rsbtbl_size; i++) {
|
||||
read_lock(&ls->ls_rsbtbl[i].lock);
|
||||
list_for_each_entry(r, &ls->ls_rsbtbl[i].list, res_hashchain) {
|
||||
list_add(&r->res_root_list, &ls->ls_root_list);
|
||||
dlm_hold_rsb(r);
|
||||
}
|
||||
read_unlock(&ls->ls_rsbtbl[i].lock);
|
||||
}
|
||||
out:
|
||||
up_write(&ls->ls_root_sem);
|
||||
return error;
|
||||
}
|
||||
|
||||
void dlm_release_root_list(struct dlm_ls *ls)
|
||||
{
|
||||
struct dlm_rsb *r, *safe;
|
||||
|
||||
down_write(&ls->ls_root_sem);
|
||||
list_for_each_entry_safe(r, safe, &ls->ls_root_list, res_root_list) {
|
||||
list_del_init(&r->res_root_list);
|
||||
dlm_put_rsb(r);
|
||||
}
|
||||
up_write(&ls->ls_root_sem);
|
||||
}
|
||||
|
||||
void dlm_clear_toss_list(struct dlm_ls *ls)
|
||||
{
|
||||
struct dlm_rsb *r, *safe;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ls->ls_rsbtbl_size; i++) {
|
||||
write_lock(&ls->ls_rsbtbl[i].lock);
|
||||
list_for_each_entry_safe(r, safe, &ls->ls_rsbtbl[i].toss,
|
||||
res_hashchain) {
|
||||
list_del(&r->res_hashchain);
|
||||
free_rsb(r);
|
||||
}
|
||||
write_unlock(&ls->ls_rsbtbl[i].lock);
|
||||
}
|
||||
}
|
||||
|
34
fs/dlm/recover.h
Normal file

@@ -0,0 +1,34 @@
/******************************************************************************
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
** of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#ifndef __RECOVER_DOT_H__
#define __RECOVER_DOT_H__

int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls));
uint32_t dlm_recover_status(struct dlm_ls *ls);
void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status);
int dlm_recover_members_wait(struct dlm_ls *ls);
int dlm_recover_directory_wait(struct dlm_ls *ls);
int dlm_recover_locks_wait(struct dlm_ls *ls);
int dlm_recover_done_wait(struct dlm_ls *ls);
int dlm_recover_masters(struct dlm_ls *ls);
int dlm_recover_master_reply(struct dlm_ls *ls, struct dlm_rcom *rc);
int dlm_recover_locks(struct dlm_ls *ls);
void dlm_recovered_lock(struct dlm_rsb *r);
int dlm_create_root_list(struct dlm_ls *ls);
void dlm_release_root_list(struct dlm_ls *ls);
void dlm_clear_toss_list(struct dlm_ls *ls);
void dlm_recover_rsbs(struct dlm_ls *ls);

#endif	/* __RECOVER_DOT_H__ */

290  fs/dlm/recoverd.c  Normal file
@@ -0,0 +1,290 @@
/******************************************************************************
|
||||
*******************************************************************************
|
||||
**
|
||||
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
|
||||
**
|
||||
** This copyrighted material is made available to anyone wishing to use,
|
||||
** modify, copy, or redistribute it subject to the terms and conditions
|
||||
** of the GNU General Public License v.2.
|
||||
**
|
||||
*******************************************************************************
|
||||
******************************************************************************/
|
||||
|
||||
#include "dlm_internal.h"
|
||||
#include "lockspace.h"
|
||||
#include "member.h"
|
||||
#include "dir.h"
|
||||
#include "ast.h"
|
||||
#include "recover.h"
|
||||
#include "lowcomms.h"
|
||||
#include "lock.h"
|
||||
#include "requestqueue.h"
|
||||
#include "recoverd.h"
|
||||
|
||||
|
||||
/* If the start for which we're re-enabling locking (seq) has been superseded
|
||||
by a newer stop (ls_recover_seq), we need to leave locking disabled. */
|
||||
|
||||
static int enable_locking(struct dlm_ls *ls, uint64_t seq)
|
||||
{
|
||||
int error = -EINTR;
|
||||
|
||||
spin_lock(&ls->ls_recover_lock);
|
||||
if (ls->ls_recover_seq == seq) {
|
||||
set_bit(LSFL_RUNNING, &ls->ls_flags);
|
||||
up_write(&ls->ls_in_recovery);
|
||||
error = 0;
|
||||
}
|
||||
spin_unlock(&ls->ls_recover_lock);
|
||||
return error;
|
||||
}
|
||||
|
||||
static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
|
||||
{
|
||||
unsigned long start;
|
||||
int error, neg = 0;
|
||||
|
||||
log_debug(ls, "recover %llx", rv->seq);
|
||||
|
||||
mutex_lock(&ls->ls_recoverd_active);
|
||||
|
||||
/*
|
||||
* Suspending and resuming dlm_astd ensures that no lkb's from this ls
|
||||
* will be processed by dlm_astd during recovery.
|
||||
*/
|
||||
|
||||
dlm_astd_suspend();
|
||||
dlm_astd_resume();
|
||||
|
||||
/*
|
||||
* This list of root rsb's will be the basis of most of the recovery
|
||||
* routines.
|
||||
*/
|
||||
|
||||
dlm_create_root_list(ls);
|
||||
|
||||
/*
|
||||
* Free all the tossed rsb's so we don't have to recover them.
|
||||
*/
|
||||
|
||||
dlm_clear_toss_list(ls);
|
||||
|
||||
/*
|
||||
* Add or remove nodes from the lockspace's ls_nodes list.
|
||||
* Also waits for all nodes to complete dlm_recover_members.
|
||||
*/
|
||||
|
||||
error = dlm_recover_members(ls, rv, &neg);
|
||||
if (error) {
|
||||
log_error(ls, "recover_members failed %d", error);
|
||||
goto fail;
|
||||
}
|
||||
start = jiffies;
|
||||
|
||||
/*
|
||||
* Rebuild our own share of the directory by collecting from all other
|
||||
* nodes their master rsb names that hash to us.
|
||||
*/
|
||||
|
||||
error = dlm_recover_directory(ls);
|
||||
if (error) {
|
||||
log_error(ls, "recover_directory failed %d", error);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/*
|
||||
* Purge directory-related requests that are saved in requestqueue.
|
||||
* All dir requests from before recovery are invalid now due to the dir
|
||||
* rebuild and will be resent by the requesting nodes.
|
||||
*/
|
||||
|
||||
dlm_purge_requestqueue(ls);
|
||||
|
||||
/*
|
||||
* Wait for all nodes to complete directory rebuild.
|
||||
*/
|
||||
|
||||
error = dlm_recover_directory_wait(ls);
|
||||
if (error) {
|
||||
log_error(ls, "recover_directory_wait failed %d", error);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/*
|
||||
* We may have outstanding operations that are waiting for a reply from
|
||||
* a failed node. Mark these to be resent after recovery. Unlock and
|
||||
* cancel ops can just be completed.
|
||||
*/
|
||||
|
||||
dlm_recover_waiters_pre(ls);
|
||||
|
||||
error = dlm_recovery_stopped(ls);
|
||||
if (error)
|
||||
goto fail;
|
||||
|
||||
if (neg || dlm_no_directory(ls)) {
|
||||
/*
|
||||
* Clear lkb's for departed nodes.
|
||||
*/
|
||||
|
||||
dlm_purge_locks(ls);
|
||||
|
||||
/*
|
||||
* Get new master nodeid's for rsb's that were mastered on
|
||||
* departed nodes.
|
||||
*/
|
||||
|
||||
error = dlm_recover_masters(ls);
|
||||
if (error) {
|
||||
log_error(ls, "recover_masters failed %d", error);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/*
|
||||
* Send our locks on remastered rsb's to the new masters.
|
||||
*/
|
||||
|
||||
error = dlm_recover_locks(ls);
|
||||
if (error) {
|
||||
log_error(ls, "recover_locks failed %d", error);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
error = dlm_recover_locks_wait(ls);
|
||||
if (error) {
|
||||
log_error(ls, "recover_locks_wait failed %d", error);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/*
|
||||
* Finalize state in master rsb's now that all locks can be
|
||||
* checked. This includes conversion resolution and lvb
|
||||
* settings.
|
||||
*/
|
||||
|
||||
dlm_recover_rsbs(ls);
|
||||
}
|
||||
|
||||
dlm_release_root_list(ls);
|
||||
|
||||
dlm_set_recover_status(ls, DLM_RS_DONE);
|
||||
error = dlm_recover_done_wait(ls);
|
||||
if (error) {
|
||||
log_error(ls, "recover_done_wait failed %d", error);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
dlm_clear_members_gone(ls);
|
||||
|
||||
error = enable_locking(ls, rv->seq);
|
||||
if (error) {
|
||||
log_error(ls, "enable_locking failed %d", error);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
error = dlm_process_requestqueue(ls);
|
||||
if (error) {
|
||||
log_error(ls, "process_requestqueue failed %d", error);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
error = dlm_recover_waiters_post(ls);
|
||||
if (error) {
|
||||
log_error(ls, "recover_waiters_post failed %d", error);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
dlm_grant_after_purge(ls);
|
||||
|
||||
dlm_astd_wake();
|
||||
|
||||
log_debug(ls, "recover %llx done: %u ms", rv->seq,
|
||||
jiffies_to_msecs(jiffies - start));
|
||||
mutex_unlock(&ls->ls_recoverd_active);
|
||||
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
dlm_release_root_list(ls);
|
||||
log_debug(ls, "recover %llx error %d", rv->seq, error);
|
||||
mutex_unlock(&ls->ls_recoverd_active);
|
||||
return error;
|
||||
}
|
||||
|
||||
static void do_ls_recovery(struct dlm_ls *ls)
|
||||
{
|
||||
struct dlm_recover *rv = NULL;
|
||||
|
||||
spin_lock(&ls->ls_recover_lock);
|
||||
rv = ls->ls_recover_args;
|
||||
ls->ls_recover_args = NULL;
|
||||
clear_bit(LSFL_RECOVERY_STOP, &ls->ls_flags);
|
||||
spin_unlock(&ls->ls_recover_lock);
|
||||
|
||||
if (rv) {
|
||||
ls_recover(ls, rv);
|
||||
kfree(rv->nodeids);
|
||||
kfree(rv);
|
||||
}
|
||||
}
|
||||
|
||||
static int dlm_recoverd(void *arg)
|
||||
{
|
||||
struct dlm_ls *ls;
|
||||
|
||||
ls = dlm_find_lockspace_local(arg);
|
||||
if (!ls) {
|
||||
log_print("dlm_recoverd: no lockspace %p", arg);
|
||||
return -1;
|
||||
}
|
||||
|
||||
while (!kthread_should_stop()) {
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
if (!test_bit(LSFL_WORK, &ls->ls_flags))
|
||||
schedule();
|
||||
set_current_state(TASK_RUNNING);
|
||||
|
||||
if (test_and_clear_bit(LSFL_WORK, &ls->ls_flags))
|
||||
do_ls_recovery(ls);
|
||||
}
|
||||
|
||||
dlm_put_lockspace(ls);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void dlm_recoverd_kick(struct dlm_ls *ls)
|
||||
{
|
||||
set_bit(LSFL_WORK, &ls->ls_flags);
|
||||
wake_up_process(ls->ls_recoverd_task);
|
||||
}
|
||||
|
||||
int dlm_recoverd_start(struct dlm_ls *ls)
|
||||
{
|
||||
struct task_struct *p;
|
||||
int error = 0;
|
||||
|
||||
p = kthread_run(dlm_recoverd, ls, "dlm_recoverd");
|
||||
if (IS_ERR(p))
|
||||
error = PTR_ERR(p);
|
||||
else
|
||||
ls->ls_recoverd_task = p;
|
||||
return error;
|
||||
}
|
||||
|
||||
void dlm_recoverd_stop(struct dlm_ls *ls)
|
||||
{
|
||||
kthread_stop(ls->ls_recoverd_task);
|
||||
}
|
||||
|
||||
void dlm_recoverd_suspend(struct dlm_ls *ls)
|
||||
{
|
||||
wake_up(&ls->ls_wait_general);
|
||||
mutex_lock(&ls->ls_recoverd_active);
|
||||
}
|
||||
|
||||
void dlm_recoverd_resume(struct dlm_ls *ls)
|
||||
{
|
||||
mutex_unlock(&ls->ls_recoverd_active);
|
||||
}
|
||||
|
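
Inside ls_recover() above, enable_locking() only re-enables locking if the recovery it belongs to has not been superseded by a newer stop, which it checks by comparing sequence numbers under the recover lock. Below is a small standalone sketch of that check, assuming hypothetical names (enable_if_current, recover_seq) and using a pthread mutex where the kernel uses a spinlock.

/* Illustrative sketch only; names are hypothetical. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t recover_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t recover_seq;     /* bumped by every stop event */
static int running;              /* nonzero once locking is enabled */

/* Called when the recovery that started at sequence 'seq' finishes. */
static int enable_if_current(uint64_t seq)
{
    int enabled = 0;

    pthread_mutex_lock(&recover_lock);
    if (recover_seq == seq) {    /* no newer stop superseded this start */
        running = 1;
        enabled = 1;
    }
    pthread_mutex_unlock(&recover_lock);
    return enabled;
}

int main(void)
{
    uint64_t seq = recover_seq;  /* snapshot taken when recovery started */
    recover_seq++;               /* a newer stop arrives before we finish */
    printf("enabled: %d\n", enable_if_current(seq));  /* prints "enabled: 0" */
    return 0;
}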

24  fs/dlm/recoverd.h  Normal file
@@ -0,0 +1,24 @@
/******************************************************************************
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
** of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#ifndef __RECOVERD_DOT_H__
#define __RECOVERD_DOT_H__

void dlm_recoverd_kick(struct dlm_ls *ls);
void dlm_recoverd_stop(struct dlm_ls *ls);
int dlm_recoverd_start(struct dlm_ls *ls);
void dlm_recoverd_suspend(struct dlm_ls *ls);
void dlm_recoverd_resume(struct dlm_ls *ls);

#endif /* __RECOVERD_DOT_H__ */

184  fs/dlm/requestqueue.c  Normal file
@@ -0,0 +1,184 @@
/******************************************************************************
|
||||
*******************************************************************************
|
||||
**
|
||||
** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
|
||||
**
|
||||
** This copyrighted material is made available to anyone wishing to use,
|
||||
** modify, copy, or redistribute it subject to the terms and conditions
|
||||
** of the GNU General Public License v.2.
|
||||
**
|
||||
*******************************************************************************
|
||||
******************************************************************************/
|
||||
|
||||
#include "dlm_internal.h"
|
||||
#include "member.h"
|
||||
#include "lock.h"
|
||||
#include "dir.h"
|
||||
#include "config.h"
|
||||
#include "requestqueue.h"
|
||||
|
||||
struct rq_entry {
|
||||
struct list_head list;
|
||||
int nodeid;
|
||||
char request[1];
|
||||
};
|
||||
|
||||
/*
|
||||
* Requests received while the lockspace is in recovery get added to the
|
||||
* request queue and processed when recovery is complete. This happens when
|
||||
* the lockspace is suspended on some nodes before it is on others, or the
|
||||
* lockspace is enabled on some while still suspended on others.
|
||||
*/
|
||||
|
||||
void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd)
|
||||
{
|
||||
struct rq_entry *e;
|
||||
int length = hd->h_length;
|
||||
|
||||
if (dlm_is_removed(ls, nodeid))
|
||||
return;
|
||||
|
||||
e = kmalloc(sizeof(struct rq_entry) + length, GFP_KERNEL);
|
||||
if (!e) {
|
||||
log_print("dlm_add_requestqueue: out of memory\n");
|
||||
return;
|
||||
}
|
||||
|
||||
e->nodeid = nodeid;
|
||||
memcpy(e->request, hd, length);
|
||||
|
||||
mutex_lock(&ls->ls_requestqueue_mutex);
|
||||
list_add_tail(&e->list, &ls->ls_requestqueue);
|
||||
mutex_unlock(&ls->ls_requestqueue_mutex);
|
||||
}
|
||||
|
||||
int dlm_process_requestqueue(struct dlm_ls *ls)
|
||||
{
|
||||
struct rq_entry *e;
|
||||
struct dlm_header *hd;
|
||||
int error = 0;
|
||||
|
||||
mutex_lock(&ls->ls_requestqueue_mutex);
|
||||
|
||||
for (;;) {
|
||||
if (list_empty(&ls->ls_requestqueue)) {
|
||||
mutex_unlock(&ls->ls_requestqueue_mutex);
|
||||
error = 0;
|
||||
break;
|
||||
}
|
||||
e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list);
|
||||
mutex_unlock(&ls->ls_requestqueue_mutex);
|
||||
|
||||
hd = (struct dlm_header *) e->request;
|
||||
error = dlm_receive_message(hd, e->nodeid, 1);
|
||||
|
||||
if (error == -EINTR) {
|
||||
/* entry is left on requestqueue */
|
||||
log_debug(ls, "process_requestqueue abort eintr");
|
||||
break;
|
||||
}
|
||||
|
||||
mutex_lock(&ls->ls_requestqueue_mutex);
|
||||
list_del(&e->list);
|
||||
kfree(e);
|
||||
|
||||
if (dlm_locking_stopped(ls)) {
|
||||
log_debug(ls, "process_requestqueue abort running");
|
||||
mutex_unlock(&ls->ls_requestqueue_mutex);
|
||||
error = -EINTR;
|
||||
break;
|
||||
}
|
||||
schedule();
|
||||
}
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* After recovery is done, locking is resumed and dlm_recoverd takes all the
|
||||
* saved requests and processes them as they would have been by dlm_recvd. At
|
||||
* the same time, dlm_recvd will start receiving new requests from remote
|
||||
* nodes. We want to delay dlm_recvd processing new requests until
|
||||
* dlm_recoverd has finished processing the old saved requests.
|
||||
*/
|
||||
|
||||
void dlm_wait_requestqueue(struct dlm_ls *ls)
|
||||
{
|
||||
for (;;) {
|
||||
mutex_lock(&ls->ls_requestqueue_mutex);
|
||||
if (list_empty(&ls->ls_requestqueue))
|
||||
break;
|
||||
if (dlm_locking_stopped(ls))
|
||||
break;
|
||||
mutex_unlock(&ls->ls_requestqueue_mutex);
|
||||
schedule();
|
||||
}
|
||||
mutex_unlock(&ls->ls_requestqueue_mutex);
|
||||
}
|
||||
|
||||
static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid)
|
||||
{
|
||||
uint32_t type = ms->m_type;
|
||||
|
||||
if (dlm_is_removed(ls, nodeid))
|
||||
return 1;
|
||||
|
||||
/* directory operations are always purged because the directory is
|
||||
always rebuilt during recovery and the lookups resent */
|
||||
|
||||
if (type == DLM_MSG_REMOVE ||
|
||||
type == DLM_MSG_LOOKUP ||
|
||||
type == DLM_MSG_LOOKUP_REPLY)
|
||||
return 1;
|
||||
|
||||
if (!dlm_no_directory(ls))
|
||||
return 0;
|
||||
|
||||
/* with no directory, the master is likely to change as a part of
|
||||
recovery; requests to/from the defunct master need to be purged */
|
||||
|
||||
switch (type) {
|
||||
case DLM_MSG_REQUEST:
|
||||
case DLM_MSG_CONVERT:
|
||||
case DLM_MSG_UNLOCK:
|
||||
case DLM_MSG_CANCEL:
|
||||
/* we're no longer the master of this resource, the sender
|
||||
will resend to the new master (see waiter_needs_recovery) */
|
||||
|
||||
if (dlm_hash2nodeid(ls, ms->m_hash) != dlm_our_nodeid())
|
||||
return 1;
|
||||
break;
|
||||
|
||||
case DLM_MSG_REQUEST_REPLY:
|
||||
case DLM_MSG_CONVERT_REPLY:
|
||||
case DLM_MSG_UNLOCK_REPLY:
|
||||
case DLM_MSG_CANCEL_REPLY:
|
||||
case DLM_MSG_GRANT:
|
||||
/* this reply is from the former master of the resource,
|
||||
we'll resend to the new master if needed */
|
||||
|
||||
if (dlm_hash2nodeid(ls, ms->m_hash) != nodeid)
|
||||
return 1;
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void dlm_purge_requestqueue(struct dlm_ls *ls)
|
||||
{
|
||||
struct dlm_message *ms;
|
||||
struct rq_entry *e, *safe;
|
||||
|
||||
mutex_lock(&ls->ls_requestqueue_mutex);
|
||||
list_for_each_entry_safe(e, safe, &ls->ls_requestqueue, list) {
|
||||
ms = (struct dlm_message *) e->request;
|
||||
|
||||
if (purge_request(ls, ms, e->nodeid)) {
|
||||
list_del(&e->list);
|
||||
kfree(e);
|
||||
}
|
||||
}
|
||||
mutex_unlock(&ls->ls_requestqueue_mutex);
|
||||
}
|
||||
|
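
The block comment at the top of requestqueue.c describes the hold-and-replay scheme used here: messages that arrive while locking is stopped are saved on a queue, entries that recovery has made meaningless are purged, and the survivors are replayed in order once locking resumes. A compact userspace sketch of that idea follows; the names (struct req, queue_request, purge_queue, replay_queue) are hypothetical and the list handling is deliberately simplified.

/* Illustrative sketch only; not the kernel requestqueue code. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct req {
    struct req *next;
    int nodeid;
    char body[32];
};

static struct req *queue;        /* requests held while locking is stopped */

static void queue_request(int nodeid, const char *body)
{
    struct req *e = calloc(1, sizeof(*e)), **pp = &queue;

    e->nodeid = nodeid;
    strncpy(e->body, body, sizeof(e->body) - 1);
    while (*pp)
        pp = &(*pp)->next;       /* append so replay preserves arrival order */
    *pp = e;
}

/* Drop queued requests that a predicate says recovery has invalidated,
   e.g. requests from removed nodes or stale directory lookups. */
static void purge_queue(int (*stale)(const struct req *))
{
    for (struct req **pp = &queue; *pp; ) {
        if (stale(*pp)) {
            struct req *e = *pp;
            *pp = e->next;
            free(e);
        } else {
            pp = &(*pp)->next;
        }
    }
}

/* Once locking is re-enabled, replay whatever survived the purge. */
static void replay_queue(void (*deliver)(const struct req *))
{
    while (queue) {
        struct req *e = queue;
        queue = e->next;
        deliver(e);
        free(e);
    }
}

static int from_node_two(const struct req *e) { return e->nodeid == 2; }
static void print_req(const struct req *e) { printf("%d: %s\n", e->nodeid, e->body); }

int main(void)
{
    queue_request(1, "lookup");
    queue_request(2, "lookup from a removed node");
    purge_queue(from_node_two);
    replay_queue(print_req);     /* only the node 1 request is delivered */
    return 0;
}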

22  fs/dlm/requestqueue.h  Normal file
@@ -0,0 +1,22 @@
/******************************************************************************
*******************************************************************************
**
** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
** of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#ifndef __REQUESTQUEUE_DOT_H__
#define __REQUESTQUEUE_DOT_H__

void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd);
int dlm_process_requestqueue(struct dlm_ls *ls);
void dlm_wait_requestqueue(struct dlm_ls *ls);
void dlm_purge_requestqueue(struct dlm_ls *ls);

#endif

788  fs/dlm/user.c  Normal file
@@ -0,0 +1,788 @@
/*
|
||||
* Copyright (C) 2006 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This copyrighted material is made available to anyone wishing to use,
|
||||
* modify, copy, or redistribute it subject to the terms and conditions
|
||||
* of the GNU General Public License v.2.
|
||||
*/
|
||||
|
||||
#include <linux/miscdevice.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/file.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/poll.h>
|
||||
#include <linux/signal.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/dlm.h>
|
||||
#include <linux/dlm_device.h>
|
||||
|
||||
#include "dlm_internal.h"
|
||||
#include "lockspace.h"
|
||||
#include "lock.h"
|
||||
#include "lvb_table.h"
|
||||
|
||||
static const char *name_prefix="dlm";
|
||||
static struct miscdevice ctl_device;
|
||||
static struct file_operations device_fops;
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
|
||||
struct dlm_lock_params32 {
|
||||
__u8 mode;
|
||||
__u8 namelen;
|
||||
__u16 flags;
|
||||
__u32 lkid;
|
||||
__u32 parent;
|
||||
|
||||
__u32 castparam;
|
||||
__u32 castaddr;
|
||||
__u32 bastparam;
|
||||
__u32 bastaddr;
|
||||
__u32 lksb;
|
||||
|
||||
char lvb[DLM_USER_LVB_LEN];
|
||||
char name[0];
|
||||
};
|
||||
|
||||
struct dlm_write_request32 {
|
||||
__u32 version[3];
|
||||
__u8 cmd;
|
||||
__u8 is64bit;
|
||||
__u8 unused[2];
|
||||
|
||||
union {
|
||||
struct dlm_lock_params32 lock;
|
||||
struct dlm_lspace_params lspace;
|
||||
} i;
|
||||
};
|
||||
|
||||
struct dlm_lksb32 {
|
||||
__u32 sb_status;
|
||||
__u32 sb_lkid;
|
||||
__u8 sb_flags;
|
||||
__u32 sb_lvbptr;
|
||||
};
|
||||
|
||||
struct dlm_lock_result32 {
|
||||
__u32 length;
|
||||
__u32 user_astaddr;
|
||||
__u32 user_astparam;
|
||||
__u32 user_lksb;
|
||||
struct dlm_lksb32 lksb;
|
||||
__u8 bast_mode;
|
||||
__u8 unused[3];
|
||||
/* Offsets may be zero if no data is present */
|
||||
__u32 lvb_offset;
|
||||
};
|
||||
|
||||
static void compat_input(struct dlm_write_request *kb,
|
||||
struct dlm_write_request32 *kb32)
|
||||
{
|
||||
kb->version[0] = kb32->version[0];
|
||||
kb->version[1] = kb32->version[1];
|
||||
kb->version[2] = kb32->version[2];
|
||||
|
||||
kb->cmd = kb32->cmd;
|
||||
kb->is64bit = kb32->is64bit;
|
||||
if (kb->cmd == DLM_USER_CREATE_LOCKSPACE ||
|
||||
kb->cmd == DLM_USER_REMOVE_LOCKSPACE) {
|
||||
kb->i.lspace.flags = kb32->i.lspace.flags;
|
||||
kb->i.lspace.minor = kb32->i.lspace.minor;
|
||||
strcpy(kb->i.lspace.name, kb32->i.lspace.name);
|
||||
} else {
|
||||
kb->i.lock.mode = kb32->i.lock.mode;
|
||||
kb->i.lock.namelen = kb32->i.lock.namelen;
|
||||
kb->i.lock.flags = kb32->i.lock.flags;
|
||||
kb->i.lock.lkid = kb32->i.lock.lkid;
|
||||
kb->i.lock.parent = kb32->i.lock.parent;
|
||||
kb->i.lock.castparam = (void *)(long)kb32->i.lock.castparam;
|
||||
kb->i.lock.castaddr = (void *)(long)kb32->i.lock.castaddr;
|
||||
kb->i.lock.bastparam = (void *)(long)kb32->i.lock.bastparam;
|
||||
kb->i.lock.bastaddr = (void *)(long)kb32->i.lock.bastaddr;
|
||||
kb->i.lock.lksb = (void *)(long)kb32->i.lock.lksb;
|
||||
memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN);
|
||||
memcpy(kb->i.lock.name, kb32->i.lock.name, kb->i.lock.namelen);
|
||||
}
|
||||
}
|
||||
|
||||
static void compat_output(struct dlm_lock_result *res,
|
||||
struct dlm_lock_result32 *res32)
|
||||
{
|
||||
res32->length = res->length - (sizeof(struct dlm_lock_result) -
|
||||
sizeof(struct dlm_lock_result32));
|
||||
res32->user_astaddr = (__u32)(long)res->user_astaddr;
|
||||
res32->user_astparam = (__u32)(long)res->user_astparam;
|
||||
res32->user_lksb = (__u32)(long)res->user_lksb;
|
||||
res32->bast_mode = res->bast_mode;
|
||||
|
||||
res32->lvb_offset = res->lvb_offset;
|
||||
res32->length = res->length;
|
||||
|
||||
res32->lksb.sb_status = res->lksb.sb_status;
|
||||
res32->lksb.sb_flags = res->lksb.sb_flags;
|
||||
res32->lksb.sb_lkid = res->lksb.sb_lkid;
|
||||
res32->lksb.sb_lvbptr = (__u32)(long)res->lksb.sb_lvbptr;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
void dlm_user_add_ast(struct dlm_lkb *lkb, int type)
|
||||
{
|
||||
struct dlm_ls *ls;
|
||||
struct dlm_user_args *ua;
|
||||
struct dlm_user_proc *proc;
|
||||
int remove_ownqueue = 0;
|
||||
|
||||
/* dlm_clear_proc_locks() sets ORPHAN/DEAD flag on each
|
||||
lkb before dealing with it. We need to check this
|
||||
flag before taking ls_clear_proc_locks mutex because if
|
||||
it's set, dlm_clear_proc_locks() holds the mutex. */
|
||||
|
||||
if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD)) {
|
||||
/* log_print("user_add_ast skip1 %x", lkb->lkb_flags); */
|
||||
return;
|
||||
}
|
||||
|
||||
ls = lkb->lkb_resource->res_ls;
|
||||
mutex_lock(&ls->ls_clear_proc_locks);
|
||||
|
||||
/* If ORPHAN/DEAD flag is set, it means the process is dead so an ast
|
||||
can't be delivered. For ORPHAN's, dlm_clear_proc_locks() freed
|
||||
lkb->ua so we can't try to use it. */
|
||||
|
||||
if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD)) {
|
||||
/* log_print("user_add_ast skip2 %x", lkb->lkb_flags); */
|
||||
goto out;
|
||||
}
|
||||
|
||||
DLM_ASSERT(lkb->lkb_astparam, dlm_print_lkb(lkb););
|
||||
ua = (struct dlm_user_args *)lkb->lkb_astparam;
|
||||
proc = ua->proc;
|
||||
|
||||
if (type == AST_BAST && ua->bastaddr == NULL)
|
||||
goto out;
|
||||
|
||||
spin_lock(&proc->asts_spin);
|
||||
if (!(lkb->lkb_ast_type & (AST_COMP | AST_BAST))) {
|
||||
kref_get(&lkb->lkb_ref);
|
||||
list_add_tail(&lkb->lkb_astqueue, &proc->asts);
|
||||
lkb->lkb_ast_type |= type;
|
||||
wake_up_interruptible(&proc->wait);
|
||||
}
|
||||
|
||||
/* noqueue requests that fail may need to be removed from the
|
||||
proc's locks list, there should be a better way of detecting
|
||||
this situation than checking all these things... */
|
||||
|
||||
if (type == AST_COMP && lkb->lkb_grmode == DLM_LOCK_IV &&
|
||||
ua->lksb.sb_status == -EAGAIN && !list_empty(&lkb->lkb_ownqueue))
|
||||
remove_ownqueue = 1;
|
||||
|
||||
/* We want to copy the lvb to userspace when the completion
|
||||
ast is read if the status is 0, the lock has an lvb and
|
||||
lvb_ops says we should. We could probably have set_lvb_lock()
|
||||
set update_user_lvb instead and not need old_mode */
|
||||
|
||||
if ((lkb->lkb_ast_type & AST_COMP) &&
|
||||
(lkb->lkb_lksb->sb_status == 0) &&
|
||||
lkb->lkb_lksb->sb_lvbptr &&
|
||||
dlm_lvb_operations[ua->old_mode + 1][lkb->lkb_grmode + 1])
|
||||
ua->update_user_lvb = 1;
|
||||
else
|
||||
ua->update_user_lvb = 0;
|
||||
|
||||
spin_unlock(&proc->asts_spin);
|
||||
|
||||
if (remove_ownqueue) {
|
||||
spin_lock(&ua->proc->locks_spin);
|
||||
list_del_init(&lkb->lkb_ownqueue);
|
||||
spin_unlock(&ua->proc->locks_spin);
|
||||
dlm_put_lkb(lkb);
|
||||
}
|
||||
out:
|
||||
mutex_unlock(&ls->ls_clear_proc_locks);
|
||||
}
|
||||
|
||||
static int device_user_lock(struct dlm_user_proc *proc,
|
||||
struct dlm_lock_params *params)
|
||||
{
|
||||
struct dlm_ls *ls;
|
||||
struct dlm_user_args *ua;
|
||||
int error = -ENOMEM;
|
||||
|
||||
ls = dlm_find_lockspace_local(proc->lockspace);
|
||||
if (!ls)
|
||||
return -ENOENT;
|
||||
|
||||
if (!params->castaddr || !params->lksb) {
|
||||
error = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ua = kzalloc(sizeof(struct dlm_user_args), GFP_KERNEL);
|
||||
if (!ua)
|
||||
goto out;
|
||||
ua->proc = proc;
|
||||
ua->user_lksb = params->lksb;
|
||||
ua->castparam = params->castparam;
|
||||
ua->castaddr = params->castaddr;
|
||||
ua->bastparam = params->bastparam;
|
||||
ua->bastaddr = params->bastaddr;
|
||||
|
||||
if (params->flags & DLM_LKF_CONVERT)
|
||||
error = dlm_user_convert(ls, ua,
|
||||
params->mode, params->flags,
|
||||
params->lkid, params->lvb);
|
||||
else {
|
||||
error = dlm_user_request(ls, ua,
|
||||
params->mode, params->flags,
|
||||
params->name, params->namelen,
|
||||
params->parent);
|
||||
if (!error)
|
||||
error = ua->lksb.sb_lkid;
|
||||
}
|
||||
out:
|
||||
dlm_put_lockspace(ls);
|
||||
return error;
|
||||
}
|
||||
|
||||
static int device_user_unlock(struct dlm_user_proc *proc,
|
||||
struct dlm_lock_params *params)
|
||||
{
|
||||
struct dlm_ls *ls;
|
||||
struct dlm_user_args *ua;
|
||||
int error = -ENOMEM;
|
||||
|
||||
ls = dlm_find_lockspace_local(proc->lockspace);
|
||||
if (!ls)
|
||||
return -ENOENT;
|
||||
|
||||
ua = kzalloc(sizeof(struct dlm_user_args), GFP_KERNEL);
|
||||
if (!ua)
|
||||
goto out;
|
||||
ua->proc = proc;
|
||||
ua->user_lksb = params->lksb;
|
||||
ua->castparam = params->castparam;
|
||||
ua->castaddr = params->castaddr;
|
||||
|
||||
if (params->flags & DLM_LKF_CANCEL)
|
||||
error = dlm_user_cancel(ls, ua, params->flags, params->lkid);
|
||||
else
|
||||
error = dlm_user_unlock(ls, ua, params->flags, params->lkid,
|
||||
params->lvb);
|
||||
out:
|
||||
dlm_put_lockspace(ls);
|
||||
return error;
|
||||
}
|
||||
|
||||
static int device_create_lockspace(struct dlm_lspace_params *params)
|
||||
{
|
||||
dlm_lockspace_t *lockspace;
|
||||
struct dlm_ls *ls;
|
||||
int error, len;
|
||||
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
return -EPERM;
|
||||
|
||||
error = dlm_new_lockspace(params->name, strlen(params->name),
|
||||
&lockspace, 0, DLM_USER_LVB_LEN);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
ls = dlm_find_lockspace_local(lockspace);
|
||||
if (!ls)
|
||||
return -ENOENT;
|
||||
|
||||
error = -ENOMEM;
|
||||
len = strlen(params->name) + strlen(name_prefix) + 2;
|
||||
ls->ls_device.name = kzalloc(len, GFP_KERNEL);
|
||||
if (!ls->ls_device.name)
|
||||
goto fail;
|
||||
snprintf((char *)ls->ls_device.name, len, "%s_%s", name_prefix,
|
||||
params->name);
|
||||
ls->ls_device.fops = &device_fops;
|
||||
ls->ls_device.minor = MISC_DYNAMIC_MINOR;
|
||||
|
||||
error = misc_register(&ls->ls_device);
|
||||
if (error) {
|
||||
kfree(ls->ls_device.name);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
error = ls->ls_device.minor;
|
||||
dlm_put_lockspace(ls);
|
||||
return error;
|
||||
|
||||
fail:
|
||||
dlm_put_lockspace(ls);
|
||||
dlm_release_lockspace(lockspace, 0);
|
||||
return error;
|
||||
}
|
||||
|
||||
static int device_remove_lockspace(struct dlm_lspace_params *params)
|
||||
{
|
||||
dlm_lockspace_t *lockspace;
|
||||
struct dlm_ls *ls;
|
||||
int error, force = 0;
|
||||
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
return -EPERM;
|
||||
|
||||
ls = dlm_find_lockspace_device(params->minor);
|
||||
if (!ls)
|
||||
return -ENOENT;
|
||||
|
||||
error = misc_deregister(&ls->ls_device);
|
||||
if (error) {
|
||||
dlm_put_lockspace(ls);
|
||||
goto out;
|
||||
}
|
||||
kfree(ls->ls_device.name);
|
||||
|
||||
if (params->flags & DLM_USER_LSFLG_FORCEFREE)
|
||||
force = 2;
|
||||
|
||||
lockspace = ls->ls_local_handle;
|
||||
|
||||
/* dlm_release_lockspace waits for references to go to zero,
|
||||
so all processes will need to close their device for the ls
|
||||
before the release will proceed */
|
||||
|
||||
dlm_put_lockspace(ls);
|
||||
error = dlm_release_lockspace(lockspace, force);
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
||||
/* Check the user's version matches ours */
|
||||
static int check_version(struct dlm_write_request *req)
|
||||
{
|
||||
if (req->version[0] != DLM_DEVICE_VERSION_MAJOR ||
|
||||
(req->version[0] == DLM_DEVICE_VERSION_MAJOR &&
|
||||
req->version[1] > DLM_DEVICE_VERSION_MINOR)) {
|
||||
|
||||
printk(KERN_DEBUG "dlm: process %s (%d) version mismatch "
|
||||
"user (%d.%d.%d) kernel (%d.%d.%d)\n",
|
||||
current->comm,
|
||||
current->pid,
|
||||
req->version[0],
|
||||
req->version[1],
|
||||
req->version[2],
|
||||
DLM_DEVICE_VERSION_MAJOR,
|
||||
DLM_DEVICE_VERSION_MINOR,
|
||||
DLM_DEVICE_VERSION_PATCH);
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* device_write
|
||||
*
|
||||
* device_user_lock
|
||||
* dlm_user_request -> request_lock
|
||||
* dlm_user_convert -> convert_lock
|
||||
*
|
||||
* device_user_unlock
|
||||
* dlm_user_unlock -> unlock_lock
|
||||
* dlm_user_cancel -> cancel_lock
|
||||
*
|
||||
* device_create_lockspace
|
||||
* dlm_new_lockspace
|
||||
*
|
||||
* device_remove_lockspace
|
||||
* dlm_release_lockspace
|
||||
*/
|
||||
|
||||
/* a write to a lockspace device is a lock or unlock request, a write
|
||||
to the control device is to create/remove a lockspace */
|
||||
|
||||
static ssize_t device_write(struct file *file, const char __user *buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct dlm_user_proc *proc = file->private_data;
|
||||
struct dlm_write_request *kbuf;
|
||||
sigset_t tmpsig, allsigs;
|
||||
int error;
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
if (count < sizeof(struct dlm_write_request32))
|
||||
#else
|
||||
if (count < sizeof(struct dlm_write_request))
|
||||
#endif
|
||||
return -EINVAL;
|
||||
|
||||
kbuf = kmalloc(count, GFP_KERNEL);
|
||||
if (!kbuf)
|
||||
return -ENOMEM;
|
||||
|
||||
if (copy_from_user(kbuf, buf, count)) {
|
||||
error = -EFAULT;
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
if (check_version(kbuf)) {
|
||||
error = -EBADE;
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
if (!kbuf->is64bit) {
|
||||
struct dlm_write_request32 *k32buf;
|
||||
k32buf = (struct dlm_write_request32 *)kbuf;
|
||||
kbuf = kmalloc(count + (sizeof(struct dlm_write_request) -
|
||||
sizeof(struct dlm_write_request32)), GFP_KERNEL);
|
||||
if (!kbuf)
|
||||
return -ENOMEM;
|
||||
|
||||
if (proc)
|
||||
set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags);
|
||||
compat_input(kbuf, k32buf);
|
||||
kfree(k32buf);
|
||||
}
|
||||
#endif
|
||||
|
||||
/* do we really need this? can a write happen after a close? */
|
||||
if ((kbuf->cmd == DLM_USER_LOCK || kbuf->cmd == DLM_USER_UNLOCK) &&
|
||||
test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))
|
||||
return -EINVAL;
|
||||
|
||||
sigfillset(&allsigs);
|
||||
sigprocmask(SIG_BLOCK, &allsigs, &tmpsig);
|
||||
|
||||
error = -EINVAL;
|
||||
|
||||
switch (kbuf->cmd)
|
||||
{
|
||||
case DLM_USER_LOCK:
|
||||
if (!proc) {
|
||||
log_print("no locking on control device");
|
||||
goto out_sig;
|
||||
}
|
||||
error = device_user_lock(proc, &kbuf->i.lock);
|
||||
break;
|
||||
|
||||
case DLM_USER_UNLOCK:
|
||||
if (!proc) {
|
||||
log_print("no locking on control device");
|
||||
goto out_sig;
|
||||
}
|
||||
error = device_user_unlock(proc, &kbuf->i.lock);
|
||||
break;
|
||||
|
||||
case DLM_USER_CREATE_LOCKSPACE:
|
||||
if (proc) {
|
||||
log_print("create/remove only on control device");
|
||||
goto out_sig;
|
||||
}
|
||||
error = device_create_lockspace(&kbuf->i.lspace);
|
||||
break;
|
||||
|
||||
case DLM_USER_REMOVE_LOCKSPACE:
|
||||
if (proc) {
|
||||
log_print("create/remove only on control device");
|
||||
goto out_sig;
|
||||
}
|
||||
error = device_remove_lockspace(&kbuf->i.lspace);
|
||||
break;
|
||||
|
||||
default:
|
||||
log_print("Unknown command passed to DLM device : %d\n",
|
||||
kbuf->cmd);
|
||||
}
|
||||
|
||||
out_sig:
|
||||
sigprocmask(SIG_SETMASK, &tmpsig, NULL);
|
||||
recalc_sigpending();
|
||||
out_free:
|
||||
kfree(kbuf);
|
||||
return error;
|
||||
}
|
||||
|
||||
/* Every process that opens the lockspace device has its own "proc" structure
|
||||
hanging off the open file that's used to keep track of locks owned by the
|
||||
process and asts that need to be delivered to the process. */
|
||||
|
||||
static int device_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct dlm_user_proc *proc;
|
||||
struct dlm_ls *ls;
|
||||
|
||||
ls = dlm_find_lockspace_device(iminor(inode));
|
||||
if (!ls)
|
||||
return -ENOENT;
|
||||
|
||||
proc = kzalloc(sizeof(struct dlm_user_proc), GFP_KERNEL);
|
||||
if (!proc) {
|
||||
dlm_put_lockspace(ls);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
proc->lockspace = ls->ls_local_handle;
|
||||
INIT_LIST_HEAD(&proc->asts);
|
||||
INIT_LIST_HEAD(&proc->locks);
|
||||
spin_lock_init(&proc->asts_spin);
|
||||
spin_lock_init(&proc->locks_spin);
|
||||
init_waitqueue_head(&proc->wait);
|
||||
file->private_data = proc;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int device_close(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct dlm_user_proc *proc = file->private_data;
|
||||
struct dlm_ls *ls;
|
||||
sigset_t tmpsig, allsigs;
|
||||
|
||||
ls = dlm_find_lockspace_local(proc->lockspace);
|
||||
if (!ls)
|
||||
return -ENOENT;
|
||||
|
||||
sigfillset(&allsigs);
|
||||
sigprocmask(SIG_BLOCK, &allsigs, &tmpsig);
|
||||
|
||||
set_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags);
|
||||
|
||||
dlm_clear_proc_locks(ls, proc);
|
||||
|
||||
/* at this point no more lkb's should exist for this lockspace,
|
||||
so there's no chance of dlm_user_add_ast() being called and
|
||||
looking for lkb->ua->proc */
|
||||
|
||||
kfree(proc);
|
||||
file->private_data = NULL;
|
||||
|
||||
dlm_put_lockspace(ls);
|
||||
dlm_put_lockspace(ls); /* for the find in device_open() */
|
||||
|
||||
/* FIXME: AUTOFREE: if this ls is no longer used do
|
||||
device_remove_lockspace() */
|
||||
|
||||
sigprocmask(SIG_SETMASK, &tmpsig, NULL);
|
||||
recalc_sigpending();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int copy_result_to_user(struct dlm_user_args *ua, int compat, int type,
|
||||
int bmode, char __user *buf, size_t count)
|
||||
{
|
||||
#ifdef CONFIG_COMPAT
|
||||
struct dlm_lock_result32 result32;
|
||||
#endif
|
||||
struct dlm_lock_result result;
|
||||
void *resultptr;
|
||||
int error=0;
|
||||
int len;
|
||||
int struct_len;
|
||||
|
||||
memset(&result, 0, sizeof(struct dlm_lock_result));
|
||||
memcpy(&result.lksb, &ua->lksb, sizeof(struct dlm_lksb));
|
||||
result.user_lksb = ua->user_lksb;
|
||||
|
||||
/* FIXME: dlm1 provides for the user's bastparam/addr to not be updated
|
||||
in a conversion unless the conversion is successful. See code
|
||||
in dlm_user_convert() for updating ua from ua_tmp. OpenVMS, though,
|
||||
notes that a new blocking AST address and parameter are set even if
|
||||
the conversion fails, so maybe we should just do that. */
|
||||
|
||||
if (type == AST_BAST) {
|
||||
result.user_astaddr = ua->bastaddr;
|
||||
result.user_astparam = ua->bastparam;
|
||||
result.bast_mode = bmode;
|
||||
} else {
|
||||
result.user_astaddr = ua->castaddr;
|
||||
result.user_astparam = ua->castparam;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
if (compat)
|
||||
len = sizeof(struct dlm_lock_result32);
|
||||
else
|
||||
#endif
|
||||
len = sizeof(struct dlm_lock_result);
|
||||
struct_len = len;
|
||||
|
||||
/* copy lvb to userspace if there is one, it's been updated, and
|
||||
the user buffer has space for it */
|
||||
|
||||
if (ua->update_user_lvb && ua->lksb.sb_lvbptr &&
|
||||
count >= len + DLM_USER_LVB_LEN) {
|
||||
if (copy_to_user(buf+len, ua->lksb.sb_lvbptr,
|
||||
DLM_USER_LVB_LEN)) {
|
||||
error = -EFAULT;
|
||||
goto out;
|
||||
}
|
||||
|
||||
result.lvb_offset = len;
|
||||
len += DLM_USER_LVB_LEN;
|
||||
}
|
||||
|
||||
result.length = len;
|
||||
resultptr = &result;
|
||||
#ifdef CONFIG_COMPAT
|
||||
if (compat) {
|
||||
compat_output(&result, &result32);
|
||||
resultptr = &result32;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (copy_to_user(buf, resultptr, struct_len))
|
||||
error = -EFAULT;
|
||||
else
|
||||
error = len;
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
||||
/* a read returns a single ast described in a struct dlm_lock_result */
|
||||
|
||||
static ssize_t device_read(struct file *file, char __user *buf, size_t count,
|
||||
loff_t *ppos)
|
||||
{
|
||||
struct dlm_user_proc *proc = file->private_data;
|
||||
struct dlm_lkb *lkb;
|
||||
struct dlm_user_args *ua;
|
||||
DECLARE_WAITQUEUE(wait, current);
|
||||
int error, type=0, bmode=0, removed = 0;
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
if (count < sizeof(struct dlm_lock_result32))
|
||||
#else
|
||||
if (count < sizeof(struct dlm_lock_result))
|
||||
#endif
|
||||
return -EINVAL;
|
||||
|
||||
/* do we really need this? can a read happen after a close? */
|
||||
if (test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))
|
||||
return -EINVAL;
|
||||
|
||||
spin_lock(&proc->asts_spin);
|
||||
if (list_empty(&proc->asts)) {
|
||||
if (file->f_flags & O_NONBLOCK) {
|
||||
spin_unlock(&proc->asts_spin);
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
add_wait_queue(&proc->wait, &wait);
|
||||
|
||||
repeat:
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
if (list_empty(&proc->asts) && !signal_pending(current)) {
|
||||
spin_unlock(&proc->asts_spin);
|
||||
schedule();
|
||||
spin_lock(&proc->asts_spin);
|
||||
goto repeat;
|
||||
}
|
||||
set_current_state(TASK_RUNNING);
|
||||
remove_wait_queue(&proc->wait, &wait);
|
||||
|
||||
if (signal_pending(current)) {
|
||||
spin_unlock(&proc->asts_spin);
|
||||
return -ERESTARTSYS;
|
||||
}
|
||||
}
|
||||
|
||||
if (list_empty(&proc->asts)) {
|
||||
spin_unlock(&proc->asts_spin);
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
/* there may be both completion and blocking asts to return for
|
||||
the lkb, don't remove lkb from asts list unless no asts remain */
|
||||
|
||||
lkb = list_entry(proc->asts.next, struct dlm_lkb, lkb_astqueue);
|
||||
|
||||
if (lkb->lkb_ast_type & AST_COMP) {
|
||||
lkb->lkb_ast_type &= ~AST_COMP;
|
||||
type = AST_COMP;
|
||||
} else if (lkb->lkb_ast_type & AST_BAST) {
|
||||
lkb->lkb_ast_type &= ~AST_BAST;
|
||||
type = AST_BAST;
|
||||
bmode = lkb->lkb_bastmode;
|
||||
}
|
||||
|
||||
if (!lkb->lkb_ast_type) {
|
||||
list_del(&lkb->lkb_astqueue);
|
||||
removed = 1;
|
||||
}
|
||||
spin_unlock(&proc->asts_spin);
|
||||
|
||||
ua = (struct dlm_user_args *)lkb->lkb_astparam;
|
||||
error = copy_result_to_user(ua,
|
||||
test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
|
||||
type, bmode, buf, count);
|
||||
|
||||
/* removes reference for the proc->asts lists added by
|
||||
dlm_user_add_ast() and may result in the lkb being freed */
|
||||
if (removed)
|
||||
dlm_put_lkb(lkb);
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
static unsigned int device_poll(struct file *file, poll_table *wait)
|
||||
{
|
||||
struct dlm_user_proc *proc = file->private_data;
|
||||
|
||||
poll_wait(file, &proc->wait, wait);
|
||||
|
||||
spin_lock(&proc->asts_spin);
|
||||
if (!list_empty(&proc->asts)) {
|
||||
spin_unlock(&proc->asts_spin);
|
||||
return POLLIN | POLLRDNORM;
|
||||
}
|
||||
spin_unlock(&proc->asts_spin);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ctl_device_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
file->private_data = NULL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ctl_device_close(struct inode *inode, struct file *file)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct file_operations device_fops = {
|
||||
.open = device_open,
|
||||
.release = device_close,
|
||||
.read = device_read,
|
||||
.write = device_write,
|
||||
.poll = device_poll,
|
||||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
static struct file_operations ctl_device_fops = {
|
||||
.open = ctl_device_open,
|
||||
.release = ctl_device_close,
|
||||
.write = device_write,
|
||||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
int dlm_user_init(void)
|
||||
{
|
||||
int error;
|
||||
|
||||
ctl_device.name = "dlm-control";
|
||||
ctl_device.fops = &ctl_device_fops;
|
||||
ctl_device.minor = MISC_DYNAMIC_MINOR;
|
||||
|
||||
error = misc_register(&ctl_device);
|
||||
if (error)
|
||||
log_print("misc_register failed for control device");
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
void dlm_user_exit(void)
|
||||
{
|
||||
misc_deregister(&ctl_device);
|
||||
}
|
||||
|
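
compat_input() and compat_output() in user.c translate between the 32-bit and native layouts of the device request and result structures, widening 32-bit handle fields into pointers on the way in and narrowing them again on the way out. The sketch below shows the same widening step on hypothetical structures; it is only an illustration and not the dlm_device ABI.

/* Illustrative sketch only; structures and names are hypothetical. */
#include <stdint.h>
#include <stdio.h>

struct lock_params32 {           /* layout a 32-bit caller writes */
    uint32_t castparam;          /* pointer-sized values stored as u32 */
    uint32_t castaddr;
    uint32_t lkid;
};

struct lock_params {             /* layout the native code works with */
    void *castparam;
    void *castaddr;
    uint32_t lkid;
};

static void widen(struct lock_params *out, const struct lock_params32 *in)
{
    /* widen the 32-bit handles into native pointers; they are only ever
       handed back to the same 32-bit process, so no information is lost */
    out->castparam = (void *)(long)in->castparam;
    out->castaddr  = (void *)(long)in->castaddr;
    out->lkid      = in->lkid;
}

int main(void)
{
    struct lock_params32 in = { 0x1000, 0x2000, 42 };
    struct lock_params out;

    widen(&out, &in);
    printf("lkid=%u castparam=%p\n", out.lkid, out.castparam);
    return 0;
}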

16  fs/dlm/user.h  Normal file
@@ -0,0 +1,16 @@
/*
 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#ifndef __USER_DOT_H__
#define __USER_DOT_H__

void dlm_user_add_ast(struct dlm_lkb *lkb, int type);
int dlm_user_init(void);
void dlm_user_exit(void);

#endif

161  fs/dlm/util.c  Normal file
@@ -0,0 +1,161 @@
/******************************************************************************
|
||||
*******************************************************************************
|
||||
**
|
||||
** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
|
||||
**
|
||||
** This copyrighted material is made available to anyone wishing to use,
|
||||
** modify, copy, or redistribute it subject to the terms and conditions
|
||||
** of the GNU General Public License v.2.
|
||||
**
|
||||
*******************************************************************************
|
||||
******************************************************************************/
|
||||
|
||||
#include "dlm_internal.h"
|
||||
#include "rcom.h"
|
||||
#include "util.h"
|
||||
|
||||
static void header_out(struct dlm_header *hd)
|
||||
{
|
||||
hd->h_version = cpu_to_le32(hd->h_version);
|
||||
hd->h_lockspace = cpu_to_le32(hd->h_lockspace);
|
||||
hd->h_nodeid = cpu_to_le32(hd->h_nodeid);
|
||||
hd->h_length = cpu_to_le16(hd->h_length);
|
||||
}
|
||||
|
||||
static void header_in(struct dlm_header *hd)
|
||||
{
|
||||
hd->h_version = le32_to_cpu(hd->h_version);
|
||||
hd->h_lockspace = le32_to_cpu(hd->h_lockspace);
|
||||
hd->h_nodeid = le32_to_cpu(hd->h_nodeid);
|
||||
hd->h_length = le16_to_cpu(hd->h_length);
|
||||
}
|
||||
|
||||
void dlm_message_out(struct dlm_message *ms)
|
||||
{
|
||||
struct dlm_header *hd = (struct dlm_header *) ms;
|
||||
|
||||
header_out(hd);
|
||||
|
||||
ms->m_type = cpu_to_le32(ms->m_type);
|
||||
ms->m_nodeid = cpu_to_le32(ms->m_nodeid);
|
||||
ms->m_pid = cpu_to_le32(ms->m_pid);
|
||||
ms->m_lkid = cpu_to_le32(ms->m_lkid);
|
||||
ms->m_remid = cpu_to_le32(ms->m_remid);
|
||||
ms->m_parent_lkid = cpu_to_le32(ms->m_parent_lkid);
|
||||
ms->m_parent_remid = cpu_to_le32(ms->m_parent_remid);
|
||||
ms->m_exflags = cpu_to_le32(ms->m_exflags);
|
||||
ms->m_sbflags = cpu_to_le32(ms->m_sbflags);
|
||||
ms->m_flags = cpu_to_le32(ms->m_flags);
|
||||
ms->m_lvbseq = cpu_to_le32(ms->m_lvbseq);
|
||||
ms->m_hash = cpu_to_le32(ms->m_hash);
|
||||
ms->m_status = cpu_to_le32(ms->m_status);
|
||||
ms->m_grmode = cpu_to_le32(ms->m_grmode);
|
||||
ms->m_rqmode = cpu_to_le32(ms->m_rqmode);
|
||||
ms->m_bastmode = cpu_to_le32(ms->m_bastmode);
|
||||
ms->m_asts = cpu_to_le32(ms->m_asts);
|
||||
ms->m_result = cpu_to_le32(ms->m_result);
|
||||
}
|
||||
|
||||
void dlm_message_in(struct dlm_message *ms)
|
||||
{
|
||||
struct dlm_header *hd = (struct dlm_header *) ms;
|
||||
|
||||
header_in(hd);
|
||||
|
||||
ms->m_type = le32_to_cpu(ms->m_type);
|
||||
ms->m_nodeid = le32_to_cpu(ms->m_nodeid);
|
||||
ms->m_pid = le32_to_cpu(ms->m_pid);
|
||||
ms->m_lkid = le32_to_cpu(ms->m_lkid);
|
||||
ms->m_remid = le32_to_cpu(ms->m_remid);
|
||||
ms->m_parent_lkid = le32_to_cpu(ms->m_parent_lkid);
|
||||
ms->m_parent_remid = le32_to_cpu(ms->m_parent_remid);
|
||||
ms->m_exflags = le32_to_cpu(ms->m_exflags);
|
||||
ms->m_sbflags = le32_to_cpu(ms->m_sbflags);
|
||||
ms->m_flags = le32_to_cpu(ms->m_flags);
|
||||
ms->m_lvbseq = le32_to_cpu(ms->m_lvbseq);
|
||||
ms->m_hash = le32_to_cpu(ms->m_hash);
|
||||
ms->m_status = le32_to_cpu(ms->m_status);
|
||||
ms->m_grmode = le32_to_cpu(ms->m_grmode);
|
||||
ms->m_rqmode = le32_to_cpu(ms->m_rqmode);
|
||||
ms->m_bastmode = le32_to_cpu(ms->m_bastmode);
|
||||
ms->m_asts = le32_to_cpu(ms->m_asts);
|
||||
ms->m_result = le32_to_cpu(ms->m_result);
|
||||
}
|
||||
|
||||
static void rcom_lock_out(struct rcom_lock *rl)
|
||||
{
|
||||
rl->rl_ownpid = cpu_to_le32(rl->rl_ownpid);
|
||||
rl->rl_lkid = cpu_to_le32(rl->rl_lkid);
|
||||
rl->rl_remid = cpu_to_le32(rl->rl_remid);
|
||||
rl->rl_parent_lkid = cpu_to_le32(rl->rl_parent_lkid);
|
||||
rl->rl_parent_remid = cpu_to_le32(rl->rl_parent_remid);
|
||||
rl->rl_exflags = cpu_to_le32(rl->rl_exflags);
|
||||
rl->rl_flags = cpu_to_le32(rl->rl_flags);
|
||||
rl->rl_lvbseq = cpu_to_le32(rl->rl_lvbseq);
|
||||
rl->rl_result = cpu_to_le32(rl->rl_result);
|
||||
rl->rl_wait_type = cpu_to_le16(rl->rl_wait_type);
|
||||
rl->rl_namelen = cpu_to_le16(rl->rl_namelen);
|
||||
}
|
||||
|
||||
static void rcom_lock_in(struct rcom_lock *rl)
|
||||
{
|
||||
rl->rl_ownpid = le32_to_cpu(rl->rl_ownpid);
|
||||
rl->rl_lkid = le32_to_cpu(rl->rl_lkid);
|
||||
rl->rl_remid = le32_to_cpu(rl->rl_remid);
|
||||
rl->rl_parent_lkid = le32_to_cpu(rl->rl_parent_lkid);
|
||||
rl->rl_parent_remid = le32_to_cpu(rl->rl_parent_remid);
|
||||
rl->rl_exflags = le32_to_cpu(rl->rl_exflags);
|
||||
rl->rl_flags = le32_to_cpu(rl->rl_flags);
|
||||
rl->rl_lvbseq = le32_to_cpu(rl->rl_lvbseq);
|
||||
rl->rl_result = le32_to_cpu(rl->rl_result);
|
||||
rl->rl_wait_type = le16_to_cpu(rl->rl_wait_type);
|
||||
rl->rl_namelen = le16_to_cpu(rl->rl_namelen);
|
||||
}
|
||||
|
||||
static void rcom_config_out(struct rcom_config *rf)
|
||||
{
|
||||
rf->rf_lvblen = cpu_to_le32(rf->rf_lvblen);
|
||||
rf->rf_lsflags = cpu_to_le32(rf->rf_lsflags);
|
||||
}
|
||||
|
||||
static void rcom_config_in(struct rcom_config *rf)
|
||||
{
|
||||
rf->rf_lvblen = le32_to_cpu(rf->rf_lvblen);
|
||||
rf->rf_lsflags = le32_to_cpu(rf->rf_lsflags);
|
||||
}
|
||||
|
||||
void dlm_rcom_out(struct dlm_rcom *rc)
|
||||
{
|
||||
struct dlm_header *hd = (struct dlm_header *) rc;
|
||||
int type = rc->rc_type;
|
||||
|
||||
header_out(hd);
|
||||
|
||||
rc->rc_type = cpu_to_le32(rc->rc_type);
|
||||
rc->rc_result = cpu_to_le32(rc->rc_result);
|
||||
rc->rc_id = cpu_to_le64(rc->rc_id);
|
||||
|
||||
if (type == DLM_RCOM_LOCK)
|
||||
rcom_lock_out((struct rcom_lock *) rc->rc_buf);
|
||||
|
||||
else if (type == DLM_RCOM_STATUS_REPLY)
|
||||
rcom_config_out((struct rcom_config *) rc->rc_buf);
|
||||
}
|
||||
|
||||
void dlm_rcom_in(struct dlm_rcom *rc)
|
||||
{
|
||||
struct dlm_header *hd = (struct dlm_header *) rc;
|
||||
|
||||
header_in(hd);
|
||||
|
||||
rc->rc_type = le32_to_cpu(rc->rc_type);
|
||||
rc->rc_result = le32_to_cpu(rc->rc_result);
|
||||
rc->rc_id = le64_to_cpu(rc->rc_id);
|
||||
|
||||
if (rc->rc_type == DLM_RCOM_LOCK)
|
||||
rcom_lock_in((struct rcom_lock *) rc->rc_buf);
|
||||
|
||||
else if (rc->rc_type == DLM_RCOM_STATUS_REPLY)
|
||||
rcom_config_in((struct rcom_config *) rc->rc_buf);
|
||||
}
|
||||
|
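
util.c converts every multi-byte field of a message to little-endian before it leaves the node and back to host order on receipt, so machines of different endianness agree on the wire format. A small standalone sketch of that in/out discipline follows, assuming a hypothetical wire_msg structure and using glibc's htole32()/le32toh() in place of the kernel's cpu_to_le32()/le32_to_cpu().

/* Illustrative sketch only; wire_msg is a hypothetical message layout. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct wire_msg {
    uint32_t type;
    uint32_t nodeid;
    uint32_t result;
};

static void msg_out(struct wire_msg *m)   /* host order -> wire order */
{
    m->type   = htole32(m->type);
    m->nodeid = htole32(m->nodeid);
    m->result = htole32(m->result);
}

static void msg_in(struct wire_msg *m)    /* wire order -> host order */
{
    m->type   = le32toh(m->type);
    m->nodeid = le32toh(m->nodeid);
    m->result = le32toh(m->result);
}

int main(void)
{
    struct wire_msg m = { 3, 7, 0 };

    msg_out(&m);                 /* as it would be transmitted */
    msg_in(&m);                  /* as the receiver restores it */
    printf("type=%u nodeid=%u\n", m.type, m.nodeid);
    return 0;
}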

22  fs/dlm/util.h  Normal file
@@ -0,0 +1,22 @@
/******************************************************************************
*******************************************************************************
**
** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
** of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#ifndef __UTIL_DOT_H__
#define __UTIL_DOT_H__

void dlm_message_out(struct dlm_message *ms);
void dlm_message_in(struct dlm_message *ms);
void dlm_rcom_out(struct dlm_rcom *rc);
void dlm_rcom_in(struct dlm_rcom *rc);

#endif

44  fs/gfs2/Kconfig  Normal file
@@ -0,0 +1,44 @@
config GFS2_FS
    tristate "GFS2 file system support"
    depends on EXPERIMENTAL
    select FS_POSIX_ACL
    help
      A cluster filesystem.

      Allows a cluster of computers to simultaneously use a block device
      that is shared between them (with FC, iSCSI, NBD, etc.). GFS reads
      and writes to the block device like a local filesystem, but also uses
      a lock module to allow the computers to coordinate their I/O so
      filesystem consistency is maintained. One of the nifty features of
      GFS is perfect consistency -- changes made to the filesystem on one
      machine show up immediately on all other machines in the cluster.

      To use the GFS2 filesystem, you will need to enable one or more of
      the locking modules below. Documentation and utilities for GFS2 can
      be found here: http://sources.redhat.com/cluster

config GFS2_FS_LOCKING_NOLOCK
    tristate "GFS2 \"nolock\" locking module"
    depends on GFS2_FS
    help
      Single node locking module for GFS2.

      Use this module if you want to use GFS2 on a single node without
      its clustering features. You can still take advantage of the
      large file support, and upgrade to running a full cluster later on
      if required.

      If you will only be using GFS2 in cluster mode, you do not need this
      module.

config GFS2_FS_LOCKING_DLM
    tristate "GFS2 DLM locking module"
    depends on GFS2_FS
    select DLM
    help
      Multiple node locking module for GFS2.

      Most users of GFS2 will require this module. It provides the locking
      interface between GFS2 and the DLM, which is required to use GFS2
      in a cluster environment.

10  fs/gfs2/Makefile  Normal file
@@ -0,0 +1,10 @@
obj-$(CONFIG_GFS2_FS) += gfs2.o
gfs2-y := acl.o bmap.o daemon.o dir.o eaops.o eattr.o glock.o \
    glops.o inode.o lm.o log.o lops.o locking.o main.o meta_io.o \
    mount.o ondisk.o ops_address.o ops_dentry.o ops_export.o ops_file.o \
    ops_fstype.o ops_inode.o ops_super.o ops_vm.o quota.o \
    recovery.o rgrp.o super.o sys.o trans.o util.o

obj-$(CONFIG_GFS2_FS_LOCKING_NOLOCK) += locking/nolock/
obj-$(CONFIG_GFS2_FS_LOCKING_DLM) += locking/dlm/

309  fs/gfs2/acl.c  Normal file
@@ -0,0 +1,309 @@
/*
|
||||
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This copyrighted material is made available to anyone wishing to use,
|
||||
* modify, copy, or redistribute it subject to the terms and conditions
|
||||
* of the GNU General Public License version 2.
|
||||
*/
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/buffer_head.h>
|
||||
#include <linux/posix_acl.h>
|
||||
#include <linux/posix_acl_xattr.h>
|
||||
#include <linux/gfs2_ondisk.h>
|
||||
#include <linux/lm_interface.h>
|
||||
|
||||
#include "gfs2.h"
|
||||
#include "incore.h"
|
||||
#include "acl.h"
|
||||
#include "eaops.h"
|
||||
#include "eattr.h"
|
||||
#include "glock.h"
|
||||
#include "inode.h"
|
||||
#include "meta_io.h"
|
||||
#include "trans.h"
|
||||
#include "util.h"
|
||||
|
||||
#define ACL_ACCESS 1
|
||||
#define ACL_DEFAULT 0
|
||||
|
||||
int gfs2_acl_validate_set(struct gfs2_inode *ip, int access,
|
||||
struct gfs2_ea_request *er,
|
||||
int *remove, mode_t *mode)
|
||||
{
|
||||
struct posix_acl *acl;
|
||||
int error;
|
||||
|
||||
error = gfs2_acl_validate_remove(ip, access);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
if (!er->er_data)
|
||||
return -EINVAL;
|
||||
|
||||
acl = posix_acl_from_xattr(er->er_data, er->er_data_len);
|
||||
if (IS_ERR(acl))
|
||||
return PTR_ERR(acl);
|
||||
if (!acl) {
|
||||
*remove = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
error = posix_acl_valid(acl);
|
||||
if (error)
|
||||
goto out;
|
||||
|
||||
if (access) {
|
||||
error = posix_acl_equiv_mode(acl, mode);
|
||||
if (!error)
|
||||
*remove = 1;
|
||||
else if (error > 0)
|
||||
error = 0;
|
||||
}
|
||||
|
||||
out:
|
||||
posix_acl_release(acl);
|
||||
return error;
|
||||
}
|
||||
|
||||
int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access)
{
        if (!GFS2_SB(&ip->i_inode)->sd_args.ar_posix_acl)
                return -EOPNOTSUPP;
        if (current->fsuid != ip->i_di.di_uid && !capable(CAP_FOWNER))
                return -EPERM;
        if (S_ISLNK(ip->i_di.di_mode))
                return -EOPNOTSUPP;
        if (!access && !S_ISDIR(ip->i_di.di_mode))
                return -EACCES;

        return 0;
}

static int acl_get(struct gfs2_inode *ip, int access, struct posix_acl **acl,
                   struct gfs2_ea_location *el, char **data, unsigned int *len)
{
        struct gfs2_ea_request er;
        struct gfs2_ea_location el_this;
        int error;

        if (!ip->i_di.di_eattr)
                return 0;

        memset(&er, 0, sizeof(struct gfs2_ea_request));
        if (access) {
                er.er_name = GFS2_POSIX_ACL_ACCESS;
                er.er_name_len = GFS2_POSIX_ACL_ACCESS_LEN;
        } else {
                er.er_name = GFS2_POSIX_ACL_DEFAULT;
                er.er_name_len = GFS2_POSIX_ACL_DEFAULT_LEN;
        }
        er.er_type = GFS2_EATYPE_SYS;

        if (!el)
                el = &el_this;

        error = gfs2_ea_find(ip, &er, el);
        if (error)
                return error;
        if (!el->el_ea)
                return 0;
        if (!GFS2_EA_DATA_LEN(el->el_ea))
                goto out;

        er.er_data_len = GFS2_EA_DATA_LEN(el->el_ea);
        er.er_data = kmalloc(er.er_data_len, GFP_KERNEL);
        error = -ENOMEM;
        if (!er.er_data)
                goto out;

        error = gfs2_ea_get_copy(ip, el, er.er_data);
        if (error)
                goto out_kfree;

        if (acl) {
                *acl = posix_acl_from_xattr(er.er_data, er.er_data_len);
                if (IS_ERR(*acl))
                        error = PTR_ERR(*acl);
        }

out_kfree:
        if (error || !data)
                kfree(er.er_data);
        else {
                *data = er.er_data;
                *len = er.er_data_len;
        }
out:
        if (error || el == &el_this)
                brelse(el->el_bh);
        return error;
}

/**
 * gfs2_check_acl_locked - Check an ACL to see if we're allowed to do something
 * @inode: the file we want to do something to
 * @mask: what we want to do
 *
 * Returns: errno
 */

int gfs2_check_acl_locked(struct inode *inode, int mask)
{
        struct posix_acl *acl = NULL;
        int error;

        error = acl_get(GFS2_I(inode), ACL_ACCESS, &acl, NULL, NULL, NULL);
        if (error)
                return error;

        if (acl) {
                error = posix_acl_permission(inode, acl, mask);
                posix_acl_release(acl);
                return error;
        }

        return -EAGAIN;
}

int gfs2_check_acl(struct inode *inode, int mask)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder i_gh;
        int error;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
        if (!error) {
                error = gfs2_check_acl_locked(inode, mask);
                gfs2_glock_dq_uninit(&i_gh);
        }

        return error;
}

static int munge_mode(struct gfs2_inode *ip, mode_t mode)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct buffer_head *dibh;
        int error;

        error = gfs2_trans_begin(sdp, RES_DINODE, 0);
        if (error)
                return error;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                gfs2_assert_withdraw(sdp,
                        (ip->i_di.di_mode & S_IFMT) == (mode & S_IFMT));
                ip->i_di.di_mode = mode;
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                gfs2_dinode_out(&ip->i_di, dibh->b_data);
                brelse(dibh);
        }

        gfs2_trans_end(sdp);

        return 0;
}

int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
        struct posix_acl *acl = NULL, *clone;
        struct gfs2_ea_request er;
        mode_t mode = ip->i_di.di_mode;
        int error;

        if (!sdp->sd_args.ar_posix_acl)
                return 0;
        if (S_ISLNK(ip->i_di.di_mode))
                return 0;

        memset(&er, 0, sizeof(struct gfs2_ea_request));
        er.er_type = GFS2_EATYPE_SYS;

        error = acl_get(dip, ACL_DEFAULT, &acl, NULL,
                        &er.er_data, &er.er_data_len);
        if (error)
                return error;
        if (!acl) {
                mode &= ~current->fs->umask;
                if (mode != ip->i_di.di_mode)
                        error = munge_mode(ip, mode);
                return error;
        }

        clone = posix_acl_clone(acl, GFP_KERNEL);
        error = -ENOMEM;
        if (!clone)
                goto out;
        posix_acl_release(acl);
        acl = clone;

        if (S_ISDIR(ip->i_di.di_mode)) {
                er.er_name = GFS2_POSIX_ACL_DEFAULT;
                er.er_name_len = GFS2_POSIX_ACL_DEFAULT_LEN;
                error = gfs2_system_eaops.eo_set(ip, &er);
                if (error)
                        goto out;
        }

        error = posix_acl_create_masq(acl, &mode);
        if (error < 0)
                goto out;
        if (error > 0) {
                er.er_name = GFS2_POSIX_ACL_ACCESS;
                er.er_name_len = GFS2_POSIX_ACL_ACCESS_LEN;
                posix_acl_to_xattr(acl, er.er_data, er.er_data_len);
                er.er_mode = mode;
                er.er_flags = GFS2_ERF_MODE;
                error = gfs2_system_eaops.eo_set(ip, &er);
                if (error)
                        goto out;
        } else
                munge_mode(ip, mode);

out:
        posix_acl_release(acl);
        kfree(er.er_data);
        return error;
}

int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr)
{
        struct posix_acl *acl = NULL, *clone;
        struct gfs2_ea_location el;
        char *data;
        unsigned int len;
        int error;

        error = acl_get(ip, ACL_ACCESS, &acl, &el, &data, &len);
        if (error)
                return error;
        if (!acl)
                return gfs2_setattr_simple(ip, attr);

        clone = posix_acl_clone(acl, GFP_KERNEL);
        error = -ENOMEM;
        if (!clone)
                goto out;
        posix_acl_release(acl);
        acl = clone;

        error = posix_acl_chmod_masq(acl, attr->ia_mode);
        if (!error) {
                posix_acl_to_xattr(acl, data, len);
                error = gfs2_ea_acl_chmod(ip, &el, attr, data);
        }

out:
        posix_acl_release(acl);
        brelse(el.el_bh);
        kfree(data);
        return error;
}
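The -EAGAIN return from gfs2_check_acl_locked() above is a convention: it means "no access ACL stored, fall back to the ordinary mode bits". A minimal, hedged sketch of how a permission hook could consume it; example_permission and example_check_mode_bits are illustrative names, not part of this commit:

static int example_permission(struct inode *inode, int mask)
{
        /* Takes a shared glock, then looks up the ACL_ACCESS xattr. */
        int error = gfs2_check_acl(inode, mask);

        if (error == -EAGAIN)
                /* No ACL present; hypothetical fallback to mode-bit checks. */
                return example_check_mode_bits(inode, mask);

        return error;
}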
39
fs/gfs2/acl.h
Normal file
@@ -0,0 +1,39 @@
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#ifndef __ACL_DOT_H__
#define __ACL_DOT_H__

#include "incore.h"

#define GFS2_POSIX_ACL_ACCESS           "posix_acl_access"
#define GFS2_POSIX_ACL_ACCESS_LEN       16
#define GFS2_POSIX_ACL_DEFAULT          "posix_acl_default"
#define GFS2_POSIX_ACL_DEFAULT_LEN      17

#define GFS2_ACL_IS_ACCESS(name, len) \
        ((len) == GFS2_POSIX_ACL_ACCESS_LEN && \
         !memcmp(GFS2_POSIX_ACL_ACCESS, (name), (len)))

#define GFS2_ACL_IS_DEFAULT(name, len) \
        ((len) == GFS2_POSIX_ACL_DEFAULT_LEN && \
         !memcmp(GFS2_POSIX_ACL_DEFAULT, (name), (len)))

struct gfs2_ea_request;

int gfs2_acl_validate_set(struct gfs2_inode *ip, int access,
                          struct gfs2_ea_request *er,
                          int *remove, mode_t *mode);
int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access);
int gfs2_check_acl_locked(struct inode *inode, int mask);
int gfs2_check_acl(struct inode *inode, int mask);
int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip);
int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr);

#endif /* __ACL_DOT_H__ */
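As a sanity check of the name-matching macros above, a small standalone user-space program; it only re-uses the same definitions and is not part of the commit:

#include <stdio.h>
#include <string.h>

#define GFS2_POSIX_ACL_ACCESS           "posix_acl_access"
#define GFS2_POSIX_ACL_ACCESS_LEN       16

#define GFS2_ACL_IS_ACCESS(name, len) \
        ((len) == GFS2_POSIX_ACL_ACCESS_LEN && \
         !memcmp(GFS2_POSIX_ACL_ACCESS, (name), (len)))

int main(void)
{
        /* The explicit length guard keeps longer names that merely share
           the prefix from matching. */
        printf("%d\n", GFS2_ACL_IS_ACCESS("posix_acl_access", 16));   /* 1 */
        printf("%d\n", GFS2_ACL_IS_ACCESS("posix_acl_accessXX", 18)); /* 0 */
        return 0;
}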
1221
fs/gfs2/bmap.c
Normal file
File diff suppressed because it is too large
31
fs/gfs2/bmap.h
Normal file
@@ -0,0 +1,31 @@
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#ifndef __BMAP_DOT_H__
#define __BMAP_DOT_H__

struct inode;
struct gfs2_inode;
struct page;

int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page);
int gfs2_block_map(struct inode *inode, u64 lblock, int create, struct buffer_head *bh, unsigned int maxlen);
int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen);

int gfs2_truncatei(struct gfs2_inode *ip, u64 size);
int gfs2_truncatei_resume(struct gfs2_inode *ip);
int gfs2_file_dealloc(struct gfs2_inode *ip);

void gfs2_write_calc_reserv(struct gfs2_inode *ip, unsigned int len,
                            unsigned int *data_blocks,
                            unsigned int *ind_blocks);
int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
                              unsigned int len, int *alloc_required);

#endif /* __BMAP_DOT_H__ */
196
fs/gfs2/daemon.c
Normal file
@@ -0,0 +1,196 @@
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "daemon.h"
#include "glock.h"
#include "log.h"
#include "quota.h"
#include "recovery.h"
#include "super.h"
#include "util.h"

/* This uses schedule_timeout() instead of msleep() because it's good for
   the daemons to wake up more often than the timeout when unmounting so
   the user's unmount doesn't sit there forever.

   The kthread functions used to start these daemons block and flush signals. */

/**
 * gfs2_scand - Look for cached glocks and inodes to toss from memory
 * @sdp: Pointer to GFS2 superblock
 *
 * One of these daemons runs, finding candidates to add to sd_reclaim_list.
 * See gfs2_glockd()
 */

int gfs2_scand(void *data)
{
        struct gfs2_sbd *sdp = data;
        unsigned long t;

        while (!kthread_should_stop()) {
                gfs2_scand_internal(sdp);
                t = gfs2_tune_get(sdp, gt_scand_secs) * HZ;
                schedule_timeout_interruptible(t);
        }

        return 0;
}

/**
 * gfs2_glockd - Reclaim unused glock structures
 * @sdp: Pointer to GFS2 superblock
 *
 * One or more of these daemons run, reclaiming glocks on sd_reclaim_list.
 * Number of daemons can be set by user, with num_glockd mount option.
 */

int gfs2_glockd(void *data)
{
        struct gfs2_sbd *sdp = data;

        while (!kthread_should_stop()) {
                while (atomic_read(&sdp->sd_reclaim_count))
                        gfs2_reclaim_glock(sdp);

                wait_event_interruptible(sdp->sd_reclaim_wq,
                                         (atomic_read(&sdp->sd_reclaim_count) ||
                                          kthread_should_stop()));
        }

        return 0;
}

/**
 * gfs2_recoverd - Recover dead machine's journals
 * @sdp: Pointer to GFS2 superblock
 *
 */

int gfs2_recoverd(void *data)
{
        struct gfs2_sbd *sdp = data;
        unsigned long t;

        while (!kthread_should_stop()) {
                gfs2_check_journals(sdp);
                t = gfs2_tune_get(sdp, gt_recoverd_secs) * HZ;
                schedule_timeout_interruptible(t);
        }

        return 0;
}

/**
 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
 * @sdp: Pointer to GFS2 superblock
 *
 * Also, periodically check to make sure that we're using the most recent
 * journal index.
 */

int gfs2_logd(void *data)
{
        struct gfs2_sbd *sdp = data;
        struct gfs2_holder ji_gh;
        unsigned long t;

        while (!kthread_should_stop()) {
                /* Advance the log tail */

                t = sdp->sd_log_flush_time +
                    gfs2_tune_get(sdp, gt_log_flush_secs) * HZ;

                gfs2_ail1_empty(sdp, DIO_ALL);

                if (time_after_eq(jiffies, t)) {
                        gfs2_log_flush(sdp, NULL);
                        sdp->sd_log_flush_time = jiffies;
                }

                /* Check for latest journal index */

                t = sdp->sd_jindex_refresh_time +
                    gfs2_tune_get(sdp, gt_jindex_refresh_secs) * HZ;

                if (time_after_eq(jiffies, t)) {
                        if (!gfs2_jindex_hold(sdp, &ji_gh))
                                gfs2_glock_dq_uninit(&ji_gh);
                        sdp->sd_jindex_refresh_time = jiffies;
                }

                t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
                schedule_timeout_interruptible(t);
        }

        return 0;
}

/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @sdp: Pointer to GFS2 superblock
 *
 */

int gfs2_quotad(void *data)
{
        struct gfs2_sbd *sdp = data;
        unsigned long t;
        int error;

        while (!kthread_should_stop()) {
                /* Update the master statfs file */

                t = sdp->sd_statfs_sync_time +
                    gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;

                if (time_after_eq(jiffies, t)) {
                        error = gfs2_statfs_sync(sdp);
                        if (error &&
                            error != -EROFS &&
                            !test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
                                fs_err(sdp, "quotad: (1) error=%d\n", error);
                        sdp->sd_statfs_sync_time = jiffies;
                }

                /* Update quota file */

                t = sdp->sd_quota_sync_time +
                    gfs2_tune_get(sdp, gt_quota_quantum) * HZ;

                if (time_after_eq(jiffies, t)) {
                        error = gfs2_quota_sync(sdp);
                        if (error &&
                            error != -EROFS &&
                            !test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
                                fs_err(sdp, "quotad: (2) error=%d\n", error);
                        sdp->sd_quota_sync_time = jiffies;
                }

                gfs2_quota_scan(sdp);

                t = gfs2_tune_get(sdp, gt_quotad_secs) * HZ;
                schedule_timeout_interruptible(t);
        }

        return 0;
}
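These loops rely on the kthread API: kthread_stop() wakes the sleeping task, so schedule_timeout_interruptible() returns early and kthread_should_stop() becomes true. A hedged sketch of how mount-time code might start one of these daemons; the wrapper name is illustrative and not part of this commit:

static int example_start_scand(struct gfs2_sbd *sdp, struct task_struct **task)
{
        struct task_struct *p;

        p = kthread_run(gfs2_scand, sdp, "gfs2_scand");
        if (IS_ERR(p))
                return PTR_ERR(p);
        *task = p;      /* later torn down with kthread_stop(*task) */
        return 0;
}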
19
fs/gfs2/daemon.h
Normal file
@@ -0,0 +1,19 @@
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#ifndef __DAEMON_DOT_H__
#define __DAEMON_DOT_H__

int gfs2_scand(void *data);
int gfs2_glockd(void *data);
int gfs2_recoverd(void *data);
int gfs2_logd(void *data);
int gfs2_quotad(void *data);

#endif /* __DAEMON_DOT_H__ */
1961
fs/gfs2/dir.c
Normal file
File diff suppressed because it is too large
79
fs/gfs2/dir.h
Normal file
@@ -0,0 +1,79 @@
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#ifndef __DIR_DOT_H__
#define __DIR_DOT_H__

#include <linux/dcache.h>

struct inode;
struct gfs2_inode;
struct gfs2_inum;

/**
 * gfs2_filldir_t - Report a directory entry to the caller of gfs2_dir_read()
 * @opaque: opaque data used by the function
 * @name: the name of the directory entry
 * @length: the length of the name
 * @offset: the entry's offset in the directory
 * @inum: the inode number the entry points to
 * @type: the type of inode the entry points to
 *
 * Returns: 0 on success, 1 if buffer full
 */

typedef int (*gfs2_filldir_t) (void *opaque,
                               const char *name, unsigned int length,
                               u64 offset,
                               struct gfs2_inum *inum, unsigned int type);

int gfs2_dir_search(struct inode *dir, const struct qstr *filename,
                    struct gfs2_inum *inum, unsigned int *type);
int gfs2_dir_add(struct inode *inode, const struct qstr *filename,
                 const struct gfs2_inum *inum, unsigned int type);
int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *filename);
int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
                  gfs2_filldir_t filldir);
int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
                   struct gfs2_inum *new_inum, unsigned int new_type);

int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip);

int gfs2_diradd_alloc_required(struct inode *dir,
                               const struct qstr *filename);
int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block,
                            struct buffer_head **bhp);

static inline u32 gfs2_disk_hash(const char *data, int len)
{
        return crc32_le((u32)~0, data, len) ^ (u32)~0;
}


static inline void gfs2_str2qstr(struct qstr *name, const char *fname)
{
        name->name = fname;
        name->len = strlen(fname);
        name->hash = gfs2_disk_hash(name->name, name->len);
}

/* N.B. This probably ought to take inum & type as args as well */
static inline void gfs2_qstr2dirent(const struct qstr *name, u16 reclen, struct gfs2_dirent *dent)
{
        dent->de_inum.no_addr = cpu_to_be64(0);
        dent->de_inum.no_formal_ino = cpu_to_be64(0);
        dent->de_hash = cpu_to_be32(name->hash);
        dent->de_rec_len = cpu_to_be16(reclen);
        dent->de_name_len = cpu_to_be16(name->len);
        dent->de_type = cpu_to_be16(0);
        memset(dent->__pad, 0, sizeof(dent->__pad));
        memcpy(dent + 1, name->name, name->len);
}

#endif /* __DIR_DOT_H__ */
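A hedged usage sketch for the helpers above: build a qstr (which also computes the on-disk CRC32-based hash) and hand it to gfs2_dir_search(). The wrapper name is illustrative and not part of this commit:

static int example_lookup_by_name(struct inode *dir, const char *fname,
                                  struct gfs2_inum *inum, unsigned int *type)
{
        struct qstr q;

        gfs2_str2qstr(&q, fname);       /* fills name, len and hash */
        return gfs2_dir_search(dir, &q, inum, type);
}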
230
fs/gfs2/eaops.c
Normal file
@@ -0,0 +1,230 @@
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/xattr.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>
#include <asm/uaccess.h>

#include "gfs2.h"
#include "incore.h"
#include "acl.h"
#include "eaops.h"
#include "eattr.h"
#include "util.h"

/**
 * gfs2_ea_name2type - get the type of the ea, and truncate type from the name
 * @namep: ea name, possibly with type appended
 *
 * Returns: GFS2_EATYPE_XXX
 */

unsigned int gfs2_ea_name2type(const char *name, const char **truncated_name)
{
        unsigned int type;

        if (strncmp(name, "system.", 7) == 0) {
                type = GFS2_EATYPE_SYS;
                if (truncated_name)
                        *truncated_name = name + sizeof("system.") - 1;
        } else if (strncmp(name, "user.", 5) == 0) {
                type = GFS2_EATYPE_USR;
                if (truncated_name)
                        *truncated_name = name + sizeof("user.") - 1;
        } else if (strncmp(name, "security.", 9) == 0) {
                type = GFS2_EATYPE_SECURITY;
                if (truncated_name)
                        *truncated_name = name + sizeof("security.") - 1;
        } else {
                type = GFS2_EATYPE_UNUSED;
                if (truncated_name)
                        *truncated_name = NULL;
        }

        return type;
}

static int user_eo_get(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct inode *inode = &ip->i_inode;
        int error = permission(inode, MAY_READ, NULL);
        if (error)
                return error;

        return gfs2_ea_get_i(ip, er);
}

static int user_eo_set(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct inode *inode = &ip->i_inode;

        if (S_ISREG(inode->i_mode) ||
            (S_ISDIR(inode->i_mode) && !(inode->i_mode & S_ISVTX))) {
                int error = permission(inode, MAY_WRITE, NULL);
                if (error)
                        return error;
        } else
                return -EPERM;

        return gfs2_ea_set_i(ip, er);
}

static int user_eo_remove(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct inode *inode = &ip->i_inode;

        if (S_ISREG(inode->i_mode) ||
            (S_ISDIR(inode->i_mode) && !(inode->i_mode & S_ISVTX))) {
                int error = permission(inode, MAY_WRITE, NULL);
                if (error)
                        return error;
        } else
                return -EPERM;

        return gfs2_ea_remove_i(ip, er);
}

static int system_eo_get(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        if (!GFS2_ACL_IS_ACCESS(er->er_name, er->er_name_len) &&
            !GFS2_ACL_IS_DEFAULT(er->er_name, er->er_name_len) &&
            !capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (GFS2_SB(&ip->i_inode)->sd_args.ar_posix_acl == 0 &&
            (GFS2_ACL_IS_ACCESS(er->er_name, er->er_name_len) ||
             GFS2_ACL_IS_DEFAULT(er->er_name, er->er_name_len)))
                return -EOPNOTSUPP;

        return gfs2_ea_get_i(ip, er);
}

static int system_eo_set(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        int remove = 0;
        int error;

        if (GFS2_ACL_IS_ACCESS(er->er_name, er->er_name_len)) {
                if (!(er->er_flags & GFS2_ERF_MODE)) {
                        er->er_mode = ip->i_di.di_mode;
                        er->er_flags |= GFS2_ERF_MODE;
                }
                error = gfs2_acl_validate_set(ip, 1, er,
                                              &remove, &er->er_mode);
                if (error)
                        return error;
                error = gfs2_ea_set_i(ip, er);
                if (error)
                        return error;
                if (remove)
                        gfs2_ea_remove_i(ip, er);
                return 0;

        } else if (GFS2_ACL_IS_DEFAULT(er->er_name, er->er_name_len)) {
                error = gfs2_acl_validate_set(ip, 0, er,
                                              &remove, NULL);
                if (error)
                        return error;
                if (!remove)
                        error = gfs2_ea_set_i(ip, er);
                else {
                        error = gfs2_ea_remove_i(ip, er);
                        if (error == -ENODATA)
                                error = 0;
                }
                return error;
        }

        return -EPERM;
}

static int system_eo_remove(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        if (GFS2_ACL_IS_ACCESS(er->er_name, er->er_name_len)) {
                int error = gfs2_acl_validate_remove(ip, 1);
                if (error)
                        return error;

        } else if (GFS2_ACL_IS_DEFAULT(er->er_name, er->er_name_len)) {
                int error = gfs2_acl_validate_remove(ip, 0);
                if (error)
                        return error;

        } else
                return -EPERM;

        return gfs2_ea_remove_i(ip, er);
}

static int security_eo_get(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct inode *inode = &ip->i_inode;
        int error = permission(inode, MAY_READ, NULL);
        if (error)
                return error;

        return gfs2_ea_get_i(ip, er);
}

static int security_eo_set(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct inode *inode = &ip->i_inode;
        int error = permission(inode, MAY_WRITE, NULL);
        if (error)
                return error;

        return gfs2_ea_set_i(ip, er);
}

static int security_eo_remove(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct inode *inode = &ip->i_inode;
        int error = permission(inode, MAY_WRITE, NULL);
        if (error)
                return error;

        return gfs2_ea_remove_i(ip, er);
}

static struct gfs2_eattr_operations gfs2_user_eaops = {
        .eo_get = user_eo_get,
        .eo_set = user_eo_set,
        .eo_remove = user_eo_remove,
        .eo_name = "user",
};

struct gfs2_eattr_operations gfs2_system_eaops = {
        .eo_get = system_eo_get,
        .eo_set = system_eo_set,
        .eo_remove = system_eo_remove,
        .eo_name = "system",
};

static struct gfs2_eattr_operations gfs2_security_eaops = {
        .eo_get = security_eo_get,
        .eo_set = security_eo_set,
        .eo_remove = security_eo_remove,
        .eo_name = "security",
};

struct gfs2_eattr_operations *gfs2_ea_ops[] = {
        NULL,
        &gfs2_user_eaops,
        &gfs2_system_eaops,
        &gfs2_security_eaops,
};
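gfs2_ea_name2type() plus the gfs2_ea_ops[] table form a small dispatcher keyed on the xattr name prefix. A hedged sketch of how a getxattr-style entry point might use them; the function name and arguments are illustrative, and the real wiring lives elsewhere in this series:

static int example_getxattr(struct gfs2_inode *ip, const char *full_name,
                            char *buf, size_t size)
{
        struct gfs2_ea_request er;
        const char *name;
        unsigned int type = gfs2_ea_name2type(full_name, &name);

        if (type == GFS2_EATYPE_UNUSED || !name || !gfs2_ea_ops[type])
                return -EOPNOTSUPP;

        memset(&er, 0, sizeof(er));
        er.er_name = name;              /* prefix already stripped off */
        er.er_name_len = strlen(name);
        er.er_data = buf;
        er.er_data_len = size;
        er.er_type = type;

        return gfs2_ea_ops[type]->eo_get(ip, &er);
}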
30
fs/gfs2/eaops.h
Normal file
@@ -0,0 +1,30 @@
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#ifndef __EAOPS_DOT_H__
#define __EAOPS_DOT_H__

struct gfs2_ea_request;
struct gfs2_inode;

struct gfs2_eattr_operations {
        int (*eo_get) (struct gfs2_inode *ip, struct gfs2_ea_request *er);
        int (*eo_set) (struct gfs2_inode *ip, struct gfs2_ea_request *er);
        int (*eo_remove) (struct gfs2_inode *ip, struct gfs2_ea_request *er);
        char *eo_name;
};

unsigned int gfs2_ea_name2type(const char *name, const char **truncated_name);

extern struct gfs2_eattr_operations gfs2_system_eaops;

extern struct gfs2_eattr_operations *gfs2_ea_ops[];

#endif /* __EAOPS_DOT_H__ */
1501
fs/gfs2/eattr.c
Normal file
File diff suppressed because it is too large
100
fs/gfs2/eattr.h
Normal file
@@ -0,0 +1,100 @@
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#ifndef __EATTR_DOT_H__
#define __EATTR_DOT_H__

struct gfs2_inode;
struct iattr;

#define GFS2_EA_REC_LEN(ea) be32_to_cpu((ea)->ea_rec_len)
#define GFS2_EA_DATA_LEN(ea) be32_to_cpu((ea)->ea_data_len)

#define GFS2_EA_SIZE(ea) \
        ALIGN(sizeof(struct gfs2_ea_header) + (ea)->ea_name_len + \
              ((GFS2_EA_IS_STUFFED(ea)) ? GFS2_EA_DATA_LEN(ea) : \
               (sizeof(u64) * (ea)->ea_num_ptrs)), 8)

#define GFS2_EA_IS_STUFFED(ea) (!(ea)->ea_num_ptrs)
#define GFS2_EA_IS_LAST(ea) ((ea)->ea_flags & GFS2_EAFLAG_LAST)

#define GFS2_EAREQ_SIZE_STUFFED(er) \
        ALIGN(sizeof(struct gfs2_ea_header) + (er)->er_name_len + (er)->er_data_len, 8)

#define GFS2_EAREQ_SIZE_UNSTUFFED(sdp, er) \
        ALIGN(sizeof(struct gfs2_ea_header) + (er)->er_name_len + \
              sizeof(u64) * DIV_ROUND_UP((er)->er_data_len, (sdp)->sd_jbsize), 8)

#define GFS2_EA2NAME(ea) ((char *)((struct gfs2_ea_header *)(ea) + 1))
#define GFS2_EA2DATA(ea) (GFS2_EA2NAME(ea) + (ea)->ea_name_len)

#define GFS2_EA2DATAPTRS(ea) \
        ((u64 *)(GFS2_EA2NAME(ea) + ALIGN((ea)->ea_name_len, 8)))

#define GFS2_EA2NEXT(ea) \
        ((struct gfs2_ea_header *)((char *)(ea) + GFS2_EA_REC_LEN(ea)))

#define GFS2_EA_BH2FIRST(bh) \
        ((struct gfs2_ea_header *)((bh)->b_data + sizeof(struct gfs2_meta_header)))

#define GFS2_ERF_MODE 0x80000000

struct gfs2_ea_request {
        const char *er_name;
        char *er_data;
        unsigned int er_name_len;
        unsigned int er_data_len;
        unsigned int er_type; /* GFS2_EATYPE_... */
        int er_flags;
        mode_t er_mode;
};

struct gfs2_ea_location {
        struct buffer_head *el_bh;
        struct gfs2_ea_header *el_ea;
        struct gfs2_ea_header *el_prev;
};

int gfs2_ea_get_i(struct gfs2_inode *ip, struct gfs2_ea_request *er);
int gfs2_ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er);
int gfs2_ea_remove_i(struct gfs2_inode *ip, struct gfs2_ea_request *er);

int gfs2_ea_list(struct gfs2_inode *ip, struct gfs2_ea_request *er);
int gfs2_ea_get(struct gfs2_inode *ip, struct gfs2_ea_request *er);
int gfs2_ea_set(struct gfs2_inode *ip, struct gfs2_ea_request *er);
int gfs2_ea_remove(struct gfs2_inode *ip, struct gfs2_ea_request *er);

int gfs2_ea_dealloc(struct gfs2_inode *ip);

/* Exported to acl.c */

int gfs2_ea_find(struct gfs2_inode *ip,
                 struct gfs2_ea_request *er,
                 struct gfs2_ea_location *el);
int gfs2_ea_get_copy(struct gfs2_inode *ip,
                     struct gfs2_ea_location *el,
                     char *data);
int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el,
                      struct iattr *attr, char *data);

static inline unsigned int gfs2_ea_strlen(struct gfs2_ea_header *ea)
{
        switch (ea->ea_type) {
        case GFS2_EATYPE_USR:
                return 5 + ea->ea_name_len + 1;
        case GFS2_EATYPE_SYS:
                return 7 + ea->ea_name_len + 1;
        case GFS2_EATYPE_SECURITY:
                return 9 + ea->ea_name_len + 1;
        default:
                return 0;
        }
}

#endif /* __EATTR_DOT_H__ */
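The constants in gfs2_ea_strlen() are simply the xattr namespace prefix lengths, plus one byte for the terminating NUL that listxattr-style callers copy out. A trivial standalone check, not part of the commit:

#include <assert.h>
#include <string.h>

int main(void)
{
        assert(strlen("user.") == 5);
        assert(strlen("system.") == 7);
        assert(strlen("security.") == 9);
        return 0;
}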
31
fs/gfs2/gfs2.h
Normal file
@@ -0,0 +1,31 @@
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#ifndef __GFS2_DOT_H__
#define __GFS2_DOT_H__

enum {
        NO_CREATE = 0,
        CREATE = 1,
};

enum {
        NO_WAIT = 0,
        WAIT = 1,
};

enum {
        NO_FORCE = 0,
        FORCE = 1,
};

#define GFS2_FAST_NAME_SIZE 8

#endif /* __GFS2_DOT_H__ */
2231
fs/gfs2/glock.c
Normal file
File diff suppressed because it is too large
153
fs/gfs2/glock.h
Normal file
@@ -0,0 +1,153 @@
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#ifndef __GLOCK_DOT_H__
#define __GLOCK_DOT_H__

#include "incore.h"

/* Flags for lock requests; used in gfs2_holder gh_flag field.
   From lm_interface.h:
#define LM_FLAG_TRY             0x00000001
#define LM_FLAG_TRY_1CB         0x00000002
#define LM_FLAG_NOEXP           0x00000004
#define LM_FLAG_ANY             0x00000008
#define LM_FLAG_PRIORITY        0x00000010 */

#define GL_LOCAL_EXCL           0x00000020
#define GL_ASYNC                0x00000040
#define GL_EXACT                0x00000080
#define GL_SKIP                 0x00000100
#define GL_ATIME                0x00000200
#define GL_NOCACHE              0x00000400
#define GL_NOCANCEL             0x00001000
#define GL_AOP                  0x00004000
#define GL_DUMP                 0x00008000

#define GLR_TRYFAILED           13
#define GLR_CANCELED            14

static inline int gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        int locked = 0;

        /* Look in glock's list of holders for one with current task as owner */
        spin_lock(&gl->gl_spin);
        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                if (gh->gh_owner == current) {
                        locked = 1;
                        break;
                }
        }
        spin_unlock(&gl->gl_spin);

        return locked;
}

static inline int gfs2_glock_is_held_excl(struct gfs2_glock *gl)
{
        return gl->gl_state == LM_ST_EXCLUSIVE;
}

static inline int gfs2_glock_is_held_dfrd(struct gfs2_glock *gl)
{
        return gl->gl_state == LM_ST_DEFERRED;
}

static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
{
        return gl->gl_state == LM_ST_SHARED;
}

static inline int gfs2_glock_is_blocking(struct gfs2_glock *gl)
{
        int ret;
        spin_lock(&gl->gl_spin);
        ret = !list_empty(&gl->gl_waiters2) || !list_empty(&gl->gl_waiters3);
        spin_unlock(&gl->gl_spin);
        return ret;
}

int gfs2_glock_get(struct gfs2_sbd *sdp,
                   u64 number, const struct gfs2_glock_operations *glops,
                   int create, struct gfs2_glock **glp);
void gfs2_glock_hold(struct gfs2_glock *gl);
int gfs2_glock_put(struct gfs2_glock *gl);
void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
                      struct gfs2_holder *gh);
void gfs2_holder_reinit(unsigned int state, unsigned flags,
                        struct gfs2_holder *gh);
void gfs2_holder_uninit(struct gfs2_holder *gh);

void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags);
void gfs2_glock_drop_th(struct gfs2_glock *gl);

int gfs2_glock_nq(struct gfs2_holder *gh);
int gfs2_glock_poll(struct gfs2_holder *gh);
int gfs2_glock_wait(struct gfs2_holder *gh);
void gfs2_glock_dq(struct gfs2_holder *gh);

int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time);

void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
int gfs2_glock_nq_num(struct gfs2_sbd *sdp,
                      u64 number, const struct gfs2_glock_operations *glops,
                      unsigned int state, int flags, struct gfs2_holder *gh);

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs);

void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, u64 number,
                             const struct gfs2_glock_operations *glops,
                             unsigned int state, int flags);
void gfs2_glock_inode_squish(struct inode *inode);
/**
 * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Returns: 0, GLR_*, or errno
 */
static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
                                     unsigned int state, int flags,
                                     struct gfs2_holder *gh)
{
        int error;

        gfs2_holder_init(gl, state, flags, gh);

        error = gfs2_glock_nq(gh);
        if (error)
                gfs2_holder_uninit(gh);

        return error;
}

/* Lock Value Block functions */

int gfs2_lvb_hold(struct gfs2_glock *gl);
void gfs2_lvb_unhold(struct gfs2_glock *gl);

void gfs2_glock_cb(void *cb_data, unsigned int type, void *data);

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl);
void gfs2_reclaim_glock(struct gfs2_sbd *sdp);

void gfs2_scand_internal(struct gfs2_sbd *sdp);
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait);

int __init gfs2_glock_init(void);

#endif /* __GLOCK_DOT_H__ */
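A hedged usage sketch for gfs2_glock_nq_init(): on failure the holder has already been uninitialized, so a caller only pairs a successful call with gfs2_glock_dq_uninit(). This mirrors gfs2_check_acl() in acl.c earlier in this diff; the wrapper name is illustrative:

static int example_with_shared_glock(struct gfs2_inode *ip)
{
        struct gfs2_holder gh;
        int error;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
        if (error)
                return error;           /* holder already cleaned up */

        /* ... read inode fields under the shared lock ... */

        gfs2_glock_dq_uninit(&gh);      /* dequeue and release the holder */
        return 0;
}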
615
fs/gfs2/glops.c
Normal file
@@ -0,0 +1,615 @@
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"

/**
 * ail_empty_gl - remove all buffers for a given lock from the AIL
 * @gl: the glock
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        unsigned int blocks;
        struct list_head *head = &gl->gl_ail_list;
        struct gfs2_bufdata *bd;
        struct buffer_head *bh;
        u64 blkno;
        int error;

        blocks = atomic_read(&gl->gl_ail_count);
        if (!blocks)
                return;

        error = gfs2_trans_begin(sdp, 0, blocks);
        if (gfs2_assert_withdraw(sdp, !error))
                return;

        gfs2_log_lock(sdp);
        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata,
                                bd_ail_gl_list);
                bh = bd->bd_bh;
                blkno = bh->b_blocknr;
                gfs2_assert_withdraw(sdp, !buffer_busy(bh));

                bd->bd_ail = NULL;
                list_del(&bd->bd_ail_st_list);
                list_del(&bd->bd_ail_gl_list);
                atomic_dec(&gl->gl_ail_count);
                brelse(bh);
                gfs2_log_unlock(sdp);

                gfs2_trans_add_revoke(sdp, blkno);

                gfs2_log_lock(sdp);
        }
        gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
        gfs2_log_unlock(sdp);

        gfs2_trans_end(sdp);
        gfs2_log_flush(sdp, NULL);
}

/**
 * gfs2_pte_inval - Sync and invalidate all PTEs associated with a glock
 * @gl: the glock
 *
 */

static void gfs2_pte_inval(struct gfs2_glock *gl)
{
        struct gfs2_inode *ip;
        struct inode *inode;

        ip = gl->gl_object;
        inode = &ip->i_inode;
        if (!ip || !S_ISREG(ip->i_di.di_mode))
                return;

        if (!test_bit(GIF_PAGED, &ip->i_flags))
                return;

        unmap_shared_mapping_range(inode->i_mapping, 0, 0);

        if (test_bit(GIF_SW_PAGED, &ip->i_flags))
                set_bit(GLF_DIRTY, &gl->gl_flags);

        clear_bit(GIF_SW_PAGED, &ip->i_flags);
}

/**
 * gfs2_page_inval - Invalidate all pages associated with a glock
 * @gl: the glock
 *
 */

static void gfs2_page_inval(struct gfs2_glock *gl)
{
        struct gfs2_inode *ip;
        struct inode *inode;

        ip = gl->gl_object;
        inode = &ip->i_inode;
        if (!ip || !S_ISREG(ip->i_di.di_mode))
                return;

        truncate_inode_pages(inode->i_mapping, 0);
        gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), !inode->i_mapping->nrpages);
        clear_bit(GIF_PAGED, &ip->i_flags);
}

/**
 * gfs2_page_wait - Wait for writeback of data
 * @gl: the glock
 *
 * Syncs data (not metadata) for a regular file.
 * No-op for all other types.
 */

static void gfs2_page_wait(struct gfs2_glock *gl)
{
        struct gfs2_inode *ip = gl->gl_object;
        struct inode *inode = &ip->i_inode;
        struct address_space *mapping = inode->i_mapping;
        int error;

        if (!S_ISREG(ip->i_di.di_mode))
                return;

        error = filemap_fdatawait(mapping);

        /* Put back any errors cleared by filemap_fdatawait()
           so they can be caught by someone who can pass them
           up to user space. */

        if (error == -ENOSPC)
                set_bit(AS_ENOSPC, &mapping->flags);
        else if (error)
                set_bit(AS_EIO, &mapping->flags);
}

static void gfs2_page_writeback(struct gfs2_glock *gl)
{
        struct gfs2_inode *ip = gl->gl_object;
        struct inode *inode = &ip->i_inode;
        struct address_space *mapping = inode->i_mapping;

        if (!S_ISREG(ip->i_di.di_mode))
                return;

        filemap_fdatawrite(mapping);
}
|
||||
* meta_go_sync - sync out the metadata for this glock
|
||||
* @gl: the glock
|
||||
* @flags: DIO_*
|
||||
*
|
||||
* Called when demoting or unlocking an EX glock. We must flush
|
||||
* to disk all dirty buffers/pages relating to this glock, and must not
|
||||
* not return to caller to demote/unlock the glock until I/O is complete.
|
||||
*/
|
static void meta_go_sync(struct gfs2_glock *gl, int flags)
{
        if (!(flags & DIO_METADATA))
                return;

        if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) {
                gfs2_log_flush(gl->gl_sbd, gl);
                gfs2_meta_sync(gl);
                if (flags & DIO_RELEASE)
                        gfs2_ail_empty_gl(gl);
        }
}

/**
 * meta_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 */

static void meta_go_inval(struct gfs2_glock *gl, int flags)
{
        if (!(flags & DIO_METADATA))
                return;

        gfs2_meta_inval(gl);
        gl->gl_vn++;
}

/**
 * inode_go_xmote_th - promote/demote a glock
 * @gl: the glock
 * @state: the requested state
 * @flags:
 *
 */

static void inode_go_xmote_th(struct gfs2_glock *gl, unsigned int state,
                              int flags)
{
        if (gl->gl_state != LM_ST_UNLOCKED)
                gfs2_pte_inval(gl);
        gfs2_glock_xmote_th(gl, state, flags);
}

/**
 * inode_go_xmote_bh - After promoting/demoting a glock
 * @gl: the glock
 *
 */

static void inode_go_xmote_bh(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh = gl->gl_req_gh;
        struct buffer_head *bh;
        int error;

        if (gl->gl_state != LM_ST_UNLOCKED &&
            (!gh || !(gh->gh_flags & GL_SKIP))) {
                error = gfs2_meta_read(gl, gl->gl_name.ln_number, 0, &bh);
                if (!error)
                        brelse(bh);
        }
}

/**
 * inode_go_drop_th - unlock a glock
 * @gl: the glock
 *
 * Invoked from rq_demote().
 * Another node needs the lock in EXCLUSIVE mode, or lock (unused for too long)
 * is being purged from our node's glock cache; we're dropping lock.
 */

static void inode_go_drop_th(struct gfs2_glock *gl)
{
        gfs2_pte_inval(gl);
        gfs2_glock_drop_th(gl);
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 * @flags:
 *
 */

static void inode_go_sync(struct gfs2_glock *gl, int flags)
{
        int meta = (flags & DIO_METADATA);
        int data = (flags & DIO_DATA);

        if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
                if (meta && data) {
                        gfs2_page_writeback(gl);
                        gfs2_log_flush(gl->gl_sbd, gl);
                        gfs2_meta_sync(gl);
                        gfs2_page_wait(gl);
                        clear_bit(GLF_DIRTY, &gl->gl_flags);
                } else if (meta) {
                        gfs2_log_flush(gl->gl_sbd, gl);
                        gfs2_meta_sync(gl);
                } else if (data) {
                        gfs2_page_writeback(gl);
                        gfs2_page_wait(gl);
                }
                if (flags & DIO_RELEASE)
                        gfs2_ail_empty_gl(gl);
        }
}
/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 */
static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
        int meta = (flags & DIO_METADATA);
        int data = (flags & DIO_DATA);

        if (meta) {
                gfs2_meta_inval(gl);
                gl->gl_vn++;
        }
        if (data)
                gfs2_page_inval(gl);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int demote = 0;

        if (!gl->gl_object && !gl->gl_aspace->i_mapping->nrpages)
                demote = 1;
        else if (!sdp->sd_args.ar_localcaching &&
                 time_after_eq(jiffies, gl->gl_stamp +
                               gfs2_tune_get(sdp, gt_demote_secs) * HZ))
                demote = 1;

        return demote;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gl: the glock
 * @flags:
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_inode *ip = gl->gl_object;
        int error = 0;

        if (!ip)
                return 0;

        if (ip->i_vn != gl->gl_vn) {
                error = gfs2_inode_refresh(ip);
                if (error)
                        return error;
                gfs2_inode_attr_in(ip);
        }

        if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) &&
            (gl->gl_state == LM_ST_EXCLUSIVE) &&
            (gh->gh_flags & GL_LOCAL_EXCL))
                error = gfs2_truncatei_resume(ip);

        return error;
}

/**
 * inode_go_unlock - operation done before an inode lock is unlocked by a
 * process
 * @gl: the glock
 * @flags:
 *
 */

static void inode_go_unlock(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_inode *ip = gl->gl_object;

        if (ip == NULL)
                return;
        if (test_bit(GLF_DIRTY, &gl->gl_flags))
                gfs2_inode_attr_in(ip);
        gfs2_meta_cache_flush(ip);
}

/**
 * inode_greedy -
 * @gl: the glock
 *
 */

static void inode_greedy(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_inode *ip = gl->gl_object;
        unsigned int quantum = gfs2_tune_get(sdp, gt_greedy_quantum);
        unsigned int max = gfs2_tune_get(sdp, gt_greedy_max);
        unsigned int new_time;

        spin_lock(&ip->i_spin);

        if (time_after(ip->i_last_pfault + quantum, jiffies)) {
                new_time = ip->i_greedy + quantum;
                if (new_time > max)
                        new_time = max;
        } else {
                new_time = ip->i_greedy - quantum;
                if (!new_time || new_time > max)
                        new_time = 1;
        }

        ip->i_greedy = new_time;

        spin_unlock(&ip->i_spin);

        iput(&ip->i_inode);
}

/**
 * rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int rgrp_go_demote_ok(struct gfs2_glock *gl)
{
        return !gl->gl_aspace->i_mapping->nrpages;
}

/**
 * rgrp_go_lock - operation done after an rgrp lock is locked by
 *    a first holder on this node.
 * @gl: the glock
 * @flags:
 *
 * Returns: errno
 */

static int rgrp_go_lock(struct gfs2_holder *gh)
{
        return gfs2_rgrp_bh_get(gh->gh_gl->gl_object);
}

/**
 * rgrp_go_unlock - operation done before an rgrp lock is unlocked by
 *    a last holder on this node.
 * @gl: the glock
 * @flags:
 *
 */

static void rgrp_go_unlock(struct gfs2_holder *gh)
{
        gfs2_rgrp_bh_put(gh->gh_gl->gl_object);
}

/**
 * trans_go_xmote_th - promote/demote the transaction glock
 * @gl: the glock
 * @state: the requested state
 * @flags:
 *
 */

static void trans_go_xmote_th(struct gfs2_glock *gl, unsigned int state,
                              int flags)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;

        if (gl->gl_state != LM_ST_UNLOCKED &&
            test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
                gfs2_meta_syncfs(sdp);
                gfs2_log_shutdown(sdp);
        }

        gfs2_glock_xmote_th(gl, state, flags);
}

/**
 * trans_go_xmote_bh - After promoting/demoting the transaction glock
 * @gl: the glock
 *
 */

static void trans_go_xmote_bh(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
        struct gfs2_glock *j_gl = ip->i_gl;
        struct gfs2_log_header head;
        int error;

        if (gl->gl_state != LM_ST_UNLOCKED &&
            test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
                gfs2_meta_cache_flush(GFS2_I(sdp->sd_jdesc->jd_inode));
                j_gl->gl_ops->go_inval(j_gl, DIO_METADATA | DIO_DATA);

                error = gfs2_find_jhead(sdp->sd_jdesc, &head);
                if (error)
                        gfs2_consist(sdp);
                if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
                        gfs2_consist(sdp);

                /* Initialize some head of the log stuff */
                if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
                        sdp->sd_log_sequence = head.lh_sequence + 1;
                        gfs2_log_pointers_init(sdp, head.lh_blkno);
                }
        }
}

/**
 * trans_go_drop_th - unlock the transaction glock
 * @gl: the glock
 *
 * We want to sync the device even with localcaching. Remember
 * that localcaching journal replay only marks buffers dirty.
 */

static void trans_go_drop_th(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;

        if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
                gfs2_meta_syncfs(sdp);
                gfs2_log_shutdown(sdp);
        }

        gfs2_glock_drop_th(gl);
}

/**
 * quota_go_demote_ok - Check to see if it's ok to unlock a quota glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int quota_go_demote_ok(struct gfs2_glock *gl)
{
        return !atomic_read(&gl->gl_lvb_count);
}

const struct gfs2_glock_operations gfs2_meta_glops = {
        .go_xmote_th = gfs2_glock_xmote_th,
        .go_drop_th = gfs2_glock_drop_th,
        .go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
        .go_xmote_th = inode_go_xmote_th,
        .go_xmote_bh = inode_go_xmote_bh,
        .go_drop_th = inode_go_drop_th,
        .go_sync = inode_go_sync,
        .go_inval = inode_go_inval,
        .go_demote_ok = inode_go_demote_ok,
        .go_lock = inode_go_lock,
        .go_unlock = inode_go_unlock,
        .go_greedy = inode_greedy,
        .go_type = LM_TYPE_INODE,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
        .go_xmote_th = gfs2_glock_xmote_th,
        .go_drop_th = gfs2_glock_drop_th,
        .go_sync = meta_go_sync,
        .go_inval = meta_go_inval,
        .go_demote_ok = rgrp_go_demote_ok,
        .go_lock = rgrp_go_lock,
        .go_unlock = rgrp_go_unlock,
        .go_type = LM_TYPE_RGRP,
};

const struct gfs2_glock_operations gfs2_trans_glops = {
        .go_xmote_th = trans_go_xmote_th,
        .go_xmote_bh = trans_go_xmote_bh,
        .go_drop_th = trans_go_drop_th,
        .go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
        .go_xmote_th = gfs2_glock_xmote_th,
        .go_drop_th = gfs2_glock_drop_th,
        .go_type = LM_TYPE_IOPEN,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
        .go_xmote_th = gfs2_glock_xmote_th,
        .go_drop_th = gfs2_glock_drop_th,
        .go_type = LM_TYPE_FLOCK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
        .go_xmote_th = gfs2_glock_xmote_th,
        .go_drop_th = gfs2_glock_drop_th,
        .go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
        .go_xmote_th = gfs2_glock_xmote_th,
        .go_drop_th = gfs2_glock_drop_th,
        .go_demote_ok = quota_go_demote_ok,
        .go_type = LM_TYPE_QUOTA,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
        .go_xmote_th = gfs2_glock_xmote_th,
        .go_drop_th = gfs2_glock_drop_th,
        .go_type = LM_TYPE_JOURNAL,
};
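These operation tables are what callers pass as the glops argument of gfs2_glock_get() (declared in glock.h above), binding a lock number to a behaviour. A hedged sketch; the wrapper name is illustrative and not part of this commit:

static int example_get_inode_glock(struct gfs2_sbd *sdp, u64 blkno,
                                   struct gfs2_glock **glp)
{
        /* CREATE comes from gfs2.h earlier in this diff. */
        return gfs2_glock_get(sdp, blkno, &gfs2_inode_glops, CREATE, glp);
}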
25
fs/gfs2/glops.h
Normal file
@@ -0,0 +1,25 @@
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#ifndef __GLOPS_DOT_H__
#define __GLOPS_DOT_H__

#include "incore.h"

extern const struct gfs2_glock_operations gfs2_meta_glops;
extern const struct gfs2_glock_operations gfs2_inode_glops;
extern const struct gfs2_glock_operations gfs2_rgrp_glops;
extern const struct gfs2_glock_operations gfs2_trans_glops;
extern const struct gfs2_glock_operations gfs2_iopen_glops;
extern const struct gfs2_glock_operations gfs2_flock_glops;
extern const struct gfs2_glock_operations gfs2_nondisk_glops;
extern const struct gfs2_glock_operations gfs2_quota_glops;
extern const struct gfs2_glock_operations gfs2_journal_glops;

#endif /* __GLOPS_DOT_H__ */
634
fs/gfs2/incore.h
Normal file
634
fs/gfs2/incore.h
Normal file
|
@ -0,0 +1,634 @@
|
|||
/*
|
||||
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This copyrighted material is made available to anyone wishing to use,
|
||||
* modify, copy, or redistribute it subject to the terms and conditions
|
||||
* of the GNU General Public License version 2.
|
||||
*/
|
||||
|
||||
#ifndef __INCORE_DOT_H__
|
||||
#define __INCORE_DOT_H__
|
||||
|
||||
#include <linux/fs.h>
|
||||
|
||||
#define DIO_WAIT 0x00000010
|
||||
#define DIO_METADATA 0x00000020
|
||||
#define DIO_DATA 0x00000040
|
||||
#define DIO_RELEASE 0x00000080
|
||||
#define DIO_ALL 0x00000100
|
||||
|
||||
struct gfs2_log_operations;
|
||||
struct gfs2_log_element;
|
||||
struct gfs2_holder;
|
||||
struct gfs2_glock;
|
||||
struct gfs2_quota_data;
|
||||
struct gfs2_trans;
|
||||
struct gfs2_ail;
|
||||
struct gfs2_jdesc;
|
||||
struct gfs2_sbd;
|
||||
|
||||
typedef void (*gfs2_glop_bh_t) (struct gfs2_glock *gl, unsigned int ret);
|
||||
|
||||
/*
|
||||
* Structure of operations that are associated with each
|
||||
* type of element in the log.
|
||||
*/
|
||||
|
||||
struct gfs2_log_operations {
|
||||
void (*lo_add) (struct gfs2_sbd *sdp, struct gfs2_log_element *le);
|
||||
void (*lo_incore_commit) (struct gfs2_sbd *sdp, struct gfs2_trans *tr);
|
||||
void (*lo_before_commit) (struct gfs2_sbd *sdp);
|
||||
void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_ail *ai);
|
||||
void (*lo_before_scan) (struct gfs2_jdesc *jd,
|
||||
struct gfs2_log_header *head, int pass);
|
||||
int (*lo_scan_elements) (struct gfs2_jdesc *jd, unsigned int start,
|
||||
struct gfs2_log_descriptor *ld, __be64 *ptr,
|
||||
int pass);
|
||||
void (*lo_after_scan) (struct gfs2_jdesc *jd, int error, int pass);
|
||||
const char *lo_name;
|
||||
};
|
||||
|
||||
struct gfs2_log_element {
|
||||
struct list_head le_list;
|
||||
const struct gfs2_log_operations *le_ops;
|
||||
};
|
||||
|
||||
struct gfs2_bitmap {
|
||||
struct buffer_head *bi_bh;
|
||||
char *bi_clone;
|
||||
u32 bi_offset;
|
||||
u32 bi_start;
|
||||
u32 bi_len;
|
||||
};
|
||||
|
||||
struct gfs2_rgrpd {
|
||||
struct list_head rd_list; /* Link with superblock */
|
||||
struct list_head rd_list_mru;
|
||||
struct list_head rd_recent; /* Recently used rgrps */
|
||||
struct gfs2_glock *rd_gl; /* Glock for this rgrp */
|
||||
struct gfs2_rindex rd_ri;
|
||||
struct gfs2_rgrp rd_rg;
|
||||
u64 rd_rg_vn;
|
||||
struct gfs2_bitmap *rd_bits;
|
||||
unsigned int rd_bh_count;
|
||||
struct mutex rd_mutex;
|
||||
u32 rd_free_clone;
|
||||
struct gfs2_log_element rd_le;
|
||||
u32 rd_last_alloc_data;
|
||||
u32 rd_last_alloc_meta;
|
||||
struct gfs2_sbd *rd_sbd;
|
||||
};
|
||||
|
||||
enum gfs2_state_bits {
|
||||
BH_Pinned = BH_PrivateStart,
|
||||
BH_Escaped = BH_PrivateStart + 1,
|
||||
};
|
||||
|
||||
BUFFER_FNS(Pinned, pinned)
|
||||
TAS_BUFFER_FNS(Pinned, pinned)
|
||||
BUFFER_FNS(Escaped, escaped)
|
||||
TAS_BUFFER_FNS(Escaped, escaped)
|
||||
|
||||
struct gfs2_bufdata {
|
||||
struct buffer_head *bd_bh;
|
||||
struct gfs2_glock *bd_gl;
|
||||
|
||||
struct list_head bd_list_tr;
|
||||
struct gfs2_log_element bd_le;
|
||||
|
||||
struct gfs2_ail *bd_ail;
|
||||
struct list_head bd_ail_st_list;
|
||||
struct list_head bd_ail_gl_list;
|
||||
};
|
||||
|
||||
struct gfs2_glock_operations {
	void (*go_xmote_th) (struct gfs2_glock *gl, unsigned int state,
			     int flags);
	void (*go_xmote_bh) (struct gfs2_glock *gl);
	void (*go_drop_th) (struct gfs2_glock *gl);
	void (*go_drop_bh) (struct gfs2_glock *gl);
	void (*go_sync) (struct gfs2_glock *gl, int flags);
	void (*go_inval) (struct gfs2_glock *gl, int flags);
	int (*go_demote_ok) (struct gfs2_glock *gl);
	int (*go_lock) (struct gfs2_holder *gh);
	void (*go_unlock) (struct gfs2_holder *gh);
	void (*go_callback) (struct gfs2_glock *gl, unsigned int state);
	void (*go_greedy) (struct gfs2_glock *gl);
	const int go_type;
};

enum {
	/* Actions */
	HIF_MUTEX = 0,
	HIF_PROMOTE = 1,
	HIF_DEMOTE = 2,
	HIF_GREEDY = 3,

	/* States */
	HIF_ALLOCED = 4,
	HIF_DEALLOC = 5,
	HIF_HOLDER = 6,
	HIF_FIRST = 7,
	HIF_ABORTED = 9,
};

struct gfs2_holder {
	struct list_head gh_list;

	struct gfs2_glock *gh_gl;
	struct task_struct *gh_owner;
	unsigned int gh_state;
	unsigned gh_flags;

	int gh_error;
	unsigned long gh_iflags;
	struct completion gh_wait;
	unsigned long gh_ip;
};

enum {
	GLF_LOCK = 1,
	GLF_STICKY = 2,
	GLF_PREFETCH = 3,
	GLF_DIRTY = 5,
	GLF_SKIP_WAITERS2 = 6,
	GLF_GREEDY = 7,
};

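The GLF_ values are bit numbers rather than masks; they index individual bits of the gl_flags word and are normally manipulated with the kernel's atomic bit helpers (set_bit, clear_bit, test_bit). A small user-space sketch of the same bit-number convention, using plain bitwise stand-ins for those helpers (names here are illustrative only):

#include <stdio.h>

/* Bit positions, mirroring the style of the GLF_ enum above. */
enum {
	FLAG_LOCK  = 1,
	FLAG_DIRTY = 5,
};

/* Non-atomic stand-ins for the kernel's set_bit()/test_bit(). */
static void set_bit_ul(int nr, unsigned long *word)
{
	*word |= 1UL << nr;
}

static int test_bit_ul(int nr, const unsigned long *word)
{
	return (*word >> nr) & 1UL;
}

int main(void)
{
	unsigned long flags = 0;	/* plays the role of gl_flags */

	set_bit_ul(FLAG_DIRTY, &flags);
	printf("dirty=%d lock=%d\n",
	       test_bit_ul(FLAG_DIRTY, &flags),
	       test_bit_ul(FLAG_LOCK, &flags));	/* prints dirty=1 lock=0 */
	return 0;
}
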
struct gfs2_glock {
	struct hlist_node gl_list;
	unsigned long gl_flags;	/* GLF_... */
	struct lm_lockname gl_name;
	atomic_t gl_ref;

	spinlock_t gl_spin;

	unsigned int gl_state;
	unsigned int gl_hash;
	struct task_struct *gl_owner;
	unsigned long gl_ip;
	struct list_head gl_holders;
	struct list_head gl_waiters1;	/* HIF_MUTEX */
	struct list_head gl_waiters2;	/* HIF_DEMOTE, HIF_GREEDY */
	struct list_head gl_waiters3;	/* HIF_PROMOTE */

	const struct gfs2_glock_operations *gl_ops;

	struct gfs2_holder *gl_req_gh;
	gfs2_glop_bh_t gl_req_bh;

	void *gl_lock;
	char *gl_lvb;
	atomic_t gl_lvb_count;

	u64 gl_vn;
	unsigned long gl_stamp;
	void *gl_object;

	struct list_head gl_reclaim;

	struct gfs2_sbd *gl_sbd;

	struct inode *gl_aspace;
	struct gfs2_log_element gl_le;
	struct list_head gl_ail_list;
	atomic_t gl_ail_count;
};

struct gfs2_alloc {
	/* Quota stuff */

	struct gfs2_quota_data *al_qd[2*MAXQUOTAS];
	struct gfs2_holder al_qd_ghs[2*MAXQUOTAS];
	unsigned int al_qd_num;

	u32 al_requested; /* Filled in by caller of gfs2_inplace_reserve() */
	u32 al_alloced; /* Filled in by gfs2_alloc_*() */

	/* Filled in by gfs2_inplace_reserve() */

	unsigned int al_line;
	char *al_file;
	struct gfs2_holder al_ri_gh;
	struct gfs2_holder al_rgd_gh;
	struct gfs2_rgrpd *al_rgd;

};

enum {
	GIF_QD_LOCKED = 1,
	GIF_PAGED = 2,
	GIF_SW_PAGED = 3,
};

struct gfs2_inode {
	struct inode i_inode;
	struct gfs2_inum i_num;

	unsigned long i_flags; /* GIF_... */

	u64 i_vn;
	struct gfs2_dinode i_di; /* To be replaced by ref to block */

	struct gfs2_glock *i_gl; /* Move into i_gh? */
	struct gfs2_holder i_iopen_gh;
	struct gfs2_holder i_gh; /* for prepare/commit_write only */
	struct gfs2_alloc i_alloc;
	u64 i_last_rg_alloc;

	spinlock_t i_spin;
	struct rw_semaphore i_rw_mutex;
	unsigned int i_greedy;
	unsigned long i_last_pfault;

	struct buffer_head *i_cache[GFS2_MAX_META_HEIGHT];
};

/*
 * Since i_inode is the first element of struct gfs2_inode,
 * this is effectively a cast.
 */
static inline struct gfs2_inode *GFS2_I(struct inode *inode)
{
	return container_of(inode, struct gfs2_inode, i_inode);
}

/* To be removed? */
static inline struct gfs2_sbd *GFS2_SB(struct inode *inode)
{
	return inode->i_sb->s_fs_info;
}

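GFS2_I() is the usual container_of idiom: given a pointer to an embedded member, recover the structure that embeds it. A minimal stand-alone user-space sketch of the same idiom follows; the structure names are made up for illustration, and the macro below is a simplified container_of without the kernel's extra type checking.

#include <stddef.h>
#include <stdio.h>

/* Minimal container_of, as the kernel defines it (minus type checking). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Illustrative stand-ins: an embedded "vfs" part inside a larger fs inode. */
struct vfs_inode {
	unsigned long ino;
};

struct my_inode {
	struct vfs_inode i_inode;	/* embedded first, as in gfs2_inode */
	int i_private_state;
};

int main(void)
{
	struct my_inode mi = { .i_inode = { .ino = 42 }, .i_private_state = 7 };
	struct vfs_inode *vip = &mi.i_inode;

	/* Recover the embedding structure from the member pointer. */
	struct my_inode *back = container_of(vip, struct my_inode, i_inode);

	printf("ino=%lu private=%d\n", back->i_inode.ino, back->i_private_state);
	return 0;
}
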
enum {
	GFF_DID_DIRECT_ALLOC = 0,
	GFF_EXLOCK = 1,
};

struct gfs2_file {
	unsigned long f_flags; /* GFF_... */
	struct mutex f_fl_mutex;
	struct gfs2_holder f_fl_gh;
};
|
||||
struct gfs2_revoke {
|
||||
struct gfs2_log_element rv_le;
|
||||
u64 rv_blkno;
|
||||
};
|
||||
|
||||
struct gfs2_revoke_replay {
|
||||
struct list_head rr_list;
|
||||
u64 rr_blkno;
|
||||
unsigned int rr_where;
|
||||
};
|
||||
|
||||
enum {
|
||||
QDF_USER = 0,
|
||||
QDF_CHANGE = 1,
|
||||
QDF_LOCKED = 2,
|
||||
};
|
||||
|
||||
struct gfs2_quota_lvb {
|
||||
__be32 qb_magic;
|
||||
u32 __pad;
|
||||
__be64 qb_limit; /* Hard limit of # blocks to alloc */
|
||||
__be64 qb_warn; /* Warn user when alloc is above this # */
|
||||
__be64 qb_value; /* Current # blocks allocated */
|
||||
};
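The qb_ fields are declared with big-endian types (__be32/__be64), so any arithmetic on them has to go through an endian conversion first; in the kernel that would be be64_to_cpu()/cpu_to_be64(). A hedged user-space sketch of that conversion using the glibc endian helpers follows; the structure and the sample values are made up for illustration.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* User-space stand-in for the big-endian quota fields above. */
struct quota_lvb_wire {
	uint64_t qb_limit;	/* big-endian on the wire */
	uint64_t qb_value;	/* big-endian on the wire */
};

int main(void)
{
	/* Pretend these arrived from the lock value block, already big-endian. */
	struct quota_lvb_wire wire = {
		.qb_limit = htobe64(1000),
		.qb_value = htobe64(250),
	};

	/* Convert to host order before comparing, as the kernel would do
	   with be64_to_cpu(). */
	uint64_t limit = be64toh(wire.qb_limit);
	uint64_t value = be64toh(wire.qb_value);

	printf("over limit: %s\n", value > limit ? "yes" : "no");
	return 0;
}
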
|
||||
|
||||
struct gfs2_quota_data {
|
||||
struct list_head qd_list;
|
||||
unsigned int qd_count;
|
||||
|
||||
u32 qd_id;
|
||||
unsigned long qd_flags; /* QDF_... */
|
||||
|
||||
s64 qd_change;
|
||||
s64 qd_change_sync;
|
||||
|
||||
unsigned int qd_slot;
|
||||
unsigned int qd_slot_count;
|
||||
|
||||
struct buffer_head *qd_bh;
|
||||
struct gfs2_quota_change *qd_bh_qc;
|
||||
unsigned int qd_bh_count;
|
||||
|
||||
struct gfs2_glock *qd_gl;
|
||||
struct gfs2_quota_lvb qd_qb;
|
||||
|
||||
u64 qd_sync_gen;
|
||||
unsigned long qd_last_warn;
|
||||
unsigned long qd_last_touched;
|
||||
};
|
||||
|
||||
struct gfs2_log_buf {
|
||||
struct list_head lb_list;
|
||||
struct buffer_head *lb_bh;
|
||||
struct buffer_head *lb_real;
|
||||
};
|
||||
|
||||
struct gfs2_trans {
|
||||
unsigned long tr_ip;
|
||||
|
||||
unsigned int tr_blocks;
|
||||
unsigned int tr_revokes;
|
||||
unsigned int tr_reserved;
|
||||
|
||||
struct gfs2_holder tr_t_gh;
|
||||
|
||||
int tr_touched;
|
||||
|
||||
unsigned int tr_num_buf;
|
||||
unsigned int tr_num_buf_new;
|
||||
unsigned int tr_num_buf_rm;
|
||||
struct list_head tr_list_buf;
|
||||
|
||||
unsigned int tr_num_revoke;
|
||||
unsigned int tr_num_revoke_rm;
|
||||
};
|
||||
|
||||
struct gfs2_ail {
|
||||
struct list_head ai_list;
|
||||
|
||||
unsigned int ai_first;
|
||||
struct list_head ai_ail1_list;
|
||||
struct list_head ai_ail2_list;
|
||||
|
||||
u64 ai_sync_gen;
|
||||
};
|
||||
|
||||
struct gfs2_jdesc {
|
||||
struct list_head jd_list;
|
||||
|
||||
struct inode *jd_inode;
|
||||
unsigned int jd_jid;
|
||||
int jd_dirty;
|
||||
|
||||
unsigned int jd_blocks;
|
||||
};
|
||||
|
||||
#define GFS2_GLOCKD_DEFAULT 1
|
||||
#define GFS2_GLOCKD_MAX 16
|
||||
|
||||
#define GFS2_QUOTA_DEFAULT GFS2_QUOTA_OFF
|
||||
#define GFS2_QUOTA_OFF 0
|
||||
#define GFS2_QUOTA_ACCOUNT 1
|
||||
#define GFS2_QUOTA_ON 2
|
||||
|
||||
#define GFS2_DATA_DEFAULT GFS2_DATA_ORDERED
|
||||
#define GFS2_DATA_WRITEBACK 1
|
||||
#define GFS2_DATA_ORDERED 2
|
||||
|
||||
struct gfs2_args {
|
||||
char ar_lockproto[GFS2_LOCKNAME_LEN]; /* Name of the Lock Protocol */
|
||||
char ar_locktable[GFS2_LOCKNAME_LEN]; /* Name of the Lock Table */
|
||||
char ar_hostdata[GFS2_LOCKNAME_LEN]; /* Host specific data */
|
||||
int ar_spectator; /* Don't get a journal because we're always RO */
|
||||
int ar_ignore_local_fs; /* Don't optimize even if local_fs is 1 */
|
||||
int ar_localflocks; /* Let the VFS do flock|fcntl locks for us */
|
||||
int ar_localcaching; /* Local-style caching (dangerous on multihost) */
|
||||
int ar_debug; /* Oops on errors instead of trying to be graceful */
|
||||
int ar_upgrade; /* Upgrade ondisk/multihost format */
|
||||
unsigned int ar_num_glockd; /* Number of glockd threads */
|
||||
int ar_posix_acl; /* Enable posix acls */
|
||||
int ar_quota; /* off/account/on */
|
||||
int ar_suiddir; /* suiddir support */
|
||||
int ar_data; /* ordered/writeback */
|
||||
};
|
||||
|
||||
struct gfs2_tune {
|
||||
spinlock_t gt_spin;
|
||||
|
||||
unsigned int gt_ilimit;
|
||||
unsigned int gt_ilimit_tries;
|
||||
unsigned int gt_ilimit_min;
|
||||
unsigned int gt_demote_secs; /* Cache retention for unheld glock */
|
||||
unsigned int gt_incore_log_blocks;
|
||||
unsigned int gt_log_flush_secs;
|
||||
unsigned int gt_jindex_refresh_secs; /* Check for new journal index */
|
||||
|
||||
unsigned int gt_scand_secs;
|
||||
unsigned int gt_recoverd_secs;
|
||||
unsigned int gt_logd_secs;
|
||||
unsigned int gt_quotad_secs;
|
||||
|
||||
unsigned int gt_quota_simul_sync; /* Max quotavals to sync at once */
|
||||
unsigned int gt_quota_warn_period; /* Secs between quota warn msgs */
|
||||
unsigned int gt_quota_scale_num; /* Numerator */
|
||||
unsigned int gt_quota_scale_den; /* Denominator */
|
||||
unsigned int gt_quota_cache_secs;
|
||||
unsigned int gt_quota_quantum; /* Secs between syncs to quota file */
|
||||
unsigned int gt_atime_quantum; /* Min secs between atime updates */
|
||||
unsigned int gt_new_files_jdata;
|
||||
unsigned int gt_new_files_directio;
|
||||
unsigned int gt_max_atomic_write; /* Split big writes into this size */
|
||||
unsigned int gt_max_readahead; /* Max bytes to read-ahead from disk */
|
||||
unsigned int gt_lockdump_size;
|
||||
unsigned int gt_stall_secs; /* Detects trouble! */
|
||||
unsigned int gt_complain_secs;
|
||||
unsigned int gt_reclaim_limit; /* Max num of glocks in reclaim list */
|
||||
unsigned int gt_entries_per_readdir;
|
||||
unsigned int gt_prefetch_secs; /* Usage window for prefetched glocks */
|
||||
unsigned int gt_greedy_default;
|
||||
unsigned int gt_greedy_quantum;
|
||||
unsigned int gt_greedy_max;
|
||||
unsigned int gt_statfs_quantum;
|
||||
unsigned int gt_statfs_slow;
|
||||
};
|
||||
|
||||
enum {
|
||||
SDF_JOURNAL_CHECKED = 0,
|
||||
SDF_JOURNAL_LIVE = 1,
|
||||
SDF_SHUTDOWN = 2,
|
||||
SDF_NOATIME = 3,
|
||||
};
|
||||
|
||||
#define GFS2_FSNAME_LEN 256
|
||||
|
||||
struct gfs2_sbd {
|
||||
struct super_block *sd_vfs;
|
||||
struct super_block *sd_vfs_meta;
|
||||
struct kobject sd_kobj;
|
||||
unsigned long sd_flags; /* SDF_... */
|
||||
struct gfs2_sb sd_sb;
|
||||
|
||||
/* Constants computed on mount */
|
||||
|
||||
u32 sd_fsb2bb;
|
||||
u32 sd_fsb2bb_shift;
|
||||
u32 sd_diptrs; /* Number of pointers in a dinode */
|
||||
u32 sd_inptrs; /* Number of pointers in an indirect block */
|
||||
u32 sd_jbsize; /* Size of a journaled data block */
|
||||
u32 sd_hash_bsize; /* sizeof(exhash block) */
|
||||
u32 sd_hash_bsize_shift;
|
||||
u32 sd_hash_ptrs; /* Number of pointers in a hash block */
|
||||
u32 sd_qc_per_block;
|
||||
u32 sd_max_dirres; /* Max blocks needed to add a directory entry */
|
||||
u32 sd_max_height; /* Max height of a file's metadata tree */
|
||||
u64 sd_heightsize[GFS2_MAX_META_HEIGHT];
|
||||
u32 sd_max_jheight; /* Max height of journaled file's meta tree */
|
||||
u64 sd_jheightsize[GFS2_MAX_META_HEIGHT];
|
||||
|
||||
struct gfs2_args sd_args; /* Mount arguments */
|
||||
struct gfs2_tune sd_tune; /* Filesystem tuning structure */
|
||||
|
||||
/* Lock Stuff */
|
||||
|
||||
struct lm_lockstruct sd_lockstruct;
|
||||
struct list_head sd_reclaim_list;
|
||||
spinlock_t sd_reclaim_lock;
|
||||
wait_queue_head_t sd_reclaim_wq;
|
||||
atomic_t sd_reclaim_count;
|
||||
struct gfs2_holder sd_live_gh;
|
||||
struct gfs2_glock *sd_rename_gl;
|
||||
struct gfs2_glock *sd_trans_gl;
|
||||
|
||||
/* Inode Stuff */
|
||||
|
||||
struct inode *sd_master_dir;
|
||||
struct inode *sd_jindex;
|
||||
struct inode *sd_inum_inode;
|
||||
struct inode *sd_statfs_inode;
|
||||
struct inode *sd_ir_inode;
|
||||
struct inode *sd_sc_inode;
|
||||
struct inode *sd_qc_inode;
|
||||
struct inode *sd_rindex;
|
||||
struct inode *sd_quota_inode;
|
||||
|
||||
/* Inum stuff */
|
||||
|
||||
struct mutex sd_inum_mutex;
|
||||
|
||||
/* StatFS stuff */
|
||||
|
||||
spinlock_t sd_statfs_spin;
|
||||
struct mutex sd_statfs_mutex;
|
||||
struct gfs2_statfs_change sd_statfs_master;
|
||||
struct gfs2_statfs_change sd_statfs_local;
|
||||
unsigned long sd_statfs_sync_time;
|
||||
|
||||
/* Resource group stuff */
|
||||
|
||||
u64 sd_rindex_vn;
|
||||
spinlock_t sd_rindex_spin;
|
||||
struct mutex sd_rindex_mutex;
|
||||
struct list_head sd_rindex_list;
|
||||
struct list_head sd_rindex_mru_list;
|
||||
struct list_head sd_rindex_recent_list;
|
||||
struct gfs2_rgrpd *sd_rindex_forward;
|
||||
unsigned int sd_rgrps;
|
||||
|
||||
/* Journal index stuff */
|
||||
|
||||
struct list_head sd_jindex_list;
|
||||
spinlock_t sd_jindex_spin;
|
||||
struct mutex sd_jindex_mutex;
|
||||
unsigned int sd_journals;
|
||||
unsigned long sd_jindex_refresh_time;
|
||||
|
||||
struct gfs2_jdesc *sd_jdesc;
|
||||
struct gfs2_holder sd_journal_gh;
|
||||
struct gfs2_holder sd_jinode_gh;
|
||||
|
||||
struct gfs2_holder sd_ir_gh;
|
||||
struct gfs2_holder sd_sc_gh;
|
||||
struct gfs2_holder sd_qc_gh;
|
||||
|
||||
/* Daemon stuff */
|
||||
|
||||
struct task_struct *sd_scand_process;
|
||||
struct task_struct *sd_recoverd_process;
|
||||
struct task_struct *sd_logd_process;
|
||||
struct task_struct *sd_quotad_process;
|
||||
struct task_struct *sd_glockd_process[GFS2_GLOCKD_MAX];
|
||||
unsigned int sd_glockd_num;
|
||||
|
||||
/* Quota stuff */
|
||||
|
||||
struct list_head sd_quota_list;
|
||||
atomic_t sd_quota_count;
|
||||
spinlock_t sd_quota_spin;
|
||||
struct mutex sd_quota_mutex;
|
||||
|
||||
unsigned int sd_quota_slots;
|
||||
unsigned int sd_quota_chunks;
|
||||
unsigned char **sd_quota_bitmap;
|
||||
|
||||
u64 sd_quota_sync_gen;
|
||||
unsigned long sd_quota_sync_time;
|
||||
|
||||
/* Log stuff */
|
||||
|
||||
spinlock_t sd_log_lock;
|
||||
|
||||
unsigned int sd_log_blks_reserved;
|
||||
unsigned int sd_log_commited_buf;
|
||||
unsigned int sd_log_commited_revoke;
|
||||
|
||||
unsigned int sd_log_num_gl;
|
||||
unsigned int sd_log_num_buf;
|
||||
unsigned int sd_log_num_revoke;
|
||||
unsigned int sd_log_num_rg;
|
||||
unsigned int sd_log_num_databuf;
|
||||
unsigned int sd_log_num_jdata;
|
||||
unsigned int sd_log_num_hdrs;
|
||||
|
||||
struct list_head sd_log_le_gl;
|
||||
struct list_head sd_log_le_buf;
|
||||
struct list_head sd_log_le_revoke;
|
||||
struct list_head sd_log_le_rg;
|
||||
struct list_head sd_log_le_databuf;
|
||||
|
||||
unsigned int sd_log_blks_free;
|
||||
struct mutex sd_log_reserve_mutex;
|
||||
|
||||
u64 sd_log_sequence;
|
||||
unsigned int sd_log_head;
|
||||
unsigned int sd_log_tail;
|
||||
int sd_log_idle;
|
||||
|
||||
unsigned long sd_log_flush_time;
|
||||
struct rw_semaphore sd_log_flush_lock;
|
||||
struct list_head sd_log_flush_list;
|
||||
|
||||
unsigned int sd_log_flush_head;
|
||||
u64 sd_log_flush_wrapped;
|
||||
|
||||
struct list_head sd_ail1_list;
|
||||
struct list_head sd_ail2_list;
|
||||
u64 sd_ail_sync_gen;
|
||||
|
||||
/* Replay stuff */
|
||||
|
||||
struct list_head sd_revoke_list;
|
||||
unsigned int sd_replay_tail;
|
||||
|
||||
unsigned int sd_found_blocks;
|
||||
unsigned int sd_found_revokes;
|
||||
unsigned int sd_replayed_blocks;
|
||||
|
||||
/* For quiescing the filesystem */
|
||||
|
||||
struct gfs2_holder sd_freeze_gh;
|
||||
struct mutex sd_freeze_lock;
|
||||
unsigned int sd_freeze_count;
|
||||
|
||||
/* Counters */
|
||||
|
||||
atomic_t sd_glock_count;
|
||||
atomic_t sd_glock_held_count;
|
||||
atomic_t sd_inode_count;
|
||||
atomic_t sd_reclaimed;
|
||||
|
||||
char sd_fsname[GFS2_FSNAME_LEN];
|
||||
char sd_table_name[GFS2_FSNAME_LEN];
|
||||
char sd_proto_name[GFS2_FSNAME_LEN];
|
||||
|
||||
/* Debugging crud */
|
||||
|
||||
unsigned long sd_last_warning;
|
||||
struct vfsmount *sd_gfs2mnt;
|
||||
};
|
||||
|
||||
#endif /* __INCORE_DOT_H__ */
|
||||
|
1379  fs/gfs2/inode.c  Normal file
File diff suppressed because it is too large
56  fs/gfs2/inode.h  Normal file
@@ -0,0 +1,56 @@
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#ifndef __INODE_DOT_H__
#define __INODE_DOT_H__

static inline int gfs2_is_stuffed(struct gfs2_inode *ip)
{
	return !ip->i_di.di_height;
}

static inline int gfs2_is_jdata(struct gfs2_inode *ip)
{
	return ip->i_di.di_flags & GFS2_DIF_JDATA;
}

static inline int gfs2_is_dir(struct gfs2_inode *ip)
{
	return S_ISDIR(ip->i_di.di_mode);
}

void gfs2_inode_attr_in(struct gfs2_inode *ip);
void gfs2_inode_attr_out(struct gfs2_inode *ip);
struct inode *gfs2_inode_lookup(struct super_block *sb, struct gfs2_inum *inum, unsigned type);
struct inode *gfs2_ilookup(struct super_block *sb, struct gfs2_inum *inum);

int gfs2_inode_refresh(struct gfs2_inode *ip);

int gfs2_dinode_dealloc(struct gfs2_inode *inode);
int gfs2_change_nlink(struct gfs2_inode *ip, int diff);
struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
			   int is_root, struct nameidata *nd);
struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
			   unsigned int mode);
int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
		struct gfs2_inode *ip);
int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
		   struct gfs2_inode *ip);
int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to);
int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len);

int gfs2_glock_nq_atime(struct gfs2_holder *gh);
int gfs2_glock_nq_m_atime(unsigned int num_gh, struct gfs2_holder *ghs);

int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr);

struct inode *gfs2_lookup_simple(struct inode *dip, const char *name);

#endif /* __INODE_DOT_H__ */
217  fs/gfs2/lm.c  Normal file
@@ -0,0 +1,217 @@
/*
|
||||
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This copyrighted material is made available to anyone wishing to use,
|
||||
* modify, copy, or redistribute it subject to the terms and conditions
|
||||
* of the GNU General Public License version 2.
|
||||
*/
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/buffer_head.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/gfs2_ondisk.h>
|
||||
#include <linux/lm_interface.h>
|
||||
|
||||
#include "gfs2.h"
|
||||
#include "incore.h"
|
||||
#include "glock.h"
|
||||
#include "lm.h"
|
||||
#include "super.h"
|
||||
#include "util.h"
|
||||
|
||||
/**
 * gfs2_lm_mount - mount a locking protocol
 * @sdp: the filesystem
 * @args: mount arguments
 * @silent: if 1, don't complain if the FS isn't a GFS2 fs
 *
 * Returns: errno
 */
|
||||
int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent)
|
||||
{
|
||||
char *proto = sdp->sd_proto_name;
|
||||
char *table = sdp->sd_table_name;
|
||||
int flags = 0;
|
||||
int error;
|
||||
|
||||
if (sdp->sd_args.ar_spectator)
|
||||
flags |= LM_MFLAG_SPECTATOR;
|
||||
|
||||
fs_info(sdp, "Trying to join cluster \"%s\", \"%s\"\n", proto, table);
|
||||
|
||||
error = gfs2_mount_lockproto(proto, table, sdp->sd_args.ar_hostdata,
|
||||
gfs2_glock_cb, sdp,
|
||||
GFS2_MIN_LVB_SIZE, flags,
|
||||
&sdp->sd_lockstruct, &sdp->sd_kobj);
|
||||
if (error) {
|
||||
fs_info(sdp, "can't mount proto=%s, table=%s, hostdata=%s\n",
|
||||
proto, table, sdp->sd_args.ar_hostdata);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_lockspace) ||
|
||||
gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_ops) ||
|
||||
gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_lvb_size >=
|
||||
GFS2_MIN_LVB_SIZE)) {
|
||||
gfs2_unmount_lockproto(&sdp->sd_lockstruct);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (sdp->sd_args.ar_spectator)
|
||||
snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s.s", table);
|
||||
else
|
||||
snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s.%u", table,
|
||||
sdp->sd_lockstruct.ls_jid);
|
||||
|
||||
fs_info(sdp, "Joined cluster. Now mounting FS...\n");
|
||||
|
||||
if ((sdp->sd_lockstruct.ls_flags & LM_LSFLAG_LOCAL) &&
|
||||
!sdp->sd_args.ar_ignore_local_fs) {
|
||||
sdp->sd_args.ar_localflocks = 1;
|
||||
sdp->sd_args.ar_localcaching = 1;
|
||||
}
|
||||
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
||||
void gfs2_lm_others_may_mount(struct gfs2_sbd *sdp)
|
||||
{
|
||||
if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
|
||||
sdp->sd_lockstruct.ls_ops->lm_others_may_mount(
|
||||
sdp->sd_lockstruct.ls_lockspace);
|
||||
}
|
||||
|
||||
void gfs2_lm_unmount(struct gfs2_sbd *sdp)
|
||||
{
|
||||
if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
|
||||
gfs2_unmount_lockproto(&sdp->sd_lockstruct);
|
||||
}
|
||||
|
||||
int gfs2_lm_withdraw(struct gfs2_sbd *sdp, char *fmt, ...)
|
||||
{
|
||||
va_list args;
|
||||
|
||||
if (test_and_set_bit(SDF_SHUTDOWN, &sdp->sd_flags))
|
||||
return 0;
|
||||
|
||||
va_start(args, fmt);
|
||||
vprintk(fmt, args);
|
||||
va_end(args);
|
||||
|
||||
fs_err(sdp, "about to withdraw from the cluster\n");
|
||||
BUG_ON(sdp->sd_args.ar_debug);
|
||||
|
||||
|
||||
fs_err(sdp, "waiting for outstanding I/O\n");
|
||||
|
||||
/* FIXME: suspend dm device so outstanding bios complete
   and all further io requests fail */
|
||||
fs_err(sdp, "telling LM to withdraw\n");
|
||||
gfs2_withdraw_lockproto(&sdp->sd_lockstruct);
|
||||
fs_err(sdp, "withdrawn\n");
|
||||
dump_stack();
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
int gfs2_lm_get_lock(struct gfs2_sbd *sdp, struct lm_lockname *name,
|
||||
void **lockp)
|
||||
{
|
||||
int error = -EIO;
|
||||
if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
|
||||
error = sdp->sd_lockstruct.ls_ops->lm_get_lock(
|
||||
sdp->sd_lockstruct.ls_lockspace, name, lockp);
|
||||
return error;
|
||||
}
|
||||
|
||||
void gfs2_lm_put_lock(struct gfs2_sbd *sdp, void *lock)
|
||||
{
|
||||
if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
|
||||
sdp->sd_lockstruct.ls_ops->lm_put_lock(lock);
|
||||
}
|
||||
|
||||
unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
|
||||
unsigned int cur_state, unsigned int req_state,
|
||||
unsigned int flags)
|
||||
{
|
||||
int ret = 0;
|
||||
if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
|
||||
ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock, cur_state,
|
||||
req_state, flags);
|
||||
return ret;
|
||||
}
|
||||
|
||||
unsigned int gfs2_lm_unlock(struct gfs2_sbd *sdp, void *lock,
|
||||
unsigned int cur_state)
|
||||
{
|
||||
int ret = 0;
|
||||
if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
|
||||
ret = sdp->sd_lockstruct.ls_ops->lm_unlock(lock, cur_state);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void gfs2_lm_cancel(struct gfs2_sbd *sdp, void *lock)
|
||||
{
|
||||
if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
|
||||
sdp->sd_lockstruct.ls_ops->lm_cancel(lock);
|
||||
}
|
||||
|
||||
int gfs2_lm_hold_lvb(struct gfs2_sbd *sdp, void *lock, char **lvbp)
|
||||
{
|
||||
int error = -EIO;
|
||||
if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
|
||||
error = sdp->sd_lockstruct.ls_ops->lm_hold_lvb(lock, lvbp);
|
||||
return error;
|
||||
}
|
||||
|
||||
void gfs2_lm_unhold_lvb(struct gfs2_sbd *sdp, void *lock, char *lvb)
|
||||
{
|
||||
if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
|
||||
sdp->sd_lockstruct.ls_ops->lm_unhold_lvb(lock, lvb);
|
||||
}
|
||||
|
||||
int gfs2_lm_plock_get(struct gfs2_sbd *sdp, struct lm_lockname *name,
|
||||
struct file *file, struct file_lock *fl)
|
||||
{
|
||||
int error = -EIO;
|
||||
if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
|
||||
error = sdp->sd_lockstruct.ls_ops->lm_plock_get(
|
||||
sdp->sd_lockstruct.ls_lockspace, name, file, fl);
|
||||
return error;
|
||||
}
|
||||
|
||||
int gfs2_lm_plock(struct gfs2_sbd *sdp, struct lm_lockname *name,
|
||||
struct file *file, int cmd, struct file_lock *fl)
|
||||
{
|
||||
int error = -EIO;
|
||||
if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
|
||||
error = sdp->sd_lockstruct.ls_ops->lm_plock(
|
||||
sdp->sd_lockstruct.ls_lockspace, name, file, cmd, fl);
|
||||
return error;
|
||||
}
|
||||
|
||||
int gfs2_lm_punlock(struct gfs2_sbd *sdp, struct lm_lockname *name,
|
||||
struct file *file, struct file_lock *fl)
|
||||
{
|
||||
int error = -EIO;
|
||||
if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
|
||||
error = sdp->sd_lockstruct.ls_ops->lm_punlock(
|
||||
sdp->sd_lockstruct.ls_lockspace, name, file, fl);
|
||||
return error;
|
||||
}
|
||||
|
||||
void gfs2_lm_recovery_done(struct gfs2_sbd *sdp, unsigned int jid,
|
||||
unsigned int message)
|
||||
{
|
||||
if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
|
||||
sdp->sd_lockstruct.ls_ops->lm_recovery_done(
|
||||
sdp->sd_lockstruct.ls_lockspace, jid, message);
|
||||
}
|
||||
|
42  fs/gfs2/lm.h  Normal file
@@ -0,0 +1,42 @@
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#ifndef __LM_DOT_H__
#define __LM_DOT_H__

struct gfs2_sbd;

#define GFS2_MIN_LVB_SIZE 32

int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent);
void gfs2_lm_others_may_mount(struct gfs2_sbd *sdp);
void gfs2_lm_unmount(struct gfs2_sbd *sdp);
int gfs2_lm_withdraw(struct gfs2_sbd *sdp, char *fmt, ...)
	__attribute__ ((format(printf, 2, 3)));
int gfs2_lm_get_lock(struct gfs2_sbd *sdp, struct lm_lockname *name,
		     void **lockp);
void gfs2_lm_put_lock(struct gfs2_sbd *sdp, void *lock);
unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
			  unsigned int cur_state, unsigned int req_state,
			  unsigned int flags);
unsigned int gfs2_lm_unlock(struct gfs2_sbd *sdp, void *lock,
			    unsigned int cur_state);
void gfs2_lm_cancel(struct gfs2_sbd *sdp, void *lock);
int gfs2_lm_hold_lvb(struct gfs2_sbd *sdp, void *lock, char **lvbp);
void gfs2_lm_unhold_lvb(struct gfs2_sbd *sdp, void *lock, char *lvb);
int gfs2_lm_plock_get(struct gfs2_sbd *sdp, struct lm_lockname *name,
		      struct file *file, struct file_lock *fl);
int gfs2_lm_plock(struct gfs2_sbd *sdp, struct lm_lockname *name,
		  struct file *file, int cmd, struct file_lock *fl);
int gfs2_lm_punlock(struct gfs2_sbd *sdp, struct lm_lockname *name,
		    struct file *file, struct file_lock *fl);
void gfs2_lm_recovery_done(struct gfs2_sbd *sdp, unsigned int jid,
			   unsigned int message);

#endif /* __LM_DOT_H__ */
184  fs/gfs2/locking.c  Normal file
@@ -0,0 +1,184 @@
/*
|
||||
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This copyrighted material is made available to anyone wishing to use,
|
||||
* modify, copy, or redistribute it subject to the terms and conditions
|
||||
* of the GNU General Public License version 2.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/kmod.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/lm_interface.h>
|
||||
|
||||
struct lmh_wrapper {
|
||||
struct list_head lw_list;
|
||||
const struct lm_lockops *lw_ops;
|
||||
};
|
||||
|
||||
/* List of registered low-level locking protocols. A file system selects one
|
||||
of them by name at mount time, e.g. lock_nolock, lock_dlm. */
|
||||
|
||||
static LIST_HEAD(lmh_list);
|
||||
static DEFINE_MUTEX(lmh_lock);
|
||||
|
||||
/**
|
||||
* gfs2_register_lockproto - Register a low-level locking protocol
|
||||
* @proto: the protocol definition
|
||||
*
|
||||
* Returns: 0 on success, -EXXX on failure
|
||||
*/
|
||||
|
||||
int gfs2_register_lockproto(const struct lm_lockops *proto)
|
||||
{
|
||||
struct lmh_wrapper *lw;
|
||||
|
||||
mutex_lock(&lmh_lock);
|
||||
|
||||
list_for_each_entry(lw, &lmh_list, lw_list) {
|
||||
if (!strcmp(lw->lw_ops->lm_proto_name, proto->lm_proto_name)) {
|
||||
mutex_unlock(&lmh_lock);
|
||||
printk(KERN_INFO "GFS2: protocol %s already exists\n",
|
||||
proto->lm_proto_name);
|
||||
return -EEXIST;
|
||||
}
|
||||
}
|
||||
|
||||
lw = kzalloc(sizeof(struct lmh_wrapper), GFP_KERNEL);
|
||||
if (!lw) {
|
||||
mutex_unlock(&lmh_lock);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
lw->lw_ops = proto;
|
||||
list_add(&lw->lw_list, &lmh_list);
|
||||
|
||||
mutex_unlock(&lmh_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_unregister_lockproto - Unregister a low-level locking protocol
|
||||
* @proto: the protocol definition
|
||||
*
|
||||
*/
|
||||
|
||||
void gfs2_unregister_lockproto(const struct lm_lockops *proto)
|
||||
{
|
||||
struct lmh_wrapper *lw;
|
||||
|
||||
mutex_lock(&lmh_lock);
|
||||
|
||||
list_for_each_entry(lw, &lmh_list, lw_list) {
|
||||
if (!strcmp(lw->lw_ops->lm_proto_name, proto->lm_proto_name)) {
|
||||
list_del(&lw->lw_list);
|
||||
mutex_unlock(&lmh_lock);
|
||||
kfree(lw);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
mutex_unlock(&lmh_lock);
|
||||
|
||||
printk(KERN_WARNING "GFS2: can't unregister lock protocol %s\n",
|
||||
proto->lm_proto_name);
|
||||
}
|
||||
|
||||
/**
 * gfs2_mount_lockproto - Mount a lock protocol
 * @proto_name - the name of the protocol
 * @table_name - the name of the lock space
 * @host_data - data specific to this host
 * @cb - the callback to the code using the lock module
 * @sdp - The GFS2 superblock
 * @min_lvb_size - the minimum LVB size that the caller can deal with
 * @flags - LM_MFLAG_*
 * @lockstruct - a structure returned describing the mount
 *
 * Returns: 0 on success, -EXXX on failure
 */
|
||||
int gfs2_mount_lockproto(char *proto_name, char *table_name, char *host_data,
|
||||
lm_callback_t cb, void *cb_data,
|
||||
unsigned int min_lvb_size, int flags,
|
||||
struct lm_lockstruct *lockstruct,
|
||||
struct kobject *fskobj)
|
||||
{
|
||||
struct lmh_wrapper *lw = NULL;
|
||||
int try = 0;
|
||||
int error, found;
|
||||
|
||||
retry:
|
||||
mutex_lock(&lmh_lock);
|
||||
|
||||
found = 0;
|
||||
list_for_each_entry(lw, &lmh_list, lw_list) {
|
||||
if (!strcmp(lw->lw_ops->lm_proto_name, proto_name)) {
|
||||
found = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!found) {
|
||||
if (!try && capable(CAP_SYS_MODULE)) {
|
||||
try = 1;
|
||||
mutex_unlock(&lmh_lock);
|
||||
request_module(proto_name);
|
||||
goto retry;
|
||||
}
|
||||
printk(KERN_INFO "GFS2: can't find protocol %s\n", proto_name);
|
||||
error = -ENOENT;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!try_module_get(lw->lw_ops->lm_owner)) {
|
||||
try = 0;
|
||||
mutex_unlock(&lmh_lock);
|
||||
msleep(1000);
|
||||
goto retry;
|
||||
}
|
||||
|
||||
error = lw->lw_ops->lm_mount(table_name, host_data, cb, cb_data,
|
||||
min_lvb_size, flags, lockstruct, fskobj);
|
||||
if (error)
|
||||
module_put(lw->lw_ops->lm_owner);
|
||||
out:
|
||||
mutex_unlock(&lmh_lock);
|
||||
return error;
|
||||
}
|
||||
|
||||
void gfs2_unmount_lockproto(struct lm_lockstruct *lockstruct)
|
||||
{
|
||||
mutex_lock(&lmh_lock);
|
||||
lockstruct->ls_ops->lm_unmount(lockstruct->ls_lockspace);
|
||||
if (lockstruct->ls_ops->lm_owner)
|
||||
module_put(lockstruct->ls_ops->lm_owner);
|
||||
mutex_unlock(&lmh_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_withdraw_lockproto - abnormally unmount a lock module
|
||||
* @lockstruct: the lockstruct passed into mount
|
||||
*
|
||||
*/
|
||||
|
||||
void gfs2_withdraw_lockproto(struct lm_lockstruct *lockstruct)
|
||||
{
|
||||
mutex_lock(&lmh_lock);
|
||||
lockstruct->ls_ops->lm_withdraw(lockstruct->ls_lockspace);
|
||||
if (lockstruct->ls_ops->lm_owner)
|
||||
module_put(lockstruct->ls_ops->lm_owner);
|
||||
mutex_unlock(&lmh_lock);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL_GPL(gfs2_register_lockproto);
|
||||
EXPORT_SYMBOL_GPL(gfs2_unregister_lockproto);
|
||||
|
3  fs/gfs2/locking/dlm/Makefile  Normal file
@@ -0,0 +1,3 @@
obj-$(CONFIG_GFS2_FS_LOCKING_DLM) += lock_dlm.o
lock_dlm-y := lock.o main.o mount.o sysfs.o thread.o plock.o
524  fs/gfs2/locking/dlm/lock.c  Normal file
@@ -0,0 +1,524 @@
/*
|
||||
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
* Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This copyrighted material is made available to anyone wishing to use,
|
||||
* modify, copy, or redistribute it subject to the terms and conditions
|
||||
* of the GNU General Public License version 2.
|
||||
*/
|
||||
|
||||
#include "lock_dlm.h"
|
||||
|
||||
static char junk_lvb[GDLM_LVB_SIZE];
|
||||
|
||||
static void queue_complete(struct gdlm_lock *lp)
|
||||
{
|
||||
struct gdlm_ls *ls = lp->ls;
|
||||
|
||||
clear_bit(LFL_ACTIVE, &lp->flags);
|
||||
|
||||
spin_lock(&ls->async_lock);
|
||||
list_add_tail(&lp->clist, &ls->complete);
|
||||
spin_unlock(&ls->async_lock);
|
||||
wake_up(&ls->thread_wait);
|
||||
}
|
||||
|
||||
static inline void gdlm_ast(void *astarg)
|
||||
{
|
||||
queue_complete(astarg);
|
||||
}
|
||||
|
||||
static inline void gdlm_bast(void *astarg, int mode)
|
||||
{
|
||||
struct gdlm_lock *lp = astarg;
|
||||
struct gdlm_ls *ls = lp->ls;
|
||||
|
||||
if (!mode) {
|
||||
printk(KERN_INFO "lock_dlm: bast mode zero %x,%llx\n",
|
||||
lp->lockname.ln_type,
|
||||
(unsigned long long)lp->lockname.ln_number);
|
||||
return;
|
||||
}
|
||||
|
||||
spin_lock(&ls->async_lock);
|
||||
if (!lp->bast_mode) {
|
||||
list_add_tail(&lp->blist, &ls->blocking);
|
||||
lp->bast_mode = mode;
|
||||
} else if (lp->bast_mode < mode)
|
||||
lp->bast_mode = mode;
|
||||
spin_unlock(&ls->async_lock);
|
||||
wake_up(&ls->thread_wait);
|
||||
}
|
||||
|
||||
void gdlm_queue_delayed(struct gdlm_lock *lp)
|
||||
{
|
||||
struct gdlm_ls *ls = lp->ls;
|
||||
|
||||
spin_lock(&ls->async_lock);
|
||||
list_add_tail(&lp->delay_list, &ls->delayed);
|
||||
spin_unlock(&ls->async_lock);
|
||||
}
|
||||
|
||||
/* convert gfs lock-state to dlm lock-mode */
|
||||
|
||||
static s16 make_mode(s16 lmstate)
|
||||
{
|
||||
switch (lmstate) {
|
||||
case LM_ST_UNLOCKED:
|
||||
return DLM_LOCK_NL;
|
||||
case LM_ST_EXCLUSIVE:
|
||||
return DLM_LOCK_EX;
|
||||
case LM_ST_DEFERRED:
|
||||
return DLM_LOCK_CW;
|
||||
case LM_ST_SHARED:
|
||||
return DLM_LOCK_PR;
|
||||
}
|
||||
gdlm_assert(0, "unknown LM state %d", lmstate);
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* convert dlm lock-mode to gfs lock-state */
|
||||
|
||||
s16 gdlm_make_lmstate(s16 dlmmode)
|
||||
{
|
||||
switch (dlmmode) {
|
||||
case DLM_LOCK_IV:
|
||||
case DLM_LOCK_NL:
|
||||
return LM_ST_UNLOCKED;
|
||||
case DLM_LOCK_EX:
|
||||
return LM_ST_EXCLUSIVE;
|
||||
case DLM_LOCK_CW:
|
||||
return LM_ST_DEFERRED;
|
||||
case DLM_LOCK_PR:
|
||||
return LM_ST_SHARED;
|
||||
}
|
||||
gdlm_assert(0, "unknown DLM mode %d", dlmmode);
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* verify agreement with GFS on the current lock state, NB: DLM_LOCK_NL and
|
||||
DLM_LOCK_IV are both considered LM_ST_UNLOCKED by GFS. */
|
||||
|
||||
static void check_cur_state(struct gdlm_lock *lp, unsigned int cur_state)
|
||||
{
|
||||
s16 cur = make_mode(cur_state);
|
||||
if (lp->cur != DLM_LOCK_IV)
|
||||
gdlm_assert(lp->cur == cur, "%d, %d", lp->cur, cur);
|
||||
}
|
||||
|
||||
static inline unsigned int make_flags(struct gdlm_lock *lp,
|
||||
unsigned int gfs_flags,
|
||||
s16 cur, s16 req)
|
||||
{
|
||||
unsigned int lkf = 0;
|
||||
|
||||
if (gfs_flags & LM_FLAG_TRY)
|
||||
lkf |= DLM_LKF_NOQUEUE;
|
||||
|
||||
if (gfs_flags & LM_FLAG_TRY_1CB) {
|
||||
lkf |= DLM_LKF_NOQUEUE;
|
||||
lkf |= DLM_LKF_NOQUEUEBAST;
|
||||
}
|
||||
|
||||
if (gfs_flags & LM_FLAG_PRIORITY) {
|
||||
lkf |= DLM_LKF_NOORDER;
|
||||
lkf |= DLM_LKF_HEADQUE;
|
||||
}
|
||||
|
||||
if (gfs_flags & LM_FLAG_ANY) {
|
||||
if (req == DLM_LOCK_PR)
|
||||
lkf |= DLM_LKF_ALTCW;
|
||||
else if (req == DLM_LOCK_CW)
|
||||
lkf |= DLM_LKF_ALTPR;
|
||||
}
|
||||
|
||||
if (lp->lksb.sb_lkid != 0) {
|
||||
lkf |= DLM_LKF_CONVERT;
|
||||
|
||||
/* Conversion deadlock avoidance by DLM */
|
||||
|
||||
if (!test_bit(LFL_FORCE_PROMOTE, &lp->flags) &&
|
||||
!(lkf & DLM_LKF_NOQUEUE) &&
|
||||
cur > DLM_LOCK_NL && req > DLM_LOCK_NL && cur != req)
|
||||
lkf |= DLM_LKF_CONVDEADLK;
|
||||
}
|
||||
|
||||
if (lp->lvb)
|
||||
lkf |= DLM_LKF_VALBLK;
|
||||
|
||||
return lkf;
|
||||
}
|
||||
|
||||
/* make_strname - convert GFS lock numbers to a string */
|
||||
|
||||
static inline void make_strname(struct lm_lockname *lockname,
|
||||
struct gdlm_strname *str)
|
||||
{
|
||||
sprintf(str->name, "%8x%16llx", lockname->ln_type,
|
||||
(unsigned long long)lockname->ln_number);
|
||||
str->namelen = GDLM_STRNAME_BYTES;
|
||||
}
|
||||
|
||||
static int gdlm_create_lp(struct gdlm_ls *ls, struct lm_lockname *name,
|
||||
struct gdlm_lock **lpp)
|
||||
{
|
||||
struct gdlm_lock *lp;
|
||||
|
||||
lp = kzalloc(sizeof(struct gdlm_lock), GFP_KERNEL);
|
||||
if (!lp)
|
||||
return -ENOMEM;
|
||||
|
||||
lp->lockname = *name;
|
||||
lp->ls = ls;
|
||||
lp->cur = DLM_LOCK_IV;
|
||||
lp->lvb = NULL;
|
||||
lp->hold_null = NULL;
|
||||
init_completion(&lp->ast_wait);
|
||||
INIT_LIST_HEAD(&lp->clist);
|
||||
INIT_LIST_HEAD(&lp->blist);
|
||||
INIT_LIST_HEAD(&lp->delay_list);
|
||||
|
||||
spin_lock(&ls->async_lock);
|
||||
list_add(&lp->all_list, &ls->all_locks);
|
||||
ls->all_locks_count++;
|
||||
spin_unlock(&ls->async_lock);
|
||||
|
||||
*lpp = lp;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void gdlm_delete_lp(struct gdlm_lock *lp)
|
||||
{
|
||||
struct gdlm_ls *ls = lp->ls;
|
||||
|
||||
spin_lock(&ls->async_lock);
|
||||
if (!list_empty(&lp->clist))
|
||||
list_del_init(&lp->clist);
|
||||
if (!list_empty(&lp->blist))
|
||||
list_del_init(&lp->blist);
|
||||
if (!list_empty(&lp->delay_list))
|
||||
list_del_init(&lp->delay_list);
|
||||
gdlm_assert(!list_empty(&lp->all_list), "%x,%llx", lp->lockname.ln_type,
|
||||
(unsigned long long)lp->lockname.ln_number);
|
||||
list_del_init(&lp->all_list);
|
||||
ls->all_locks_count--;
|
||||
spin_unlock(&ls->async_lock);
|
||||
|
||||
kfree(lp);
|
||||
}
|
||||
|
||||
int gdlm_get_lock(void *lockspace, struct lm_lockname *name,
|
||||
void **lockp)
|
||||
{
|
||||
struct gdlm_lock *lp;
|
||||
int error;
|
||||
|
||||
error = gdlm_create_lp(lockspace, name, &lp);
|
||||
|
||||
*lockp = lp;
|
||||
return error;
|
||||
}
|
||||
|
||||
void gdlm_put_lock(void *lock)
|
||||
{
|
||||
gdlm_delete_lp(lock);
|
||||
}
|
||||
|
||||
unsigned int gdlm_do_lock(struct gdlm_lock *lp)
|
||||
{
|
||||
struct gdlm_ls *ls = lp->ls;
|
||||
struct gdlm_strname str;
|
||||
int error, bast = 1;
|
||||
|
||||
/*
|
||||
* When recovery is in progress, delay lock requests for submission
|
||||
* once recovery is done. Requests for recovery (NOEXP) and unlocks
|
||||
* can pass.
|
||||
*/
|
||||
|
||||
if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
|
||||
!test_bit(LFL_NOBLOCK, &lp->flags) && lp->req != DLM_LOCK_NL) {
|
||||
gdlm_queue_delayed(lp);
|
||||
return LM_OUT_ASYNC;
|
||||
}
|
||||
|
||||
/*
|
||||
* Submit the actual lock request.
|
||||
*/
|
||||
|
||||
if (test_bit(LFL_NOBAST, &lp->flags))
|
||||
bast = 0;
|
||||
|
||||
make_strname(&lp->lockname, &str);
|
||||
|
||||
set_bit(LFL_ACTIVE, &lp->flags);
|
||||
|
||||
log_debug("lk %x,%llx id %x %d,%d %x", lp->lockname.ln_type,
|
||||
(unsigned long long)lp->lockname.ln_number, lp->lksb.sb_lkid,
|
||||
lp->cur, lp->req, lp->lkf);
|
||||
|
||||
error = dlm_lock(ls->dlm_lockspace, lp->req, &lp->lksb, lp->lkf,
|
||||
str.name, str.namelen, 0, gdlm_ast, lp,
|
||||
bast ? gdlm_bast : NULL);
|
||||
|
||||
if ((error == -EAGAIN) && (lp->lkf & DLM_LKF_NOQUEUE)) {
|
||||
lp->lksb.sb_status = -EAGAIN;
|
||||
queue_complete(lp);
|
||||
error = 0;
|
||||
}
|
||||
|
||||
if (error) {
|
||||
log_debug("%s: gdlm_lock %x,%llx err=%d cur=%d req=%d lkf=%x "
|
||||
"flags=%lx", ls->fsname, lp->lockname.ln_type,
|
||||
(unsigned long long)lp->lockname.ln_number, error,
|
||||
lp->cur, lp->req, lp->lkf, lp->flags);
|
||||
return LM_OUT_ERROR;
|
||||
}
|
||||
return LM_OUT_ASYNC;
|
||||
}
|
||||
|
||||
static unsigned int gdlm_do_unlock(struct gdlm_lock *lp)
|
||||
{
|
||||
struct gdlm_ls *ls = lp->ls;
|
||||
unsigned int lkf = 0;
|
||||
int error;
|
||||
|
||||
set_bit(LFL_DLM_UNLOCK, &lp->flags);
|
||||
set_bit(LFL_ACTIVE, &lp->flags);
|
||||
|
||||
if (lp->lvb)
|
||||
lkf = DLM_LKF_VALBLK;
|
||||
|
||||
log_debug("un %x,%llx %x %d %x", lp->lockname.ln_type,
|
||||
(unsigned long long)lp->lockname.ln_number,
|
||||
lp->lksb.sb_lkid, lp->cur, lkf);
|
||||
|
||||
error = dlm_unlock(ls->dlm_lockspace, lp->lksb.sb_lkid, lkf, NULL, lp);
|
||||
|
||||
if (error) {
|
||||
log_debug("%s: gdlm_unlock %x,%llx err=%d cur=%d req=%d lkf=%x "
|
||||
"flags=%lx", ls->fsname, lp->lockname.ln_type,
|
||||
(unsigned long long)lp->lockname.ln_number, error,
|
||||
lp->cur, lp->req, lp->lkf, lp->flags);
|
||||
return LM_OUT_ERROR;
|
||||
}
|
||||
return LM_OUT_ASYNC;
|
||||
}
|
||||
|
||||
unsigned int gdlm_lock(void *lock, unsigned int cur_state,
|
||||
unsigned int req_state, unsigned int flags)
|
||||
{
|
||||
struct gdlm_lock *lp = lock;
|
||||
|
||||
clear_bit(LFL_DLM_CANCEL, &lp->flags);
|
||||
if (flags & LM_FLAG_NOEXP)
|
||||
set_bit(LFL_NOBLOCK, &lp->flags);
|
||||
|
||||
check_cur_state(lp, cur_state);
|
||||
lp->req = make_mode(req_state);
|
||||
lp->lkf = make_flags(lp, flags, lp->cur, lp->req);
|
||||
|
||||
return gdlm_do_lock(lp);
|
||||
}
|
||||
|
||||
unsigned int gdlm_unlock(void *lock, unsigned int cur_state)
|
||||
{
|
||||
struct gdlm_lock *lp = lock;
|
||||
|
||||
clear_bit(LFL_DLM_CANCEL, &lp->flags);
|
||||
if (lp->cur == DLM_LOCK_IV)
|
||||
return 0;
|
||||
return gdlm_do_unlock(lp);
|
||||
}
|
||||
|
||||
void gdlm_cancel(void *lock)
|
||||
{
|
||||
struct gdlm_lock *lp = lock;
|
||||
struct gdlm_ls *ls = lp->ls;
|
||||
int error, delay_list = 0;
|
||||
|
||||
if (test_bit(LFL_DLM_CANCEL, &lp->flags))
|
||||
return;
|
||||
|
||||
log_info("gdlm_cancel %x,%llx flags %lx", lp->lockname.ln_type,
|
||||
(unsigned long long)lp->lockname.ln_number, lp->flags);
|
||||
|
||||
spin_lock(&ls->async_lock);
|
||||
if (!list_empty(&lp->delay_list)) {
|
||||
list_del_init(&lp->delay_list);
|
||||
delay_list = 1;
|
||||
}
|
||||
spin_unlock(&ls->async_lock);
|
||||
|
||||
if (delay_list) {
|
||||
set_bit(LFL_CANCEL, &lp->flags);
|
||||
set_bit(LFL_ACTIVE, &lp->flags);
|
||||
queue_complete(lp);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!test_bit(LFL_ACTIVE, &lp->flags) ||
|
||||
test_bit(LFL_DLM_UNLOCK, &lp->flags)) {
|
||||
log_info("gdlm_cancel skip %x,%llx flags %lx",
|
||||
lp->lockname.ln_type,
|
||||
(unsigned long long)lp->lockname.ln_number, lp->flags);
|
||||
return;
|
||||
}
|
||||
|
||||
/* the lock is blocked in the dlm */
|
||||
|
||||
set_bit(LFL_DLM_CANCEL, &lp->flags);
|
||||
set_bit(LFL_ACTIVE, &lp->flags);
|
||||
|
||||
error = dlm_unlock(ls->dlm_lockspace, lp->lksb.sb_lkid, DLM_LKF_CANCEL,
|
||||
NULL, lp);
|
||||
|
||||
log_info("gdlm_cancel rv %d %x,%llx flags %lx", error,
|
||||
lp->lockname.ln_type,
|
||||
(unsigned long long)lp->lockname.ln_number, lp->flags);
|
||||
|
||||
if (error == -EBUSY)
|
||||
clear_bit(LFL_DLM_CANCEL, &lp->flags);
|
||||
}
|
||||
|
||||
static int gdlm_add_lvb(struct gdlm_lock *lp)
|
||||
{
|
||||
char *lvb;
|
||||
|
||||
lvb = kzalloc(GDLM_LVB_SIZE, GFP_KERNEL);
|
||||
if (!lvb)
|
||||
return -ENOMEM;
|
||||
|
||||
lp->lksb.sb_lvbptr = lvb;
|
||||
lp->lvb = lvb;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void gdlm_del_lvb(struct gdlm_lock *lp)
|
||||
{
|
||||
kfree(lp->lvb);
|
||||
lp->lvb = NULL;
|
||||
lp->lksb.sb_lvbptr = NULL;
|
||||
}
|
||||
|
||||
/* This can do a synchronous dlm request (requiring a lock_dlm thread to get
|
||||
the completion) because gfs won't call hold_lvb() during a callback (from
|
||||
the context of a lock_dlm thread). */
|
||||
|
||||
static int hold_null_lock(struct gdlm_lock *lp)
|
||||
{
|
||||
struct gdlm_lock *lpn = NULL;
|
||||
int error;
|
||||
|
||||
if (lp->hold_null) {
|
||||
printk(KERN_INFO "lock_dlm: lvb already held\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
error = gdlm_create_lp(lp->ls, &lp->lockname, &lpn);
|
||||
if (error)
|
||||
goto out;
|
||||
|
||||
lpn->lksb.sb_lvbptr = junk_lvb;
|
||||
lpn->lvb = junk_lvb;
|
||||
|
||||
lpn->req = DLM_LOCK_NL;
|
||||
lpn->lkf = DLM_LKF_VALBLK | DLM_LKF_EXPEDITE;
|
||||
set_bit(LFL_NOBAST, &lpn->flags);
|
||||
set_bit(LFL_INLOCK, &lpn->flags);
|
||||
|
||||
init_completion(&lpn->ast_wait);
|
||||
gdlm_do_lock(lpn);
|
||||
wait_for_completion(&lpn->ast_wait);
|
||||
error = lpn->lksb.sb_status;
|
||||
if (error) {
|
||||
printk(KERN_INFO "lock_dlm: hold_null_lock dlm error %d\n",
|
||||
error);
|
||||
gdlm_delete_lp(lpn);
|
||||
lpn = NULL;
|
||||
}
|
||||
out:
|
||||
lp->hold_null = lpn;
|
||||
return error;
|
||||
}
|
||||
|
||||
/* This cannot do a synchronous dlm request (requiring a lock_dlm thread to get
|
||||
the completion) because gfs may call unhold_lvb() during a callback (from
|
||||
the context of a lock_dlm thread) which could cause a deadlock since the
|
||||
other lock_dlm thread could be engaged in recovery. */
|
||||
|
||||
static void unhold_null_lock(struct gdlm_lock *lp)
|
||||
{
|
||||
struct gdlm_lock *lpn = lp->hold_null;
|
||||
|
||||
gdlm_assert(lpn, "%x,%llx", lp->lockname.ln_type,
|
||||
(unsigned long long)lp->lockname.ln_number);
|
||||
lpn->lksb.sb_lvbptr = NULL;
|
||||
lpn->lvb = NULL;
|
||||
set_bit(LFL_UNLOCK_DELETE, &lpn->flags);
|
||||
gdlm_do_unlock(lpn);
|
||||
lp->hold_null = NULL;
|
||||
}
|
||||
|
||||
/* Acquire a NL lock because gfs requires the value block to remain
|
||||
intact on the resource while the lvb is "held" even if it's holding no locks
|
||||
on the resource. */
|
||||
|
||||
int gdlm_hold_lvb(void *lock, char **lvbp)
|
||||
{
|
||||
struct gdlm_lock *lp = lock;
|
||||
int error;
|
||||
|
||||
error = gdlm_add_lvb(lp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
*lvbp = lp->lvb;
|
||||
|
||||
error = hold_null_lock(lp);
|
||||
if (error)
|
||||
gdlm_del_lvb(lp);
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
void gdlm_unhold_lvb(void *lock, char *lvb)
|
||||
{
|
||||
struct gdlm_lock *lp = lock;
|
||||
|
||||
unhold_null_lock(lp);
|
||||
gdlm_del_lvb(lp);
|
||||
}
|
||||
|
||||
void gdlm_submit_delayed(struct gdlm_ls *ls)
|
||||
{
|
||||
struct gdlm_lock *lp, *safe;
|
||||
|
||||
spin_lock(&ls->async_lock);
|
||||
list_for_each_entry_safe(lp, safe, &ls->delayed, delay_list) {
|
||||
list_del_init(&lp->delay_list);
|
||||
list_add_tail(&lp->delay_list, &ls->submit);
|
||||
}
|
||||
spin_unlock(&ls->async_lock);
|
||||
wake_up(&ls->thread_wait);
|
||||
}
|
||||
|
||||
int gdlm_release_all_locks(struct gdlm_ls *ls)
|
||||
{
|
||||
struct gdlm_lock *lp, *safe;
|
||||
int count = 0;
|
||||
|
||||
spin_lock(&ls->async_lock);
|
||||
list_for_each_entry_safe(lp, safe, &ls->all_locks, all_list) {
|
||||
list_del_init(&lp->all_list);
|
||||
|
||||
if (lp->lvb && lp->lvb != junk_lvb)
|
||||
kfree(lp->lvb);
|
||||
kfree(lp);
|
||||
count++;
|
||||
}
|
||||
spin_unlock(&ls->async_lock);
|
||||
|
||||
return count;
|
||||
}
|
||||
|
187  fs/gfs2/locking/dlm/lock_dlm.h  Normal file
@@ -0,0 +1,187 @@
/*
|
||||
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
* Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This copyrighted material is made available to anyone wishing to use,
|
||||
* modify, copy, or redistribute it subject to the terms and conditions
|
||||
* of the GNU General Public License version 2.
|
||||
*/
|
||||
|
||||
#ifndef LOCK_DLM_DOT_H
|
||||
#define LOCK_DLM_DOT_H
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/socket.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/kobject.h>
|
||||
#include <linux/fcntl.h>
|
||||
#include <linux/wait.h>
|
||||
#include <net/sock.h>
|
||||
|
||||
#include <linux/dlm.h>
|
||||
#include <linux/lm_interface.h>
|
||||
|
||||
/*
|
||||
* Internally, we prefix things with gdlm_ and GDLM_ (for gfs-dlm) since a
|
||||
* prefix of lock_dlm_ gets awkward. Externally, GFS refers to this module
|
||||
* as "lock_dlm".
|
||||
*/
|
||||
|
||||
#define GDLM_STRNAME_BYTES 24
|
||||
#define GDLM_LVB_SIZE 32
|
||||
#define GDLM_DROP_COUNT 50000
|
||||
#define GDLM_DROP_PERIOD 60
|
||||
#define GDLM_NAME_LEN 128
|
||||
|
||||
/* GFS uses 12 bytes to identify a resource (32 bit type + 64 bit number).
|
||||
We sprintf these numbers into a 24 byte string of hex values to make them
|
||||
human-readable (to make debugging simpler.) */
|
||||
|
||||
struct gdlm_strname {
|
||||
unsigned char name[GDLM_STRNAME_BYTES];
|
||||
unsigned short namelen;
|
||||
};
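For reference, lock.c's make_strname() builds this 24-byte name with sprintf("%8x%16llx", type, number). Below is a small user-space sketch of what such a resource name looks like; the sample type and number are made up, and the extra buffer byte here is only for the terminating NUL that sprintf appends.

#include <stdio.h>

#define GDLM_STRNAME_BYTES 24

int main(void)
{
	char name[GDLM_STRNAME_BYTES + 1];	/* +1 for the terminating NUL here */
	unsigned int ln_type = 0x2;		/* sample values for illustration */
	unsigned long long ln_number = 0x1234abcdULL;

	/* Same format string make_strname() uses in lock.c: 8 hex chars for
	   the type plus 16 hex chars for the number, space padded. */
	sprintf(name, "%8x%16llx", ln_type, ln_number);
	printf("resource name: \"%s\" (%zu bytes)\n", name, sizeof(name) - 1);
	return 0;
}
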
|
||||
|
||||
enum {
|
||||
DFL_BLOCK_LOCKS = 0,
|
||||
DFL_SPECTATOR = 1,
|
||||
DFL_WITHDRAW = 2,
|
||||
};
|
||||
|
||||
struct gdlm_ls {
|
||||
u32 id;
|
||||
int jid;
|
||||
int first;
|
||||
int first_done;
|
||||
unsigned long flags;
|
||||
struct kobject kobj;
|
||||
char clustername[GDLM_NAME_LEN];
|
||||
char fsname[GDLM_NAME_LEN];
|
||||
int fsflags;
|
||||
dlm_lockspace_t *dlm_lockspace;
|
||||
lm_callback_t fscb;
|
||||
struct gfs2_sbd *sdp;
|
||||
int recover_jid;
|
||||
int recover_jid_done;
|
||||
int recover_jid_status;
|
||||
spinlock_t async_lock;
|
||||
struct list_head complete;
|
||||
struct list_head blocking;
|
||||
struct list_head delayed;
|
||||
struct list_head submit;
|
||||
struct list_head all_locks;
|
||||
u32 all_locks_count;
|
||||
wait_queue_head_t wait_control;
|
||||
struct task_struct *thread1;
|
||||
struct task_struct *thread2;
|
||||
wait_queue_head_t thread_wait;
|
||||
unsigned long drop_time;
|
||||
int drop_locks_count;
|
||||
int drop_locks_period;
|
||||
};
|
||||
|
||||
enum {
|
||||
LFL_NOBLOCK = 0,
|
||||
LFL_NOCACHE = 1,
|
||||
LFL_DLM_UNLOCK = 2,
|
||||
LFL_DLM_CANCEL = 3,
|
||||
LFL_SYNC_LVB = 4,
|
||||
LFL_FORCE_PROMOTE = 5,
|
||||
LFL_REREQUEST = 6,
|
||||
LFL_ACTIVE = 7,
|
||||
LFL_INLOCK = 8,
|
||||
LFL_CANCEL = 9,
|
||||
LFL_NOBAST = 10,
|
||||
LFL_HEADQUE = 11,
|
||||
LFL_UNLOCK_DELETE = 12,
|
||||
};
|
||||
|
||||
struct gdlm_lock {
|
||||
struct gdlm_ls *ls;
|
||||
struct lm_lockname lockname;
|
||||
char *lvb;
|
||||
struct dlm_lksb lksb;
|
||||
|
||||
s16 cur;
|
||||
s16 req;
|
||||
s16 prev_req;
|
||||
u32 lkf; /* dlm flags DLM_LKF_ */
|
||||
unsigned long flags; /* lock_dlm flags LFL_ */
|
||||
|
||||
int bast_mode; /* protected by async_lock */
|
||||
struct completion ast_wait;
|
||||
|
||||
struct list_head clist; /* complete */
|
||||
struct list_head blist; /* blocking */
|
||||
struct list_head delay_list; /* delayed */
|
||||
struct list_head all_list; /* all locks for the fs */
|
||||
struct gdlm_lock *hold_null; /* NL lock for hold_lvb */
|
||||
};
|
||||
|
||||
#define gdlm_assert(assertion, fmt, args...) \
|
||||
do { \
|
||||
if (unlikely(!(assertion))) { \
|
||||
printk(KERN_EMERG "lock_dlm: fatal assertion failed \"%s\"\n" \
|
||||
"lock_dlm: " fmt "\n", \
|
||||
#assertion, ##args); \
|
||||
BUG(); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define log_print(lev, fmt, arg...) printk(lev "lock_dlm: " fmt "\n" , ## arg)
|
||||
#define log_info(fmt, arg...) log_print(KERN_INFO , fmt , ## arg)
|
||||
#define log_error(fmt, arg...) log_print(KERN_ERR , fmt , ## arg)
|
||||
#ifdef LOCK_DLM_LOG_DEBUG
|
||||
#define log_debug(fmt, arg...) log_print(KERN_DEBUG , fmt , ## arg)
|
||||
#else
|
||||
#define log_debug(fmt, arg...)
|
||||
#endif
|
||||
|
||||
/* sysfs.c */
|
||||
|
||||
int gdlm_sysfs_init(void);
|
||||
void gdlm_sysfs_exit(void);
|
||||
int gdlm_kobject_setup(struct gdlm_ls *, struct kobject *);
|
||||
void gdlm_kobject_release(struct gdlm_ls *);
|
||||
|
||||
/* thread.c */
|
||||
|
||||
int gdlm_init_threads(struct gdlm_ls *);
|
||||
void gdlm_release_threads(struct gdlm_ls *);
|
||||
|
||||
/* lock.c */
|
||||
|
||||
s16 gdlm_make_lmstate(s16);
|
||||
void gdlm_queue_delayed(struct gdlm_lock *);
|
||||
void gdlm_submit_delayed(struct gdlm_ls *);
|
||||
int gdlm_release_all_locks(struct gdlm_ls *);
|
||||
void gdlm_delete_lp(struct gdlm_lock *);
|
||||
unsigned int gdlm_do_lock(struct gdlm_lock *);
|
||||
|
||||
int gdlm_get_lock(void *, struct lm_lockname *, void **);
|
||||
void gdlm_put_lock(void *);
|
||||
unsigned int gdlm_lock(void *, unsigned int, unsigned int, unsigned int);
|
||||
unsigned int gdlm_unlock(void *, unsigned int);
|
||||
void gdlm_cancel(void *);
|
||||
int gdlm_hold_lvb(void *, char **);
|
||||
void gdlm_unhold_lvb(void *, char *);
|
||||
|
||||
/* plock.c */
|
||||
|
||||
int gdlm_plock_init(void);
|
||||
void gdlm_plock_exit(void);
|
||||
int gdlm_plock(void *, struct lm_lockname *, struct file *, int,
|
||||
struct file_lock *);
|
||||
int gdlm_plock_get(void *, struct lm_lockname *, struct file *,
|
||||
struct file_lock *);
|
||||
int gdlm_punlock(void *, struct lm_lockname *, struct file *,
|
||||
struct file_lock *);
|
||||
#endif
|
||||
|
64  fs/gfs2/locking/dlm/main.c  Normal file
@@ -0,0 +1,64 @@
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/init.h>

#include "lock_dlm.h"

extern int gdlm_drop_count;
extern int gdlm_drop_period;

extern struct lm_lockops gdlm_ops;

static int __init init_lock_dlm(void)
{
	int error;

	error = gfs2_register_lockproto(&gdlm_ops);
	if (error) {
		printk(KERN_WARNING "lock_dlm: can't register protocol: %d\n",
		       error);
		return error;
	}

	error = gdlm_sysfs_init();
	if (error) {
		gfs2_unregister_lockproto(&gdlm_ops);
		return error;
	}

	error = gdlm_plock_init();
	if (error) {
		gdlm_sysfs_exit();
		gfs2_unregister_lockproto(&gdlm_ops);
		return error;
	}

	gdlm_drop_count = GDLM_DROP_COUNT;
	gdlm_drop_period = GDLM_DROP_PERIOD;

	printk(KERN_INFO
	       "Lock_DLM (built %s %s) installed\n", __DATE__, __TIME__);
	return 0;
}

static void __exit exit_lock_dlm(void)
{
	gdlm_plock_exit();
	gdlm_sysfs_exit();
	gfs2_unregister_lockproto(&gdlm_ops);
}

module_init(init_lock_dlm);
module_exit(exit_lock_dlm);

MODULE_DESCRIPTION("GFS DLM Locking Module");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
255  fs/gfs2/locking/dlm/mount.c  Normal file
@@ -0,0 +1,255 @@
/*
|
||||
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
* Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This copyrighted material is made available to anyone wishing to use,
|
||||
* modify, copy, or redistribute it subject to the terms and conditions
|
||||
* of the GNU General Public License version 2.
|
||||
*/
|
||||
|
||||
#include "lock_dlm.h"
|
||||
|
||||
int gdlm_drop_count;
|
||||
int gdlm_drop_period;
|
||||
const struct lm_lockops gdlm_ops;
|
||||
|
||||
|
||||
static struct gdlm_ls *init_gdlm(lm_callback_t cb, struct gfs2_sbd *sdp,
|
||||
int flags, char *table_name)
|
||||
{
|
||||
struct gdlm_ls *ls;
|
||||
char buf[256], *p;
|
||||
|
||||
ls = kzalloc(sizeof(struct gdlm_ls), GFP_KERNEL);
|
||||
if (!ls)
|
||||
return NULL;
|
||||
|
||||
ls->drop_locks_count = gdlm_drop_count;
|
||||
ls->drop_locks_period = gdlm_drop_period;
|
||||
ls->fscb = cb;
|
||||
ls->sdp = sdp;
|
||||
ls->fsflags = flags;
|
||||
spin_lock_init(&ls->async_lock);
|
||||
INIT_LIST_HEAD(&ls->complete);
|
||||
INIT_LIST_HEAD(&ls->blocking);
|
||||
INIT_LIST_HEAD(&ls->delayed);
|
||||
INIT_LIST_HEAD(&ls->submit);
|
||||
INIT_LIST_HEAD(&ls->all_locks);
|
||||
init_waitqueue_head(&ls->thread_wait);
|
||||
init_waitqueue_head(&ls->wait_control);
|
||||
ls->thread1 = NULL;
|
||||
ls->thread2 = NULL;
|
||||
ls->drop_time = jiffies;
|
||||
ls->jid = -1;
|
||||
|
||||
strncpy(buf, table_name, 256);
|
||||
buf[255] = '\0';
|
||||
|
||||
p = strstr(buf, ":");
|
||||
if (!p) {
|
||||
log_info("invalid table_name \"%s\"", table_name);
|
||||
kfree(ls);
|
||||
return NULL;
|
||||
}
|
||||
*p = '\0';
|
||||
p++;
|
||||
|
||||
strncpy(ls->clustername, buf, GDLM_NAME_LEN);
|
||||
strncpy(ls->fsname, p, GDLM_NAME_LEN);
|
||||
|
||||
return ls;
|
||||
}
|
||||
|
||||
static int make_args(struct gdlm_ls *ls, char *data_arg, int *nodir)
|
||||
{
|
||||
char data[256];
|
||||
char *options, *x, *y;
|
||||
int error = 0;
|
||||
|
||||
memset(data, 0, 256);
|
||||
strncpy(data, data_arg, 255);
|
||||
|
||||
for (options = data; (x = strsep(&options, ":")); ) {
|
||||
if (!*x)
|
||||
continue;
|
||||
|
||||
y = strchr(x, '=');
|
||||
if (y)
|
||||
*y++ = 0;
|
||||
|
||||
if (!strcmp(x, "jid")) {
|
||||
if (!y) {
|
||||
log_error("need argument to jid");
|
||||
error = -EINVAL;
|
||||
break;
|
||||
}
|
||||
sscanf(y, "%u", &ls->jid);
|
||||
|
||||
} else if (!strcmp(x, "first")) {
|
||||
if (!y) {
|
||||
log_error("need argument to first");
|
||||
error = -EINVAL;
|
||||
break;
|
||||
}
|
||||
sscanf(y, "%u", &ls->first);
|
||||
|
||||
} else if (!strcmp(x, "id")) {
|
||||
if (!y) {
|
||||
log_error("need argument to id");
|
||||
error = -EINVAL;
|
||||
break;
|
||||
}
|
||||
sscanf(y, "%u", &ls->id);
|
||||
|
||||
} else if (!strcmp(x, "nodir")) {
|
||||
if (!y) {
|
||||
log_error("need argument to nodir");
|
||||
error = -EINVAL;
|
||||
break;
|
||||
}
|
||||
sscanf(y, "%u", nodir);
|
||||
|
||||
} else {
|
||||
log_error("unkonwn option: %s", x);
|
||||
error = -EINVAL;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
static int gdlm_mount(char *table_name, char *host_data,
|
||||
lm_callback_t cb, void *cb_data,
|
||||
unsigned int min_lvb_size, int flags,
|
||||
struct lm_lockstruct *lockstruct,
|
||||
struct kobject *fskobj)
|
||||
{
|
||||
struct gdlm_ls *ls;
|
||||
int error = -ENOMEM, nodir = 0;
|
||||
|
||||
if (min_lvb_size > GDLM_LVB_SIZE)
|
||||
goto out;
|
||||
|
||||
ls = init_gdlm(cb, cb_data, flags, table_name);
|
||||
if (!ls)
|
||||
goto out;
|
||||
|
||||
error = make_args(ls, host_data, &nodir);
|
||||
if (error)
|
||||
goto out;
|
||||
|
||||
error = gdlm_init_threads(ls);
|
||||
if (error)
|
||||
goto out_free;
|
||||
|
||||
error = gdlm_kobject_setup(ls, fskobj);
|
||||
if (error)
|
||||
goto out_thread;
|
||||
|
||||
error = dlm_new_lockspace(ls->fsname, strlen(ls->fsname),
|
||||
&ls->dlm_lockspace,
|
||||
nodir ? DLM_LSFL_NODIR : 0,
|
||||
GDLM_LVB_SIZE);
|
||||
if (error) {
|
||||
log_error("dlm_new_lockspace error %d", error);
|
||||
goto out_kobj;
|
||||
}
|
||||
|
||||
lockstruct->ls_jid = ls->jid;
|
||||
lockstruct->ls_first = ls->first;
|
||||
lockstruct->ls_lockspace = ls;
|
||||
lockstruct->ls_ops = &gdlm_ops;
|
||||
lockstruct->ls_flags = 0;
|
||||
lockstruct->ls_lvb_size = GDLM_LVB_SIZE;
|
||||
return 0;
|
||||
|
||||
out_kobj:
|
||||
gdlm_kobject_release(ls);
|
||||
out_thread:
|
||||
gdlm_release_threads(ls);
|
||||
out_free:
|
||||
kfree(ls);
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
||||
static void gdlm_unmount(void *lockspace)
|
||||
{
|
||||
struct gdlm_ls *ls = lockspace;
|
||||
int rv;
|
||||
|
||||
log_debug("unmount flags %lx", ls->flags);
|
||||
|
||||
/* FIXME: serialize unmount and withdraw in case they
|
||||
happen at once. Also, if unmount follows withdraw,
|
||||
wait for withdraw to finish. */
|
||||
|
||||
if (test_bit(DFL_WITHDRAW, &ls->flags))
|
||||
goto out;
|
||||
|
||||
gdlm_kobject_release(ls);
|
||||
dlm_release_lockspace(ls->dlm_lockspace, 2);
|
||||
gdlm_release_threads(ls);
|
||||
rv = gdlm_release_all_locks(ls);
|
||||
if (rv)
|
||||
log_info("gdlm_unmount: %d stray locks freed", rv);
|
||||
out:
|
||||
kfree(ls);
|
||||
}
|
||||
|
||||
static void gdlm_recovery_done(void *lockspace, unsigned int jid,
|
||||
unsigned int message)
|
||||
{
|
||||
struct gdlm_ls *ls = lockspace;
|
||||
ls->recover_jid_done = jid;
|
||||
ls->recover_jid_status = message;
|
||||
kobject_uevent(&ls->kobj, KOBJ_CHANGE);
|
||||
}
|
||||
|
||||
static void gdlm_others_may_mount(void *lockspace)
|
||||
{
|
||||
struct gdlm_ls *ls = lockspace;
|
||||
ls->first_done = 1;
|
||||
kobject_uevent(&ls->kobj, KOBJ_CHANGE);
|
||||
}
|
||||
|
||||
/* Userspace gets the offline uevent, blocks new gfs locks on
|
||||
other mounters, and lets us know (sets WITHDRAW flag). Then,
|
||||
userspace leaves the mount group while we leave the lockspace. */
|
||||
|
||||
static void gdlm_withdraw(void *lockspace)
|
||||
{
|
||||
struct gdlm_ls *ls = lockspace;
|
||||
|
||||
kobject_uevent(&ls->kobj, KOBJ_OFFLINE);
|
||||
|
||||
wait_event_interruptible(ls->wait_control,
|
||||
test_bit(DFL_WITHDRAW, &ls->flags));
|
||||
|
||||
dlm_release_lockspace(ls->dlm_lockspace, 2);
|
||||
gdlm_release_threads(ls);
|
||||
gdlm_release_all_locks(ls);
|
||||
gdlm_kobject_release(ls);
|
||||
}
|
||||
|
||||
const struct lm_lockops gdlm_ops = {
|
||||
.lm_proto_name = "lock_dlm",
|
||||
.lm_mount = gdlm_mount,
|
||||
.lm_others_may_mount = gdlm_others_may_mount,
|
||||
.lm_unmount = gdlm_unmount,
|
||||
.lm_withdraw = gdlm_withdraw,
|
||||
.lm_get_lock = gdlm_get_lock,
|
||||
.lm_put_lock = gdlm_put_lock,
|
||||
.lm_lock = gdlm_lock,
|
||||
.lm_unlock = gdlm_unlock,
|
||||
.lm_plock = gdlm_plock,
|
||||
.lm_punlock = gdlm_punlock,
|
||||
.lm_plock_get = gdlm_plock_get,
|
||||
.lm_cancel = gdlm_cancel,
|
||||
.lm_hold_lvb = gdlm_hold_lvb,
|
||||
.lm_unhold_lvb = gdlm_unhold_lvb,
|
||||
.lm_recovery_done = gdlm_recovery_done,
|
||||
.lm_owner = THIS_MODULE,
|
||||
};
|
||||
|
301 fs/gfs2/locking/dlm/plock.c Normal file
@@ -0,0 +1,301 @@
/*
 * Copyright (C) 2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/miscdevice.h>
#include <linux/lock_dlm_plock.h>

#include "lock_dlm.h"


static spinlock_t ops_lock;
static struct list_head send_list;
static struct list_head recv_list;
static wait_queue_head_t send_wq;
static wait_queue_head_t recv_wq;

struct plock_op {
	struct list_head list;
	int done;
	struct gdlm_plock_info info;
};

static inline void set_version(struct gdlm_plock_info *info)
{
	info->version[0] = GDLM_PLOCK_VERSION_MAJOR;
	info->version[1] = GDLM_PLOCK_VERSION_MINOR;
	info->version[2] = GDLM_PLOCK_VERSION_PATCH;
}

static int check_version(struct gdlm_plock_info *info)
{
	if ((GDLM_PLOCK_VERSION_MAJOR != info->version[0]) ||
	    (GDLM_PLOCK_VERSION_MINOR < info->version[1])) {
		log_error("plock device version mismatch: "
			  "kernel (%u.%u.%u), user (%u.%u.%u)",
			  GDLM_PLOCK_VERSION_MAJOR,
			  GDLM_PLOCK_VERSION_MINOR,
			  GDLM_PLOCK_VERSION_PATCH,
			  info->version[0],
			  info->version[1],
			  info->version[2]);
		return -EINVAL;
	}
	return 0;
}

static void send_op(struct plock_op *op)
{
	set_version(&op->info);
	INIT_LIST_HEAD(&op->list);
	spin_lock(&ops_lock);
	list_add_tail(&op->list, &send_list);
	spin_unlock(&ops_lock);
	wake_up(&send_wq);
}

int gdlm_plock(void *lockspace, struct lm_lockname *name,
	       struct file *file, int cmd, struct file_lock *fl)
{
	struct gdlm_ls *ls = lockspace;
	struct plock_op *op;
	int rv;

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op)
		return -ENOMEM;

	op->info.optype = GDLM_PLOCK_OP_LOCK;
	op->info.pid = fl->fl_pid;
	op->info.ex = (fl->fl_type == F_WRLCK);
	op->info.wait = IS_SETLKW(cmd);
	op->info.fsid = ls->id;
	op->info.number = name->ln_number;
	op->info.start = fl->fl_start;
	op->info.end = fl->fl_end;
	op->info.owner = (__u64)(long) fl->fl_owner;

	send_op(op);
	wait_event(recv_wq, (op->done != 0));

	spin_lock(&ops_lock);
	if (!list_empty(&op->list)) {
		printk(KERN_INFO "plock op on list\n");
		list_del(&op->list);
	}
	spin_unlock(&ops_lock);

	rv = op->info.rv;

	if (!rv) {
		if (posix_lock_file_wait(file, fl) < 0)
			log_error("gdlm_plock: vfs lock error %x,%llx",
				  name->ln_type,
				  (unsigned long long)name->ln_number);
	}

	kfree(op);
	return rv;
}

int gdlm_punlock(void *lockspace, struct lm_lockname *name,
		 struct file *file, struct file_lock *fl)
{
	struct gdlm_ls *ls = lockspace;
	struct plock_op *op;
	int rv;

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op)
		return -ENOMEM;

	if (posix_lock_file_wait(file, fl) < 0)
		log_error("gdlm_punlock: vfs unlock error %x,%llx",
			  name->ln_type, (unsigned long long)name->ln_number);

	op->info.optype = GDLM_PLOCK_OP_UNLOCK;
	op->info.pid = fl->fl_pid;
	op->info.fsid = ls->id;
	op->info.number = name->ln_number;
	op->info.start = fl->fl_start;
	op->info.end = fl->fl_end;
	op->info.owner = (__u64)(long) fl->fl_owner;

	send_op(op);
	wait_event(recv_wq, (op->done != 0));

	spin_lock(&ops_lock);
	if (!list_empty(&op->list)) {
		printk(KERN_INFO "punlock op on list\n");
		list_del(&op->list);
	}
	spin_unlock(&ops_lock);

	rv = op->info.rv;

	kfree(op);
	return rv;
}

int gdlm_plock_get(void *lockspace, struct lm_lockname *name,
		   struct file *file, struct file_lock *fl)
{
	struct gdlm_ls *ls = lockspace;
	struct plock_op *op;
	int rv;

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op)
		return -ENOMEM;

	op->info.optype = GDLM_PLOCK_OP_GET;
	op->info.pid = fl->fl_pid;
	op->info.ex = (fl->fl_type == F_WRLCK);
	op->info.fsid = ls->id;
	op->info.number = name->ln_number;
	op->info.start = fl->fl_start;
	op->info.end = fl->fl_end;

	send_op(op);
	wait_event(recv_wq, (op->done != 0));

	spin_lock(&ops_lock);
	if (!list_empty(&op->list)) {
		printk(KERN_INFO "plock_get op on list\n");
		list_del(&op->list);
	}
	spin_unlock(&ops_lock);

	rv = op->info.rv;

	if (rv == 0)
		fl->fl_type = F_UNLCK;
	else if (rv > 0) {
		fl->fl_type = (op->info.ex) ? F_WRLCK : F_RDLCK;
		fl->fl_pid = op->info.pid;
		fl->fl_start = op->info.start;
		fl->fl_end = op->info.end;
	}

	kfree(op);
	return rv;
}

/* a read copies out one plock request from the send list */
static ssize_t dev_read(struct file *file, char __user *u, size_t count,
			loff_t *ppos)
{
	struct gdlm_plock_info info;
	struct plock_op *op = NULL;

	if (count < sizeof(info))
		return -EINVAL;

	spin_lock(&ops_lock);
	if (!list_empty(&send_list)) {
		op = list_entry(send_list.next, struct plock_op, list);
		list_move(&op->list, &recv_list);
		memcpy(&info, &op->info, sizeof(info));
	}
	spin_unlock(&ops_lock);

	if (!op)
		return -EAGAIN;

	if (copy_to_user(u, &info, sizeof(info)))
		return -EFAULT;
	return sizeof(info);
}

/* a write copies in one plock result that should match a plock_op
   on the recv list */
static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
			 loff_t *ppos)
{
	struct gdlm_plock_info info;
	struct plock_op *op;
	int found = 0;

	if (count != sizeof(info))
		return -EINVAL;

	if (copy_from_user(&info, u, sizeof(info)))
		return -EFAULT;

	if (check_version(&info))
		return -EINVAL;

	spin_lock(&ops_lock);
	list_for_each_entry(op, &recv_list, list) {
		if (op->info.fsid == info.fsid && op->info.number == info.number &&
		    op->info.owner == info.owner) {
			list_del_init(&op->list);
			found = 1;
			op->done = 1;
			memcpy(&op->info, &info, sizeof(info));
			break;
		}
	}
	spin_unlock(&ops_lock);

	if (found)
		wake_up(&recv_wq);
	else
		printk(KERN_INFO "gdlm dev_write no op %x %llx\n", info.fsid,
		       (unsigned long long)info.number);
	return count;
}

static unsigned int dev_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &send_wq, wait);

	spin_lock(&ops_lock);
	if (!list_empty(&send_list)) {
		spin_unlock(&ops_lock);
		return POLLIN | POLLRDNORM;
	}
	spin_unlock(&ops_lock);
	return 0;
}

static struct file_operations dev_fops = {
	.read  = dev_read,
	.write = dev_write,
	.poll  = dev_poll,
	.owner = THIS_MODULE
};

static struct miscdevice plock_dev_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = GDLM_PLOCK_MISC_NAME,
	.fops = &dev_fops
};

int gdlm_plock_init(void)
{
	int rv;

	spin_lock_init(&ops_lock);
	INIT_LIST_HEAD(&send_list);
	INIT_LIST_HEAD(&recv_list);
	init_waitqueue_head(&send_wq);
	init_waitqueue_head(&recv_wq);

	rv = misc_register(&plock_dev_misc);
	if (rv)
		printk(KERN_INFO "gdlm_plock_init: misc_register failed %d",
		       rv);
	return rv;
}

void gdlm_plock_exit(void)
{
	if (misc_deregister(&plock_dev_misc) < 0)
		printk(KERN_INFO "gdlm_plock_exit: misc_deregister failed");
}
226 fs/gfs2/locking/dlm/sysfs.c Normal file
@@ -0,0 +1,226 @@
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/ctype.h>
#include <linux/stat.h>

#include "lock_dlm.h"

extern struct lm_lockops gdlm_ops;

static ssize_t proto_name_show(struct gdlm_ls *ls, char *buf)
{
	return sprintf(buf, "%s\n", gdlm_ops.lm_proto_name);
}

static ssize_t block_show(struct gdlm_ls *ls, char *buf)
{
	ssize_t ret;
	int val = 0;

	if (test_bit(DFL_BLOCK_LOCKS, &ls->flags))
		val = 1;
	ret = sprintf(buf, "%d\n", val);
	return ret;
}

static ssize_t block_store(struct gdlm_ls *ls, const char *buf, size_t len)
{
	ssize_t ret = len;
	int val;

	val = simple_strtol(buf, NULL, 0);

	if (val == 1)
		set_bit(DFL_BLOCK_LOCKS, &ls->flags);
	else if (val == 0) {
		clear_bit(DFL_BLOCK_LOCKS, &ls->flags);
		gdlm_submit_delayed(ls);
	} else {
		ret = -EINVAL;
	}
	return ret;
}

static ssize_t withdraw_show(struct gdlm_ls *ls, char *buf)
{
	ssize_t ret;
	int val = 0;

	if (test_bit(DFL_WITHDRAW, &ls->flags))
		val = 1;
	ret = sprintf(buf, "%d\n", val);
	return ret;
}

static ssize_t withdraw_store(struct gdlm_ls *ls, const char *buf, size_t len)
{
	ssize_t ret = len;
	int val;

	val = simple_strtol(buf, NULL, 0);

	if (val == 1)
		set_bit(DFL_WITHDRAW, &ls->flags);
	else
		ret = -EINVAL;
	wake_up(&ls->wait_control);
	return ret;
}

static ssize_t id_show(struct gdlm_ls *ls, char *buf)
{
	return sprintf(buf, "%u\n", ls->id);
}

static ssize_t jid_show(struct gdlm_ls *ls, char *buf)
{
	return sprintf(buf, "%d\n", ls->jid);
}

static ssize_t first_show(struct gdlm_ls *ls, char *buf)
{
	return sprintf(buf, "%d\n", ls->first);
}

static ssize_t first_done_show(struct gdlm_ls *ls, char *buf)
{
	return sprintf(buf, "%d\n", ls->first_done);
}

static ssize_t recover_show(struct gdlm_ls *ls, char *buf)
{
	return sprintf(buf, "%d\n", ls->recover_jid);
}

static ssize_t recover_store(struct gdlm_ls *ls, const char *buf, size_t len)
{
	ls->recover_jid = simple_strtol(buf, NULL, 0);
	ls->fscb(ls->sdp, LM_CB_NEED_RECOVERY, &ls->recover_jid);
	return len;
}

static ssize_t recover_done_show(struct gdlm_ls *ls, char *buf)
{
	return sprintf(buf, "%d\n", ls->recover_jid_done);
}

static ssize_t recover_status_show(struct gdlm_ls *ls, char *buf)
{
	return sprintf(buf, "%d\n", ls->recover_jid_status);
}

struct gdlm_attr {
	struct attribute attr;
	ssize_t (*show)(struct gdlm_ls *, char *);
	ssize_t (*store)(struct gdlm_ls *, const char *, size_t);
};

#define GDLM_ATTR(_name,_mode,_show,_store) \
static struct gdlm_attr gdlm_attr_##_name = __ATTR(_name,_mode,_show,_store)

GDLM_ATTR(proto_name,     0444, proto_name_show,     NULL);
GDLM_ATTR(block,          0644, block_show,          block_store);
GDLM_ATTR(withdraw,       0644, withdraw_show,       withdraw_store);
GDLM_ATTR(id,             0444, id_show,             NULL);
GDLM_ATTR(jid,            0444, jid_show,            NULL);
GDLM_ATTR(first,          0444, first_show,          NULL);
GDLM_ATTR(first_done,     0444, first_done_show,     NULL);
GDLM_ATTR(recover,        0644, recover_show,        recover_store);
GDLM_ATTR(recover_done,   0444, recover_done_show,   NULL);
GDLM_ATTR(recover_status, 0444, recover_status_show, NULL);

static struct attribute *gdlm_attrs[] = {
	&gdlm_attr_proto_name.attr,
	&gdlm_attr_block.attr,
	&gdlm_attr_withdraw.attr,
	&gdlm_attr_id.attr,
	&gdlm_attr_jid.attr,
	&gdlm_attr_first.attr,
	&gdlm_attr_first_done.attr,
	&gdlm_attr_recover.attr,
	&gdlm_attr_recover_done.attr,
	&gdlm_attr_recover_status.attr,
	NULL,
};

static ssize_t gdlm_attr_show(struct kobject *kobj, struct attribute *attr,
			      char *buf)
{
	struct gdlm_ls *ls = container_of(kobj, struct gdlm_ls, kobj);
	struct gdlm_attr *a = container_of(attr, struct gdlm_attr, attr);
	return a->show ? a->show(ls, buf) : 0;
}

static ssize_t gdlm_attr_store(struct kobject *kobj, struct attribute *attr,
			       const char *buf, size_t len)
{
	struct gdlm_ls *ls = container_of(kobj, struct gdlm_ls, kobj);
	struct gdlm_attr *a = container_of(attr, struct gdlm_attr, attr);
	return a->store ? a->store(ls, buf, len) : len;
}

static struct sysfs_ops gdlm_attr_ops = {
	.show  = gdlm_attr_show,
	.store = gdlm_attr_store,
};

static struct kobj_type gdlm_ktype = {
	.default_attrs = gdlm_attrs,
	.sysfs_ops     = &gdlm_attr_ops,
};

static struct kset gdlm_kset = {
	.subsys = &kernel_subsys,
	.kobj   = {.name = "lock_dlm",},
	.ktype  = &gdlm_ktype,
};

int gdlm_kobject_setup(struct gdlm_ls *ls, struct kobject *fskobj)
{
	int error;

	error = kobject_set_name(&ls->kobj, "%s", "lock_module");
	if (error) {
		log_error("can't set kobj name %d", error);
		return error;
	}

	ls->kobj.kset = &gdlm_kset;
	ls->kobj.ktype = &gdlm_ktype;
	ls->kobj.parent = fskobj;

	error = kobject_register(&ls->kobj);
	if (error)
		log_error("can't register kobj %d", error);

	return error;
}

void gdlm_kobject_release(struct gdlm_ls *ls)
{
	kobject_unregister(&ls->kobj);
}

int gdlm_sysfs_init(void)
{
	int error;

	error = kset_register(&gdlm_kset);
	if (error)
		printk("lock_dlm: cannot register kset %d\n", error);

	return error;
}

void gdlm_sysfs_exit(void)
{
	kset_unregister(&gdlm_kset);
}
359 fs/gfs2/locking/dlm/thread.c Normal file
@@ -0,0 +1,359 @@
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include "lock_dlm.h"

/* A lock placed on this queue is re-submitted to DLM as soon as the lock_dlm
   thread gets to it. */

static void queue_submit(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;

	spin_lock(&ls->async_lock);
	list_add_tail(&lp->delay_list, &ls->submit);
	spin_unlock(&ls->async_lock);
	wake_up(&ls->thread_wait);
}

static void process_blocking(struct gdlm_lock *lp, int bast_mode)
{
	struct gdlm_ls *ls = lp->ls;
	unsigned int cb = 0;

	switch (gdlm_make_lmstate(bast_mode)) {
	case LM_ST_EXCLUSIVE:
		cb = LM_CB_NEED_E;
		break;
	case LM_ST_DEFERRED:
		cb = LM_CB_NEED_D;
		break;
	case LM_ST_SHARED:
		cb = LM_CB_NEED_S;
		break;
	default:
		gdlm_assert(0, "unknown bast mode %u", lp->bast_mode);
	}

	ls->fscb(ls->sdp, cb, &lp->lockname);
}

static void process_complete(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;
	struct lm_async_cb acb;
	s16 prev_mode = lp->cur;

	memset(&acb, 0, sizeof(acb));

	if (lp->lksb.sb_status == -DLM_ECANCEL) {
		log_info("complete dlm cancel %x,%llx flags %lx",
			 lp->lockname.ln_type,
			 (unsigned long long)lp->lockname.ln_number,
			 lp->flags);

		lp->req = lp->cur;
		acb.lc_ret |= LM_OUT_CANCELED;
		if (lp->cur == DLM_LOCK_IV)
			lp->lksb.sb_lkid = 0;
		goto out;
	}

	if (test_and_clear_bit(LFL_DLM_UNLOCK, &lp->flags)) {
		if (lp->lksb.sb_status != -DLM_EUNLOCK) {
			log_info("unlock sb_status %d %x,%llx flags %lx",
				 lp->lksb.sb_status, lp->lockname.ln_type,
				 (unsigned long long)lp->lockname.ln_number,
				 lp->flags);
			return;
		}

		lp->cur = DLM_LOCK_IV;
		lp->req = DLM_LOCK_IV;
		lp->lksb.sb_lkid = 0;

		if (test_and_clear_bit(LFL_UNLOCK_DELETE, &lp->flags)) {
			gdlm_delete_lp(lp);
			return;
		}
		goto out;
	}

	if (lp->lksb.sb_flags & DLM_SBF_VALNOTVALID)
		memset(lp->lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);

	if (lp->lksb.sb_flags & DLM_SBF_ALTMODE) {
		if (lp->req == DLM_LOCK_PR)
			lp->req = DLM_LOCK_CW;
		else if (lp->req == DLM_LOCK_CW)
			lp->req = DLM_LOCK_PR;
	}

	/*
	 * A canceled lock request.  The lock was just taken off the delayed
	 * list and was never even submitted to dlm.
	 */

	if (test_and_clear_bit(LFL_CANCEL, &lp->flags)) {
		log_info("complete internal cancel %x,%llx",
			 lp->lockname.ln_type,
			 (unsigned long long)lp->lockname.ln_number);
		lp->req = lp->cur;
		acb.lc_ret |= LM_OUT_CANCELED;
		goto out;
	}

	/*
	 * An error occured.
	 */

	if (lp->lksb.sb_status) {
		/* a "normal" error */
		if ((lp->lksb.sb_status == -EAGAIN) &&
		    (lp->lkf & DLM_LKF_NOQUEUE)) {
			lp->req = lp->cur;
			if (lp->cur == DLM_LOCK_IV)
				lp->lksb.sb_lkid = 0;
			goto out;
		}

		/* this could only happen with cancels I think */
		log_info("ast sb_status %d %x,%llx flags %lx",
			 lp->lksb.sb_status, lp->lockname.ln_type,
			 (unsigned long long)lp->lockname.ln_number,
			 lp->flags);
		return;
	}

	/*
	 * This is an AST for an EX->EX conversion for sync_lvb from GFS.
	 */

	if (test_and_clear_bit(LFL_SYNC_LVB, &lp->flags)) {
		complete(&lp->ast_wait);
		return;
	}

	/*
	 * A lock has been demoted to NL because it initially completed during
	 * BLOCK_LOCKS.  Now it must be requested in the originally requested
	 * mode.
	 */

	if (test_and_clear_bit(LFL_REREQUEST, &lp->flags)) {
		gdlm_assert(lp->req == DLM_LOCK_NL, "%x,%llx",
			    lp->lockname.ln_type,
			    (unsigned long long)lp->lockname.ln_number);
		gdlm_assert(lp->prev_req > DLM_LOCK_NL, "%x,%llx",
			    lp->lockname.ln_type,
			    (unsigned long long)lp->lockname.ln_number);

		lp->cur = DLM_LOCK_NL;
		lp->req = lp->prev_req;
		lp->prev_req = DLM_LOCK_IV;
		lp->lkf &= ~DLM_LKF_CONVDEADLK;

		set_bit(LFL_NOCACHE, &lp->flags);

		if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
		    !test_bit(LFL_NOBLOCK, &lp->flags))
			gdlm_queue_delayed(lp);
		else
			queue_submit(lp);
		return;
	}

	/*
	 * A request is granted during dlm recovery.  It may be granted
	 * because the locks of a failed node were cleared.  In that case,
	 * there may be inconsistent data beneath this lock and we must wait
	 * for recovery to complete to use it.  When gfs recovery is done this
	 * granted lock will be converted to NL and then reacquired in this
	 * granted state.
	 */

	if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
	    !test_bit(LFL_NOBLOCK, &lp->flags) &&
	    lp->req != DLM_LOCK_NL) {

		lp->cur = lp->req;
		lp->prev_req = lp->req;
		lp->req = DLM_LOCK_NL;
		lp->lkf |= DLM_LKF_CONVERT;
		lp->lkf &= ~DLM_LKF_CONVDEADLK;

		log_debug("rereq %x,%llx id %x %d,%d",
			  lp->lockname.ln_type,
			  (unsigned long long)lp->lockname.ln_number,
			  lp->lksb.sb_lkid, lp->cur, lp->req);

		set_bit(LFL_REREQUEST, &lp->flags);
		queue_submit(lp);
		return;
	}

	/*
	 * DLM demoted the lock to NL before it was granted so GFS must be
	 * told it cannot cache data for this lock.
	 */

	if (lp->lksb.sb_flags & DLM_SBF_DEMOTED)
		set_bit(LFL_NOCACHE, &lp->flags);

out:
	/*
	 * This is an internal lock_dlm lock
	 */

	if (test_bit(LFL_INLOCK, &lp->flags)) {
		clear_bit(LFL_NOBLOCK, &lp->flags);
		lp->cur = lp->req;
		complete(&lp->ast_wait);
		return;
	}

	/*
	 * Normal completion of a lock request.  Tell GFS it now has the lock.
	 */

	clear_bit(LFL_NOBLOCK, &lp->flags);
	lp->cur = lp->req;

	acb.lc_name = lp->lockname;
	acb.lc_ret |= gdlm_make_lmstate(lp->cur);

	if (!test_and_clear_bit(LFL_NOCACHE, &lp->flags) &&
	    (lp->cur > DLM_LOCK_NL) && (prev_mode > DLM_LOCK_NL))
		acb.lc_ret |= LM_OUT_CACHEABLE;

	ls->fscb(ls->sdp, LM_CB_ASYNC, &acb);
}

static inline int no_work(struct gdlm_ls *ls, int blocking)
{
	int ret;

	spin_lock(&ls->async_lock);
	ret = list_empty(&ls->complete) && list_empty(&ls->submit);
	if (ret && blocking)
		ret = list_empty(&ls->blocking);
	spin_unlock(&ls->async_lock);

	return ret;
}

static inline int check_drop(struct gdlm_ls *ls)
{
	if (!ls->drop_locks_count)
		return 0;

	if (time_after(jiffies, ls->drop_time + ls->drop_locks_period * HZ)) {
		ls->drop_time = jiffies;
		if (ls->all_locks_count >= ls->drop_locks_count)
			return 1;
	}
	return 0;
}

static int gdlm_thread(void *data)
{
	struct gdlm_ls *ls = (struct gdlm_ls *) data;
	struct gdlm_lock *lp = NULL;
	int blist = 0;
	uint8_t complete, blocking, submit, drop;
	DECLARE_WAITQUEUE(wait, current);

	/* Only thread1 is allowed to do blocking callbacks since gfs
	   may wait for a completion callback within a blocking cb. */

	if (current == ls->thread1)
		blist = 1;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&ls->thread_wait, &wait);
		if (no_work(ls, blist))
			schedule();
		remove_wait_queue(&ls->thread_wait, &wait);
		set_current_state(TASK_RUNNING);

		complete = blocking = submit = drop = 0;

		spin_lock(&ls->async_lock);

		if (blist && !list_empty(&ls->blocking)) {
			lp = list_entry(ls->blocking.next, struct gdlm_lock,
					blist);
			list_del_init(&lp->blist);
			blocking = lp->bast_mode;
			lp->bast_mode = 0;
		} else if (!list_empty(&ls->complete)) {
			lp = list_entry(ls->complete.next, struct gdlm_lock,
					clist);
			list_del_init(&lp->clist);
			complete = 1;
		} else if (!list_empty(&ls->submit)) {
			lp = list_entry(ls->submit.next, struct gdlm_lock,
					delay_list);
			list_del_init(&lp->delay_list);
			submit = 1;
		}

		drop = check_drop(ls);
		spin_unlock(&ls->async_lock);

		if (complete)
			process_complete(lp);

		else if (blocking)
			process_blocking(lp, blocking);

		else if (submit)
			gdlm_do_lock(lp);

		if (drop)
			ls->fscb(ls->sdp, LM_CB_DROPLOCKS, NULL);

		schedule();
	}

	return 0;
}

int gdlm_init_threads(struct gdlm_ls *ls)
{
	struct task_struct *p;
	int error;

	p = kthread_run(gdlm_thread, ls, "lock_dlm1");
	error = IS_ERR(p);
	if (error) {
		log_error("can't start lock_dlm1 thread %d", error);
		return error;
	}
	ls->thread1 = p;

	p = kthread_run(gdlm_thread, ls, "lock_dlm2");
	error = IS_ERR(p);
	if (error) {
		log_error("can't start lock_dlm2 thread %d", error);
		kthread_stop(ls->thread1);
		return error;
	}
	ls->thread2 = p;

	return 0;
}

void gdlm_release_threads(struct gdlm_ls *ls)
{
	kthread_stop(ls->thread1);
	kthread_stop(ls->thread2);
}
3 fs/gfs2/locking/nolock/Makefile Normal file
@@ -0,0 +1,3 @@
obj-$(CONFIG_GFS2_FS_LOCKING_NOLOCK) += lock_nolock.o
lock_nolock-y := main.o
246 fs/gfs2/locking/nolock/main.c Normal file
@@ -0,0 +1,246 @@
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/smp_lock.h>
#include <linux/lm_interface.h>

struct nolock_lockspace {
	unsigned int nl_lvb_size;
};

static const struct lm_lockops nolock_ops;

static int nolock_mount(char *table_name, char *host_data,
			lm_callback_t cb, void *cb_data,
			unsigned int min_lvb_size, int flags,
			struct lm_lockstruct *lockstruct,
			struct kobject *fskobj)
{
	char *c;
	unsigned int jid;
	struct nolock_lockspace *nl;

	c = strstr(host_data, "jid=");
	if (!c)
		jid = 0;
	else {
		c += 4;
		sscanf(c, "%u", &jid);
	}

	nl = kzalloc(sizeof(struct nolock_lockspace), GFP_KERNEL);
	if (!nl)
		return -ENOMEM;

	nl->nl_lvb_size = min_lvb_size;

	lockstruct->ls_jid = jid;
	lockstruct->ls_first = 1;
	lockstruct->ls_lvb_size = min_lvb_size;
	lockstruct->ls_lockspace = nl;
	lockstruct->ls_ops = &nolock_ops;
	lockstruct->ls_flags = LM_LSFLAG_LOCAL;

	return 0;
}

static void nolock_others_may_mount(void *lockspace)
{
}

static void nolock_unmount(void *lockspace)
{
	struct nolock_lockspace *nl = lockspace;
	kfree(nl);
}

static void nolock_withdraw(void *lockspace)
{
}

/**
 * nolock_get_lock - get a lm_lock_t given a descripton of the lock
 * @lockspace: the lockspace the lock lives in
 * @name: the name of the lock
 * @lockp: return the lm_lock_t here
 *
 * Returns: 0 on success, -EXXX on failure
 */

static int nolock_get_lock(void *lockspace, struct lm_lockname *name,
			   void **lockp)
{
	*lockp = lockspace;
	return 0;
}

/**
 * nolock_put_lock - get rid of a lock structure
 * @lock: the lock to throw away
 *
 */

static void nolock_put_lock(void *lock)
{
}

/**
 * nolock_lock - acquire a lock
 * @lock: the lock to manipulate
 * @cur_state: the current state
 * @req_state: the requested state
 * @flags: modifier flags
 *
 * Returns: A bitmap of LM_OUT_*
 */

static unsigned int nolock_lock(void *lock, unsigned int cur_state,
				unsigned int req_state, unsigned int flags)
{
	return req_state | LM_OUT_CACHEABLE;
}

/**
 * nolock_unlock - unlock a lock
 * @lock: the lock to manipulate
 * @cur_state: the current state
 *
 * Returns: 0
 */

static unsigned int nolock_unlock(void *lock, unsigned int cur_state)
{
	return 0;
}

static void nolock_cancel(void *lock)
{
}

/**
 * nolock_hold_lvb - hold on to a lock value block
 * @lock: the lock the LVB is associated with
 * @lvbp: return the lm_lvb_t here
 *
 * Returns: 0 on success, -EXXX on failure
 */

static int nolock_hold_lvb(void *lock, char **lvbp)
{
	struct nolock_lockspace *nl = lock;
	int error = 0;

	*lvbp = kzalloc(nl->nl_lvb_size, GFP_KERNEL);
	if (!*lvbp)
		error = -ENOMEM;

	return error;
}

/**
 * nolock_unhold_lvb - release a LVB
 * @lock: the lock the LVB is associated with
 * @lvb: the lock value block
 *
 */

static void nolock_unhold_lvb(void *lock, char *lvb)
{
	kfree(lvb);
}

static int nolock_plock_get(void *lockspace, struct lm_lockname *name,
			    struct file *file, struct file_lock *fl)
{
	struct file_lock tmp;
	int ret;

	ret = posix_test_lock(file, fl, &tmp);
	fl->fl_type = F_UNLCK;
	if (ret)
		memcpy(fl, &tmp, sizeof(struct file_lock));

	return 0;
}

static int nolock_plock(void *lockspace, struct lm_lockname *name,
			struct file *file, int cmd, struct file_lock *fl)
{
	int error;
	error = posix_lock_file_wait(file, fl);
	return error;
}

static int nolock_punlock(void *lockspace, struct lm_lockname *name,
			  struct file *file, struct file_lock *fl)
{
	int error;
	error = posix_lock_file_wait(file, fl);
	return error;
}

static void nolock_recovery_done(void *lockspace, unsigned int jid,
				 unsigned int message)
{
}

static const struct lm_lockops nolock_ops = {
	.lm_proto_name = "lock_nolock",
	.lm_mount = nolock_mount,
	.lm_others_may_mount = nolock_others_may_mount,
	.lm_unmount = nolock_unmount,
	.lm_withdraw = nolock_withdraw,
	.lm_get_lock = nolock_get_lock,
	.lm_put_lock = nolock_put_lock,
	.lm_lock = nolock_lock,
	.lm_unlock = nolock_unlock,
	.lm_cancel = nolock_cancel,
	.lm_hold_lvb = nolock_hold_lvb,
	.lm_unhold_lvb = nolock_unhold_lvb,
	.lm_plock_get = nolock_plock_get,
	.lm_plock = nolock_plock,
	.lm_punlock = nolock_punlock,
	.lm_recovery_done = nolock_recovery_done,
	.lm_owner = THIS_MODULE,
};

static int __init init_nolock(void)
{
	int error;

	error = gfs2_register_lockproto(&nolock_ops);
	if (error) {
		printk(KERN_WARNING
		       "lock_nolock: can't register protocol: %d\n", error);
		return error;
	}

	printk(KERN_INFO
	       "Lock_Nolock (built %s %s) installed\n", __DATE__, __TIME__);
	return 0;
}

static void __exit exit_nolock(void)
{
	gfs2_unregister_lockproto(&nolock_ops);
}

module_init(init_nolock);
module_exit(exit_nolock);

MODULE_DESCRIPTION("GFS Nolock Locking Module");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
687
fs/gfs2/log.c
Normal file
687
fs/gfs2/log.c
Normal file
|
@ -0,0 +1,687 @@
|
|||
/*
|
||||
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This copyrighted material is made available to anyone wishing to use,
|
||||
* modify, copy, or redistribute it subject to the terms and conditions
|
||||
* of the GNU General Public License version 2.
|
||||
*/
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/buffer_head.h>
|
||||
#include <linux/gfs2_ondisk.h>
|
||||
#include <linux/crc32.h>
|
||||
#include <linux/lm_interface.h>
|
||||
|
||||
#include "gfs2.h"
|
||||
#include "incore.h"
|
||||
#include "bmap.h"
|
||||
#include "glock.h"
|
||||
#include "log.h"
|
||||
#include "lops.h"
|
||||
#include "meta_io.h"
|
||||
#include "util.h"
|
||||
#include "dir.h"
|
||||
|
||||
#define PULL 1
|
||||
|
||||
/**
|
||||
* gfs2_struct2blk - compute stuff
|
||||
* @sdp: the filesystem
|
||||
* @nstruct: the number of structures
|
||||
* @ssize: the size of the structures
|
||||
*
|
||||
* Compute the number of log descriptor blocks needed to hold a certain number
|
||||
* of structures of a certain size.
|
||||
*
|
||||
* Returns: the number of blocks needed (minimum is always 1)
|
||||
*/
|
||||
|
||||
unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
|
||||
unsigned int ssize)
|
||||
{
|
||||
unsigned int blks;
|
||||
unsigned int first, second;
|
||||
|
||||
blks = 1;
|
||||
first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize;
|
||||
|
||||
if (nstruct > first) {
|
||||
second = (sdp->sd_sb.sb_bsize -
|
||||
sizeof(struct gfs2_meta_header)) / ssize;
|
||||
blks += DIV_ROUND_UP(nstruct - first, second);
|
||||
}
|
||||
|
||||
return blks;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_ail1_start_one - Start I/O on a part of the AIL
|
||||
* @sdp: the filesystem
|
||||
* @tr: the part of the AIL
|
||||
*
|
||||
*/
|
||||
|
||||
static void gfs2_ail1_start_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
|
||||
{
|
||||
struct gfs2_bufdata *bd, *s;
|
||||
struct buffer_head *bh;
|
||||
int retry;
|
||||
|
||||
BUG_ON(!spin_is_locked(&sdp->sd_log_lock));
|
||||
|
||||
do {
|
||||
retry = 0;
|
||||
|
||||
list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
|
||||
bd_ail_st_list) {
|
||||
bh = bd->bd_bh;
|
||||
|
||||
gfs2_assert(sdp, bd->bd_ail == ai);
|
||||
|
||||
if (!buffer_busy(bh)) {
|
||||
if (!buffer_uptodate(bh)) {
|
||||
gfs2_log_unlock(sdp);
|
||||
gfs2_io_error_bh(sdp, bh);
|
||||
gfs2_log_lock(sdp);
|
||||
}
|
||||
list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!buffer_dirty(bh))
|
||||
continue;
|
||||
|
||||
list_move(&bd->bd_ail_st_list, &ai->ai_ail1_list);
|
||||
|
||||
gfs2_log_unlock(sdp);
|
||||
wait_on_buffer(bh);
|
||||
ll_rw_block(WRITE, 1, &bh);
|
||||
gfs2_log_lock(sdp);
|
||||
|
||||
retry = 1;
|
||||
break;
|
||||
}
|
||||
} while (retry);
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
|
||||
* @sdp: the filesystem
|
||||
* @ai: the AIL entry
|
||||
*
|
||||
*/
|
||||
|
||||
static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai, int flags)
|
||||
{
|
||||
struct gfs2_bufdata *bd, *s;
|
||||
struct buffer_head *bh;
|
||||
|
||||
list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
|
||||
bd_ail_st_list) {
|
||||
bh = bd->bd_bh;
|
||||
|
||||
gfs2_assert(sdp, bd->bd_ail == ai);
|
||||
|
||||
if (buffer_busy(bh)) {
|
||||
if (flags & DIO_ALL)
|
||||
continue;
|
||||
else
|
||||
break;
|
||||
}
|
||||
|
||||
if (!buffer_uptodate(bh))
|
||||
gfs2_io_error_bh(sdp, bh);
|
||||
|
||||
list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
|
||||
}
|
||||
|
||||
return list_empty(&ai->ai_ail1_list);
|
||||
}
|
||||
|
||||
void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags)
|
||||
{
|
||||
struct list_head *head = &sdp->sd_ail1_list;
|
||||
u64 sync_gen;
|
||||
struct list_head *first;
|
||||
struct gfs2_ail *first_ai, *ai, *tmp;
|
||||
int done = 0;
|
||||
|
||||
gfs2_log_lock(sdp);
|
||||
if (list_empty(head)) {
|
||||
gfs2_log_unlock(sdp);
|
||||
return;
|
||||
}
|
||||
sync_gen = sdp->sd_ail_sync_gen++;
|
||||
|
||||
first = head->prev;
|
||||
first_ai = list_entry(first, struct gfs2_ail, ai_list);
|
||||
first_ai->ai_sync_gen = sync_gen;
|
||||
gfs2_ail1_start_one(sdp, first_ai); /* This may drop log lock */
|
||||
|
||||
if (flags & DIO_ALL)
|
||||
first = NULL;
|
||||
|
||||
while(!done) {
|
||||
if (first && (head->prev != first ||
|
||||
gfs2_ail1_empty_one(sdp, first_ai, 0)))
|
||||
break;
|
||||
|
||||
done = 1;
|
||||
list_for_each_entry_safe_reverse(ai, tmp, head, ai_list) {
|
||||
if (ai->ai_sync_gen >= sync_gen)
|
||||
continue;
|
||||
ai->ai_sync_gen = sync_gen;
|
||||
gfs2_ail1_start_one(sdp, ai); /* This may drop log lock */
|
||||
done = 0;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
gfs2_log_unlock(sdp);
|
||||
}
|
||||
|
||||
int gfs2_ail1_empty(struct gfs2_sbd *sdp, int flags)
|
||||
{
|
||||
struct gfs2_ail *ai, *s;
|
||||
int ret;
|
||||
|
||||
gfs2_log_lock(sdp);
|
||||
|
||||
list_for_each_entry_safe_reverse(ai, s, &sdp->sd_ail1_list, ai_list) {
|
||||
if (gfs2_ail1_empty_one(sdp, ai, flags))
|
||||
list_move(&ai->ai_list, &sdp->sd_ail2_list);
|
||||
else if (!(flags & DIO_ALL))
|
||||
break;
|
||||
}
|
||||
|
||||
ret = list_empty(&sdp->sd_ail1_list);
|
||||
|
||||
gfs2_log_unlock(sdp);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* gfs2_ail2_empty_one - Check whether or not a trans in the AIL has been synced
|
||||
* @sdp: the filesystem
|
||||
* @ai: the AIL entry
|
||||
*
|
||||
*/
|
||||
|
||||
static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
|
||||
{
|
||||
struct list_head *head = &ai->ai_ail2_list;
|
||||
struct gfs2_bufdata *bd;
|
||||
|
||||
while (!list_empty(head)) {
|
||||
bd = list_entry(head->prev, struct gfs2_bufdata,
|
||||
bd_ail_st_list);
|
||||
gfs2_assert(sdp, bd->bd_ail == ai);
|
||||
bd->bd_ail = NULL;
|
||||
list_del(&bd->bd_ail_st_list);
|
||||
list_del(&bd->bd_ail_gl_list);
|
||||
atomic_dec(&bd->bd_gl->gl_ail_count);
|
||||
brelse(bd->bd_bh);
|
||||
}
|
||||
}
|
||||
|
||||
static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
|
||||
{
|
||||
struct gfs2_ail *ai, *safe;
|
||||
unsigned int old_tail = sdp->sd_log_tail;
|
||||
int wrap = (new_tail < old_tail);
|
||||
int a, b, rm;
|
||||
|
||||
gfs2_log_lock(sdp);
|
||||
|
||||
list_for_each_entry_safe(ai, safe, &sdp->sd_ail2_list, ai_list) {
|
||||
a = (old_tail <= ai->ai_first);
|
||||
b = (ai->ai_first < new_tail);
|
||||
rm = (wrap) ? (a || b) : (a && b);
|
||||
if (!rm)
|
||||
continue;
|
||||
|
||||
gfs2_ail2_empty_one(sdp, ai);
|
||||
list_del(&ai->ai_list);
|
||||
gfs2_assert_warn(sdp, list_empty(&ai->ai_ail1_list));
|
||||
gfs2_assert_warn(sdp, list_empty(&ai->ai_ail2_list));
|
||||
kfree(ai);
|
||||
}
|
||||
|
||||
gfs2_log_unlock(sdp);
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_log_reserve - Make a log reservation
|
||||
* @sdp: The GFS2 superblock
|
||||
* @blks: The number of blocks to reserve
|
||||
*
|
||||
* Returns: errno
|
||||
*/
|
||||
|
||||
int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
|
||||
{
|
||||
unsigned int try = 0;
|
||||
|
||||
if (gfs2_assert_warn(sdp, blks) ||
|
||||
gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&sdp->sd_log_reserve_mutex);
|
||||
gfs2_log_lock(sdp);
|
||||
while(sdp->sd_log_blks_free <= blks) {
|
||||
gfs2_log_unlock(sdp);
|
||||
gfs2_ail1_empty(sdp, 0);
|
||||
gfs2_log_flush(sdp, NULL);
|
||||
|
||||
if (try++)
|
||||
gfs2_ail1_start(sdp, 0);
|
||||
gfs2_log_lock(sdp);
|
||||
}
|
||||
sdp->sd_log_blks_free -= blks;
|
||||
gfs2_log_unlock(sdp);
|
||||
mutex_unlock(&sdp->sd_log_reserve_mutex);
|
||||
|
||||
down_read(&sdp->sd_log_flush_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_log_release - Release a given number of log blocks
|
||||
* @sdp: The GFS2 superblock
|
||||
* @blks: The number of blocks
|
||||
*
|
||||
*/
|
||||
|
||||
void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
|
||||
{
|
||||
|
||||
gfs2_log_lock(sdp);
|
||||
sdp->sd_log_blks_free += blks;
|
||||
gfs2_assert_withdraw(sdp,
|
||||
sdp->sd_log_blks_free <= sdp->sd_jdesc->jd_blocks);
|
||||
gfs2_log_unlock(sdp);
|
||||
up_read(&sdp->sd_log_flush_lock);
|
||||
}
|
||||
|
||||
static u64 log_bmap(struct gfs2_sbd *sdp, unsigned int lbn)
|
||||
{
|
||||
int error;
|
||||
struct buffer_head bh_map;
|
||||
|
||||
error = gfs2_block_map(sdp->sd_jdesc->jd_inode, lbn, 0, &bh_map, 1);
|
||||
if (error || !bh_map.b_blocknr)
|
||||
printk(KERN_INFO "error=%d, dbn=%llu lbn=%u", error, bh_map.b_blocknr, lbn);
|
||||
gfs2_assert_withdraw(sdp, !error && bh_map.b_blocknr);
|
||||
|
||||
return bh_map.b_blocknr;
|
||||
}
|
||||
|
||||
/**
|
||||
* log_distance - Compute distance between two journal blocks
|
||||
* @sdp: The GFS2 superblock
|
||||
* @newer: The most recent journal block of the pair
|
||||
* @older: The older journal block of the pair
|
||||
*
|
||||
* Compute the distance (in the journal direction) between two
|
||||
* blocks in the journal
|
||||
*
|
||||
* Returns: the distance in blocks
|
||||
*/
|
||||
|
||||
static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
|
||||
unsigned int older)
|
||||
{
|
||||
int dist;
|
||||
|
||||
dist = newer - older;
|
||||
if (dist < 0)
|
||||
dist += sdp->sd_jdesc->jd_blocks;
|
||||
|
||||
return dist;
|
||||
}
|
||||
|
||||
static unsigned int current_tail(struct gfs2_sbd *sdp)
|
||||
{
|
||||
struct gfs2_ail *ai;
|
||||
unsigned int tail;
|
||||
|
||||
gfs2_log_lock(sdp);
|
||||
|
||||
if (list_empty(&sdp->sd_ail1_list)) {
|
||||
tail = sdp->sd_log_head;
|
||||
} else {
|
||||
ai = list_entry(sdp->sd_ail1_list.prev, struct gfs2_ail, ai_list);
|
||||
tail = ai->ai_first;
|
||||
}
|
||||
|
||||
gfs2_log_unlock(sdp);
|
||||
|
||||
return tail;
|
||||
}
|
||||
|
||||
static inline void log_incr_head(struct gfs2_sbd *sdp)
|
||||
{
|
||||
if (sdp->sd_log_flush_head == sdp->sd_log_tail)
|
||||
gfs2_assert_withdraw(sdp, sdp->sd_log_flush_head == sdp->sd_log_head);
|
||||
|
||||
if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
|
||||
sdp->sd_log_flush_head = 0;
|
||||
sdp->sd_log_flush_wrapped = 1;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_log_get_buf - Get and initialize a buffer to use for log control data
|
||||
* @sdp: The GFS2 superblock
|
||||
*
|
||||
* Returns: the buffer_head
|
||||
*/
|
||||
|
||||
struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp)
|
||||
{
|
||||
u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
|
||||
struct gfs2_log_buf *lb;
|
||||
struct buffer_head *bh;
|
||||
|
||||
lb = kzalloc(sizeof(struct gfs2_log_buf), GFP_NOFS | __GFP_NOFAIL);
|
||||
list_add(&lb->lb_list, &sdp->sd_log_flush_list);
|
||||
|
||||
bh = lb->lb_bh = sb_getblk(sdp->sd_vfs, blkno);
|
||||
lock_buffer(bh);
|
||||
memset(bh->b_data, 0, bh->b_size);
|
||||
set_buffer_uptodate(bh);
|
||||
clear_buffer_dirty(bh);
|
||||
unlock_buffer(bh);
|
||||
|
||||
log_incr_head(sdp);
|
||||
|
||||
return bh;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_log_fake_buf - Build a fake buffer head to write metadata buffer to log
|
||||
* @sdp: the filesystem
|
||||
* @data: the data the buffer_head should point to
|
||||
*
|
||||
* Returns: the log buffer descriptor
|
||||
*/
|
||||
|
||||
struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp,
|
||||
struct buffer_head *real)
|
||||
{
|
||||
u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
|
||||
struct gfs2_log_buf *lb;
|
||||
struct buffer_head *bh;
|
||||
|
||||
lb = kzalloc(sizeof(struct gfs2_log_buf), GFP_NOFS | __GFP_NOFAIL);
|
||||
list_add(&lb->lb_list, &sdp->sd_log_flush_list);
|
||||
lb->lb_real = real;
|
||||
|
||||
bh = lb->lb_bh = alloc_buffer_head(GFP_NOFS | __GFP_NOFAIL);
|
||||
atomic_set(&bh->b_count, 1);
|
||||
bh->b_state = (1 << BH_Mapped) | (1 << BH_Uptodate);
|
||||
set_bh_page(bh, real->b_page, bh_offset(real));
|
||||
bh->b_blocknr = blkno;
|
||||
bh->b_size = sdp->sd_sb.sb_bsize;
|
||||
bh->b_bdev = sdp->sd_vfs->s_bdev;
|
||||
|
||||
log_incr_head(sdp);
|
||||
|
||||
return bh;
|
||||
}
|
||||
|
||||
static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail, int pull)
|
||||
{
|
||||
unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);
|
||||
|
||||
ail2_empty(sdp, new_tail);
|
||||
|
||||
gfs2_log_lock(sdp);
|
||||
sdp->sd_log_blks_free += dist - (pull ? 1 : 0);
|
||||
gfs2_assert_withdraw(sdp, sdp->sd_log_blks_free <= sdp->sd_jdesc->jd_blocks);
|
||||
gfs2_log_unlock(sdp);
|
||||
|
||||
sdp->sd_log_tail = new_tail;
|
||||
}
|
||||
|
||||
/**
|
||||
* log_write_header - Get and initialize a journal header buffer
|
||||
* @sdp: The GFS2 superblock
|
||||
*
|
||||
* Returns: the initialized log buffer descriptor
|
||||
*/
|
||||
|
||||
static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
|
||||
{
|
||||
u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
|
||||
struct buffer_head *bh;
|
||||
struct gfs2_log_header *lh;
|
||||
unsigned int tail;
|
||||
u32 hash;
|
||||
|
||||
bh = sb_getblk(sdp->sd_vfs, blkno);
|
||||
lock_buffer(bh);
|
||||
memset(bh->b_data, 0, bh->b_size);
|
||||
set_buffer_uptodate(bh);
|
||||
clear_buffer_dirty(bh);
|
||||
unlock_buffer(bh);
|
||||
|
||||
gfs2_ail1_empty(sdp, 0);
|
||||
tail = current_tail(sdp);
|
||||
|
||||
lh = (struct gfs2_log_header *)bh->b_data;
|
||||
memset(lh, 0, sizeof(struct gfs2_log_header));
|
||||
lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
|
||||
lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
|
||||
lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
|
||||
lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
|
||||
lh->lh_flags = cpu_to_be32(flags);
|
||||
lh->lh_tail = cpu_to_be32(tail);
|
||||
lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
|
||||
hash = gfs2_disk_hash(bh->b_data, sizeof(struct gfs2_log_header));
|
||||
lh->lh_hash = cpu_to_be32(hash);
|
||||
|
||||
set_buffer_dirty(bh);
|
||||
if (sync_dirty_buffer(bh))
|
||||
gfs2_io_error_bh(sdp, bh);
|
||||
brelse(bh);
|
||||
|
||||
if (sdp->sd_log_tail != tail)
|
||||
log_pull_tail(sdp, tail, pull);
|
||||
else
|
||||
gfs2_assert_withdraw(sdp, !pull);
|
||||
|
||||
sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
|
||||
log_incr_head(sdp);
|
||||
}
|
||||
|
||||
static void log_flush_commit(struct gfs2_sbd *sdp)
|
||||
{
|
||||
struct list_head *head = &sdp->sd_log_flush_list;
|
||||
struct gfs2_log_buf *lb;
|
||||
struct buffer_head *bh;
|
||||
|
||||
while (!list_empty(head)) {
|
||||
lb = list_entry(head->next, struct gfs2_log_buf, lb_list);
|
||||
list_del(&lb->lb_list);
|
||||
bh = lb->lb_bh;
|
||||
|
||||
wait_on_buffer(bh);
|
||||
if (!buffer_uptodate(bh))
|
||||
gfs2_io_error_bh(sdp, bh);
|
||||
if (lb->lb_real) {
|
||||
while (atomic_read(&bh->b_count) != 1) /* Grrrr... */
|
||||
schedule();
|
||||
free_buffer_head(bh);
|
||||
} else
|
||||
brelse(bh);
|
||||
kfree(lb);
|
||||
}
|
||||
|
||||
log_write_header(sdp, 0, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_log_flush - flush incore transaction(s)
|
||||
* @sdp: the filesystem
|
||||
* @gl: The glock structure to flush. If NULL, flush the whole incore log
|
||||
*
|
||||
*/
|
||||
|
||||
void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
|
||||
{
|
||||
struct gfs2_ail *ai;
|
||||
|
||||
down_write(&sdp->sd_log_flush_lock);
|
||||
|
||||
if (gl) {
|
||||
gfs2_log_lock(sdp);
|
||||
if (list_empty(&gl->gl_le.le_list)) {
|
||||
gfs2_log_unlock(sdp);
|
||||
up_write(&sdp->sd_log_flush_lock);
|
||||
return;
|
||||
}
|
||||
gfs2_log_unlock(sdp);
|
||||
}
|
||||
|
||||
ai = kzalloc(sizeof(struct gfs2_ail), GFP_NOFS | __GFP_NOFAIL);
|
||||
INIT_LIST_HEAD(&ai->ai_ail1_list);
|
||||
INIT_LIST_HEAD(&ai->ai_ail2_list);
|
||||
|
||||
gfs2_assert_withdraw(sdp, sdp->sd_log_num_buf == sdp->sd_log_commited_buf);
|
||||
gfs2_assert_withdraw(sdp,
|
||||
sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);
|
||||
|
||||
sdp->sd_log_flush_head = sdp->sd_log_head;
|
||||
sdp->sd_log_flush_wrapped = 0;
|
||||
ai->ai_first = sdp->sd_log_flush_head;
|
||||
|
||||
lops_before_commit(sdp);
|
||||
if (!list_empty(&sdp->sd_log_flush_list))
|
||||
log_flush_commit(sdp);
|
||||
else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle)
|
||||
log_write_header(sdp, 0, PULL);
|
||||
lops_after_commit(sdp, ai);
|
||||
sdp->sd_log_head = sdp->sd_log_flush_head;
|
||||
|
||||
sdp->sd_log_blks_free -= sdp->sd_log_num_hdrs;
|
||||
|
||||
sdp->sd_log_blks_reserved = 0;
|
||||
sdp->sd_log_commited_buf = 0;
|
||||
sdp->sd_log_num_hdrs = 0;
|
||||
sdp->sd_log_commited_revoke = 0;
|
||||
|
||||
gfs2_log_lock(sdp);
|
||||
if (!list_empty(&ai->ai_ail1_list)) {
|
||||
list_add(&ai->ai_list, &sdp->sd_ail1_list);
|
||||
ai = NULL;
|
||||
}
|
||||
gfs2_log_unlock(sdp);
|
||||
|
||||
sdp->sd_vfs->s_dirt = 0;
|
||||
up_write(&sdp->sd_log_flush_lock);
|
||||
|
||||
kfree(ai);
|
||||
}
|
||||
|
||||
static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
|
||||
{
|
||||
unsigned int reserved = 0;
|
||||
unsigned int old;
|
||||
|
||||
gfs2_log_lock(sdp);
|
||||
|
||||
sdp->sd_log_commited_buf += tr->tr_num_buf_new - tr->tr_num_buf_rm;
|
||||
gfs2_assert_withdraw(sdp, ((int)sdp->sd_log_commited_buf) >= 0);
|
||||
sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
|
||||
gfs2_assert_withdraw(sdp, ((int)sdp->sd_log_commited_revoke) >= 0);
|
||||
|
||||
if (sdp->sd_log_commited_buf)
|
||||
reserved += sdp->sd_log_commited_buf;
|
||||
if (sdp->sd_log_commited_revoke)
|
||||
reserved += gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
|
||||
sizeof(u64));
|
||||
if (reserved)
|
||||
reserved++;
|
||||
|
||||
old = sdp->sd_log_blks_free;
|
||||
sdp->sd_log_blks_free += tr->tr_reserved -
|
||||
(reserved - sdp->sd_log_blks_reserved);
|
||||
|
||||
gfs2_assert_withdraw(sdp, sdp->sd_log_blks_free >= old);
|
||||
gfs2_assert_withdraw(sdp,
|
||||
sdp->sd_log_blks_free <= sdp->sd_jdesc->jd_blocks +
|
||||
sdp->sd_log_num_hdrs);
|
||||
|
||||
sdp->sd_log_blks_reserved = reserved;
|
||||
|
||||
gfs2_log_unlock(sdp);
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_log_commit - Commit a transaction to the log
|
||||
* @sdp: the filesystem
|
||||
* @tr: the transaction
|
||||
*
|
||||
*/
|
||||
|
||||
void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
|
||||
{
|
||||
log_refund(sdp, tr);
|
||||
lops_incore_commit(sdp, tr);
|
||||
|
||||
sdp->sd_vfs->s_dirt = 1;
|
||||
up_read(&sdp->sd_log_flush_lock);
|
||||
|
||||
gfs2_log_lock(sdp);
|
||||
if (sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks)) {
|
||||
gfs2_log_unlock(sdp);
|
||||
gfs2_log_flush(sdp, NULL);
|
||||
} else {
|
||||
gfs2_log_unlock(sdp);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_log_shutdown - write a shutdown header into a journal
|
||||
* @sdp: the filesystem
|
||||
*
|
||||
*/
|
||||
|
||||
void gfs2_log_shutdown(struct gfs2_sbd *sdp)
|
||||
{
|
||||
down_write(&sdp->sd_log_flush_lock);
|
||||
|
||||
gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
|
||||
gfs2_assert_withdraw(sdp, !sdp->sd_log_num_gl);
|
||||
gfs2_assert_withdraw(sdp, !sdp->sd_log_num_buf);
|
||||
gfs2_assert_withdraw(sdp, !sdp->sd_log_num_jdata);
|
||||
gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
|
||||
gfs2_assert_withdraw(sdp, !sdp->sd_log_num_rg);
|
||||
gfs2_assert_withdraw(sdp, !sdp->sd_log_num_databuf);
|
||||
gfs2_assert_withdraw(sdp, !sdp->sd_log_num_hdrs);
|
||||
gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));
|
||||
|
||||
sdp->sd_log_flush_head = sdp->sd_log_head;
|
||||
sdp->sd_log_flush_wrapped = 0;
|
||||
|
||||
log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT, 0);
|
||||
|
||||
gfs2_assert_warn(sdp, sdp->sd_log_blks_free == sdp->sd_jdesc->jd_blocks);
|
||||
gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
|
||||
gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));
|
||||
|
||||
sdp->sd_log_head = sdp->sd_log_flush_head;
|
||||
sdp->sd_log_tail = sdp->sd_log_head;
|
||||
|
||||
up_write(&sdp->sd_log_flush_lock);
|
||||
}
|
||||
|
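Not part of the commit: the log code above drains its lists by repeatedly taking the first entry until the list is empty (see log_flush_commit() at the top of this hunk and the *_after_commit() handlers in lops.c below). A minimal userspace sketch of that consume-from-head pattern; the list primitives and struct item are stand-ins, not GFS2 or kernel types.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-ins for the kernel list primitives used above. */
struct list_head { struct list_head *next, *prev; };

#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add(struct list_head *e, struct list_head *head)
{
	e->next = head->next;
	e->prev = head;
	head->next->prev = e;
	head->next = e;
}

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

struct item { int id; struct list_head list; };

int main(void)
{
	struct list_head head;
	int i;

	INIT_LIST_HEAD(&head);
	for (i = 0; i < 3; i++) {
		struct item *it = malloc(sizeof(*it));
		it->id = i;
		list_add(&it->list, &head);
	}

	/* Consume from the head until empty, as log_flush_commit() does. */
	while (!list_empty(&head)) {
		struct item *it = list_entry(head.next, struct item, list);
		list_del(&it->list);
		printf("flushing item %d\n", it->id);
		free(it);
	}
	return 0;
}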
65
fs/gfs2/log.h
Normal file
@@ -0,0 +1,65 @@
/*
|
||||
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This copyrighted material is made available to anyone wishing to use,
|
||||
* modify, copy, or redistribute it subject to the terms and conditions
|
||||
* of the GNU General Public License version 2.
|
||||
*/
|
||||
|
||||
#ifndef __LOG_DOT_H__
|
||||
#define __LOG_DOT_H__
|
||||
|
||||
#include <linux/list.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include "incore.h"
|
||||
|
||||
/**
|
||||
* gfs2_log_lock - acquire the right to mess with the log manager
|
||||
* @sdp: the filesystem
|
||||
*
|
||||
*/
|
||||
|
||||
static inline void gfs2_log_lock(struct gfs2_sbd *sdp)
|
||||
{
|
||||
spin_lock(&sdp->sd_log_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_log_unlock - release the right to mess with the log manager
|
||||
* @sdp: the filesystem
|
||||
*
|
||||
*/
|
||||
|
||||
static inline void gfs2_log_unlock(struct gfs2_sbd *sdp)
|
||||
{
|
||||
spin_unlock(&sdp->sd_log_lock);
|
||||
}
|
||||
|
||||
static inline void gfs2_log_pointers_init(struct gfs2_sbd *sdp,
|
||||
unsigned int value)
|
||||
{
|
||||
if (++value == sdp->sd_jdesc->jd_blocks) {
|
||||
value = 0;
|
||||
}
|
||||
sdp->sd_log_head = sdp->sd_log_tail = value;
|
||||
}
|
||||
|
||||
unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
|
||||
unsigned int ssize);
|
||||
|
||||
void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags);
|
||||
int gfs2_ail1_empty(struct gfs2_sbd *sdp, int flags);
|
||||
|
||||
int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks);
|
||||
void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks);
|
||||
|
||||
struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp);
|
||||
struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp,
|
||||
struct buffer_head *real);
|
||||
void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl);
|
||||
void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans);
|
||||
|
||||
void gfs2_log_shutdown(struct gfs2_sbd *sdp);
|
||||
|
||||
#endif /* __LOG_DOT_H__ */
|
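Not part of the commit: gfs2_log_lock()/gfs2_log_unlock() above are thin inline wrappers that hide which lock protects the log manager state. A hedged userspace sketch of the same wrapper idea, using a pthread mutex instead of the kernel spinlock; the demo_log type and its fields are invented for illustration.

#include <pthread.h>
#include <stdio.h>

/* Invented demo type; only the wrapper shape mirrors log.h. */
struct demo_log {
	pthread_mutex_t lock;
	unsigned long entries;
};

static inline void demo_log_lock(struct demo_log *lg)
{
	pthread_mutex_lock(&lg->lock);
}

static inline void demo_log_unlock(struct demo_log *lg)
{
	pthread_mutex_unlock(&lg->lock);
}

int main(void)
{
	struct demo_log lg = { PTHREAD_MUTEX_INITIALIZER, 0 };

	demo_log_lock(&lg);
	lg.entries++;		/* touch shared log state under the lock */
	demo_log_unlock(&lg);

	printf("entries=%lu\n", lg.entries);
	return 0;
}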
809
fs/gfs2/lops.c
Normal file
@@ -0,0 +1,809 @@
/*
|
||||
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This copyrighted material is made available to anyone wishing to use,
|
||||
* modify, copy, or redistribute it subject to the terms and conditions
|
||||
* of the GNU General Public License version 2.
|
||||
*/
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/buffer_head.h>
|
||||
#include <linux/gfs2_ondisk.h>
|
||||
#include <linux/lm_interface.h>
|
||||
|
||||
#include "gfs2.h"
|
||||
#include "incore.h"
|
||||
#include "glock.h"
|
||||
#include "log.h"
|
||||
#include "lops.h"
|
||||
#include "meta_io.h"
|
||||
#include "recovery.h"
|
||||
#include "rgrp.h"
|
||||
#include "trans.h"
|
||||
#include "util.h"
|
||||
|
||||
static void glock_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
|
||||
{
|
||||
struct gfs2_glock *gl;
|
||||
struct gfs2_trans *tr = current->journal_info;
|
||||
|
||||
tr->tr_touched = 1;
|
||||
|
||||
if (!list_empty(&le->le_list))
|
||||
return;
|
||||
|
||||
gl = container_of(le, struct gfs2_glock, gl_le);
|
||||
if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(gl)))
|
||||
return;
|
||||
gfs2_glock_hold(gl);
|
||||
set_bit(GLF_DIRTY, &gl->gl_flags);
|
||||
|
||||
gfs2_log_lock(sdp);
|
||||
sdp->sd_log_num_gl++;
|
||||
list_add(&le->le_list, &sdp->sd_log_le_gl);
|
||||
gfs2_log_unlock(sdp);
|
||||
}
|
||||
|
||||
static void glock_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
|
||||
{
|
||||
struct list_head *head = &sdp->sd_log_le_gl;
|
||||
struct gfs2_glock *gl;
|
||||
|
||||
while (!list_empty(head)) {
|
||||
gl = list_entry(head->next, struct gfs2_glock, gl_le.le_list);
|
||||
list_del_init(&gl->gl_le.le_list);
|
||||
sdp->sd_log_num_gl--;
|
||||
|
||||
gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(gl));
|
||||
gfs2_glock_put(gl);
|
||||
}
|
||||
gfs2_assert_warn(sdp, !sdp->sd_log_num_gl);
|
||||
}
|
||||
|
||||
static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
|
||||
{
|
||||
struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
|
||||
struct gfs2_trans *tr;
|
||||
|
||||
if (!list_empty(&bd->bd_list_tr))
|
||||
return;
|
||||
|
||||
tr = current->journal_info;
|
||||
tr->tr_touched = 1;
|
||||
tr->tr_num_buf++;
|
||||
list_add(&bd->bd_list_tr, &tr->tr_list_buf);
|
||||
|
||||
if (!list_empty(&le->le_list))
|
||||
return;
|
||||
|
||||
gfs2_trans_add_gl(bd->bd_gl);
|
||||
|
||||
gfs2_meta_check(sdp, bd->bd_bh);
|
||||
gfs2_pin(sdp, bd->bd_bh);
|
||||
|
||||
gfs2_log_lock(sdp);
|
||||
sdp->sd_log_num_buf++;
|
||||
list_add(&le->le_list, &sdp->sd_log_le_buf);
|
||||
gfs2_log_unlock(sdp);
|
||||
|
||||
tr->tr_num_buf_new++;
|
||||
}
|
||||
|
||||
static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
|
||||
{
|
||||
struct list_head *head = &tr->tr_list_buf;
|
||||
struct gfs2_bufdata *bd;
|
||||
|
||||
while (!list_empty(head)) {
|
||||
bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr);
|
||||
list_del_init(&bd->bd_list_tr);
|
||||
tr->tr_num_buf--;
|
||||
}
|
||||
gfs2_assert_warn(sdp, !tr->tr_num_buf);
|
||||
}
|
||||
|
||||
static void buf_lo_before_commit(struct gfs2_sbd *sdp)
|
||||
{
|
||||
struct buffer_head *bh;
|
||||
struct gfs2_log_descriptor *ld;
|
||||
struct gfs2_bufdata *bd1 = NULL, *bd2;
|
||||
unsigned int total = sdp->sd_log_num_buf;
|
||||
unsigned int offset = sizeof(struct gfs2_log_descriptor);
|
||||
unsigned int limit;
|
||||
unsigned int num;
|
||||
unsigned n;
|
||||
__be64 *ptr;
|
||||
|
||||
offset += sizeof(__be64) - 1;
|
||||
offset &= ~(sizeof(__be64) - 1);
|
||||
limit = (sdp->sd_sb.sb_bsize - offset)/sizeof(__be64);
|
||||
/* for 4k blocks, limit = 503 */
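/*
 * Worked example (added note, not in the original patch): with a
 * 72-byte struct gfs2_log_descriptor, offset rounds up to 72 for
 * 4096-byte blocks, so limit = (4096 - 72) / sizeof(__be64)
 * = 4024 / 8 = 503 block pointers per descriptor, matching the
 * comment above.
 */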
|
||||
|
||||
bd1 = bd2 = list_prepare_entry(bd1, &sdp->sd_log_le_buf, bd_le.le_list);
|
||||
while(total) {
|
||||
num = total;
|
||||
if (total > limit)
|
||||
num = limit;
|
||||
bh = gfs2_log_get_buf(sdp);
|
||||
sdp->sd_log_num_hdrs++;
|
||||
ld = (struct gfs2_log_descriptor *)bh->b_data;
|
||||
ptr = (__be64 *)(bh->b_data + offset);
|
||||
ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
|
||||
ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
|
||||
ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
|
||||
ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_METADATA);
|
||||
ld->ld_length = cpu_to_be32(num + 1);
|
||||
ld->ld_data1 = cpu_to_be32(num);
|
||||
ld->ld_data2 = cpu_to_be32(0);
|
||||
memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
|
||||
|
||||
n = 0;
|
||||
list_for_each_entry_continue(bd1, &sdp->sd_log_le_buf,
|
||||
bd_le.le_list) {
|
||||
*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
|
||||
if (++n >= num)
|
||||
break;
|
||||
}
|
||||
|
||||
set_buffer_dirty(bh);
|
||||
ll_rw_block(WRITE, 1, &bh);
|
||||
|
||||
n = 0;
|
||||
list_for_each_entry_continue(bd2, &sdp->sd_log_le_buf,
|
||||
bd_le.le_list) {
|
||||
bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
|
||||
set_buffer_dirty(bh);
|
||||
ll_rw_block(WRITE, 1, &bh);
|
||||
if (++n >= num)
|
||||
break;
|
||||
}
|
||||
|
||||
total -= num;
|
||||
}
|
||||
}
|
||||
|
||||
static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
|
||||
{
|
||||
struct list_head *head = &sdp->sd_log_le_buf;
|
||||
struct gfs2_bufdata *bd;
|
||||
|
||||
while (!list_empty(head)) {
|
||||
bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
|
||||
list_del_init(&bd->bd_le.le_list);
|
||||
sdp->sd_log_num_buf--;
|
||||
|
||||
gfs2_unpin(sdp, bd->bd_bh, ai);
|
||||
}
|
||||
gfs2_assert_warn(sdp, !sdp->sd_log_num_buf);
|
||||
}
|
||||
|
||||
static void buf_lo_before_scan(struct gfs2_jdesc *jd,
|
||||
struct gfs2_log_header *head, int pass)
|
||||
{
|
||||
struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
|
||||
|
||||
if (pass != 0)
|
||||
return;
|
||||
|
||||
sdp->sd_found_blocks = 0;
|
||||
sdp->sd_replayed_blocks = 0;
|
||||
}
|
||||
|
||||
static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
|
||||
struct gfs2_log_descriptor *ld, __be64 *ptr,
|
||||
int pass)
|
||||
{
|
||||
struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
|
||||
struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
|
||||
struct gfs2_glock *gl = ip->i_gl;
|
||||
unsigned int blks = be32_to_cpu(ld->ld_data1);
|
||||
struct buffer_head *bh_log, *bh_ip;
|
||||
u64 blkno;
|
||||
int error = 0;
|
||||
|
||||
if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
|
||||
return 0;
|
||||
|
||||
gfs2_replay_incr_blk(sdp, &start);
|
||||
|
||||
for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
|
||||
blkno = be64_to_cpu(*ptr++);
|
||||
|
||||
sdp->sd_found_blocks++;
|
||||
|
||||
if (gfs2_revoke_check(sdp, blkno, start))
|
||||
continue;
|
||||
|
||||
error = gfs2_replay_read_block(jd, start, &bh_log);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
bh_ip = gfs2_meta_new(gl, blkno);
|
||||
memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);
|
||||
|
||||
if (gfs2_meta_check(sdp, bh_ip))
|
||||
error = -EIO;
|
||||
else
|
||||
mark_buffer_dirty(bh_ip);
|
||||
|
||||
brelse(bh_log);
|
||||
brelse(bh_ip);
|
||||
|
||||
if (error)
|
||||
break;
|
||||
|
||||
sdp->sd_replayed_blocks++;
|
||||
}
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
|
||||
{
|
||||
struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
|
||||
struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
|
||||
|
||||
if (error) {
|
||||
gfs2_meta_sync(ip->i_gl);
|
||||
return;
|
||||
}
|
||||
if (pass != 1)
|
||||
return;
|
||||
|
||||
gfs2_meta_sync(ip->i_gl);
|
||||
|
||||
fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
|
||||
jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
|
||||
}
|
||||
|
||||
static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
|
||||
{
|
||||
struct gfs2_trans *tr;
|
||||
|
||||
tr = current->journal_info;
|
||||
tr->tr_touched = 1;
|
||||
tr->tr_num_revoke++;
|
||||
|
||||
gfs2_log_lock(sdp);
|
||||
sdp->sd_log_num_revoke++;
|
||||
list_add(&le->le_list, &sdp->sd_log_le_revoke);
|
||||
gfs2_log_unlock(sdp);
|
||||
}
|
||||
|
||||
static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
|
||||
{
|
||||
struct gfs2_log_descriptor *ld;
|
||||
struct gfs2_meta_header *mh;
|
||||
struct buffer_head *bh;
|
||||
unsigned int offset;
|
||||
struct list_head *head = &sdp->sd_log_le_revoke;
|
||||
struct gfs2_revoke *rv;
|
||||
|
||||
if (!sdp->sd_log_num_revoke)
|
||||
return;
|
||||
|
||||
bh = gfs2_log_get_buf(sdp);
|
||||
ld = (struct gfs2_log_descriptor *)bh->b_data;
|
||||
ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
|
||||
ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
|
||||
ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
|
||||
ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_REVOKE);
|
||||
ld->ld_length = cpu_to_be32(gfs2_struct2blk(sdp, sdp->sd_log_num_revoke,
|
||||
sizeof(u64)));
|
||||
ld->ld_data1 = cpu_to_be32(sdp->sd_log_num_revoke);
|
||||
ld->ld_data2 = cpu_to_be32(0);
|
||||
memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
|
||||
offset = sizeof(struct gfs2_log_descriptor);
|
||||
|
||||
while (!list_empty(head)) {
|
||||
rv = list_entry(head->next, struct gfs2_revoke, rv_le.le_list);
|
||||
list_del_init(&rv->rv_le.le_list);
|
||||
sdp->sd_log_num_revoke--;
|
||||
|
||||
if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
|
||||
set_buffer_dirty(bh);
|
||||
ll_rw_block(WRITE, 1, &bh);
|
||||
|
||||
bh = gfs2_log_get_buf(sdp);
|
||||
mh = (struct gfs2_meta_header *)bh->b_data;
|
||||
mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
|
||||
mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
|
||||
mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
|
||||
offset = sizeof(struct gfs2_meta_header);
|
||||
}
|
||||
|
||||
*(__be64 *)(bh->b_data + offset) = cpu_to_be64(rv->rv_blkno);
|
||||
kfree(rv);
|
||||
|
||||
offset += sizeof(u64);
|
||||
}
|
||||
gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
|
||||
|
||||
set_buffer_dirty(bh);
|
||||
ll_rw_block(WRITE, 1, &bh);
|
||||
}
|
||||
|
||||
static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
|
||||
struct gfs2_log_header *head, int pass)
|
||||
{
|
||||
struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
|
||||
|
||||
if (pass != 0)
|
||||
return;
|
||||
|
||||
sdp->sd_found_revokes = 0;
|
||||
sdp->sd_replay_tail = head->lh_tail;
|
||||
}
|
||||
|
||||
static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
|
||||
struct gfs2_log_descriptor *ld, __be64 *ptr,
|
||||
int pass)
|
||||
{
|
||||
struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
|
||||
unsigned int blks = be32_to_cpu(ld->ld_length);
|
||||
unsigned int revokes = be32_to_cpu(ld->ld_data1);
|
||||
struct buffer_head *bh;
|
||||
unsigned int offset;
|
||||
u64 blkno;
|
||||
int first = 1;
|
||||
int error;
|
||||
|
||||
if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
|
||||
return 0;
|
||||
|
||||
offset = sizeof(struct gfs2_log_descriptor);
|
||||
|
||||
for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
|
||||
error = gfs2_replay_read_block(jd, start, &bh);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
if (!first)
|
||||
gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);
|
||||
|
||||
while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
|
||||
blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));
|
||||
|
||||
error = gfs2_revoke_add(sdp, blkno, start);
|
||||
if (error < 0)
|
||||
return error;
|
||||
else if (error)
|
||||
sdp->sd_found_revokes++;
|
||||
|
||||
if (!--revokes)
|
||||
break;
|
||||
offset += sizeof(u64);
|
||||
}
|
||||
|
||||
brelse(bh);
|
||||
offset = sizeof(struct gfs2_meta_header);
|
||||
first = 0;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
|
||||
{
|
||||
struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
|
||||
|
||||
if (error) {
|
||||
gfs2_revoke_clean(sdp);
|
||||
return;
|
||||
}
|
||||
if (pass != 1)
|
||||
return;
|
||||
|
||||
fs_info(sdp, "jid=%u: Found %u revoke tags\n",
|
||||
jd->jd_jid, sdp->sd_found_revokes);
|
||||
|
||||
gfs2_revoke_clean(sdp);
|
||||
}
|
||||
|
||||
static void rg_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
|
||||
{
|
||||
struct gfs2_rgrpd *rgd;
|
||||
struct gfs2_trans *tr = current->journal_info;
|
||||
|
||||
tr->tr_touched = 1;
|
||||
|
||||
if (!list_empty(&le->le_list))
|
||||
return;
|
||||
|
||||
rgd = container_of(le, struct gfs2_rgrpd, rd_le);
|
||||
gfs2_rgrp_bh_hold(rgd);
|
||||
|
||||
gfs2_log_lock(sdp);
|
||||
sdp->sd_log_num_rg++;
|
||||
list_add(&le->le_list, &sdp->sd_log_le_rg);
|
||||
gfs2_log_unlock(sdp);
|
||||
}
|
||||
|
||||
static void rg_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
|
||||
{
|
||||
struct list_head *head = &sdp->sd_log_le_rg;
|
||||
struct gfs2_rgrpd *rgd;
|
||||
|
||||
while (!list_empty(head)) {
|
||||
rgd = list_entry(head->next, struct gfs2_rgrpd, rd_le.le_list);
|
||||
list_del_init(&rgd->rd_le.le_list);
|
||||
sdp->sd_log_num_rg--;
|
||||
|
||||
gfs2_rgrp_repolish_clones(rgd);
|
||||
gfs2_rgrp_bh_put(rgd);
|
||||
}
|
||||
gfs2_assert_warn(sdp, !sdp->sd_log_num_rg);
|
||||
}
|
||||
|
||||
/**
|
||||
* databuf_lo_add - Add a databuf to the transaction.
|
||||
*
|
||||
* This is used in two distinct cases:
|
||||
* i) In ordered write mode
|
||||
* We put the data buffer on a list so that we can ensure that it's
|
||||
* synced to disk at the right time
|
||||
* ii) In journaled data mode
|
||||
* We need to journal the data block in the same way as metadata in
|
||||
* the functions above. The difference is that here we have a tag
|
||||
* which is two __be64's being the block number (as per meta data)
|
||||
* and a flag which says whether the data block needs escaping or
|
||||
* not. This means we need a new log entry for each 251 or so data
|
||||
* blocks, which isn't an enormous overhead but twice as much as
|
||||
* for normal metadata blocks.
|
||||
*/
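/*
 * Worked example (added note, not in the original patch): each
 * journaled data block costs a 16-byte tag (two __be64s: block
 * number plus escape flag).  With the descriptor header rounded up
 * to a 16-byte boundary (72 -> 80 bytes), a 4096-byte log block
 * holds (4096 - 80) / 16 = 251 tags, which is where the "251 or so"
 * figure above comes from.
 */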
|
||||
static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
|
||||
{
|
||||
struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
|
||||
struct gfs2_trans *tr = current->journal_info;
|
||||
struct address_space *mapping = bd->bd_bh->b_page->mapping;
|
||||
struct gfs2_inode *ip = GFS2_I(mapping->host);
|
||||
|
||||
tr->tr_touched = 1;
|
||||
if (list_empty(&bd->bd_list_tr) &&
|
||||
(ip->i_di.di_flags & GFS2_DIF_JDATA)) {
|
||||
tr->tr_num_buf++;
|
||||
list_add(&bd->bd_list_tr, &tr->tr_list_buf);
|
||||
gfs2_pin(sdp, bd->bd_bh);
|
||||
tr->tr_num_buf_new++;
|
||||
}
|
||||
gfs2_trans_add_gl(bd->bd_gl);
|
||||
gfs2_log_lock(sdp);
|
||||
if (list_empty(&le->le_list)) {
|
||||
if (ip->i_di.di_flags & GFS2_DIF_JDATA)
|
||||
sdp->sd_log_num_jdata++;
|
||||
sdp->sd_log_num_databuf++;
|
||||
list_add(&le->le_list, &sdp->sd_log_le_databuf);
|
||||
}
|
||||
gfs2_log_unlock(sdp);
|
||||
}
|
||||
|
||||
static int gfs2_check_magic(struct buffer_head *bh)
|
||||
{
|
||||
struct page *page = bh->b_page;
|
||||
void *kaddr;
|
||||
__be32 *ptr;
|
||||
int rv = 0;
|
||||
|
||||
kaddr = kmap_atomic(page, KM_USER0);
|
||||
ptr = kaddr + bh_offset(bh);
|
||||
if (*ptr == cpu_to_be32(GFS2_MAGIC))
|
||||
rv = 1;
|
||||
kunmap_atomic(page, KM_USER0);
|
||||
|
||||
return rv;
|
||||
}
|
||||
|
||||
/**
|
||||
* databuf_lo_before_commit - Scan the data buffers, writing as we go
|
||||
*
|
||||
* Here we scan through the lists of buffers and make the assumption
|
||||
* that any buffer that's been pinned is being journaled, and that
|
||||
* any unpinned buffer is an ordered write data buffer and therefore
|
||||
* will be written back rather than journaled.
|
||||
*/
|
||||
static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
|
||||
{
|
||||
LIST_HEAD(started);
|
||||
struct gfs2_bufdata *bd1 = NULL, *bd2, *bdt;
|
||||
struct buffer_head *bh = NULL;
|
||||
unsigned int offset = sizeof(struct gfs2_log_descriptor);
|
||||
struct gfs2_log_descriptor *ld;
|
||||
unsigned int limit;
|
||||
unsigned int total_dbuf = sdp->sd_log_num_databuf;
|
||||
unsigned int total_jdata = sdp->sd_log_num_jdata;
|
||||
unsigned int num, n;
|
||||
__be64 *ptr = NULL;
|
||||
|
||||
offset += 2*sizeof(__be64) - 1;
|
||||
offset &= ~(2*sizeof(__be64) - 1);
|
||||
limit = (sdp->sd_sb.sb_bsize - offset)/sizeof(__be64);
|
||||
|
||||
/*
|
||||
* Start writing ordered buffers, write journaled buffers
|
||||
* into the log along with a header
|
||||
*/
|
||||
gfs2_log_lock(sdp);
|
||||
bd2 = bd1 = list_prepare_entry(bd1, &sdp->sd_log_le_databuf,
|
||||
bd_le.le_list);
|
||||
while(total_dbuf) {
|
||||
num = total_jdata;
|
||||
if (num > limit)
|
||||
num = limit;
|
||||
n = 0;
|
||||
list_for_each_entry_safe_continue(bd1, bdt,
|
||||
&sdp->sd_log_le_databuf,
|
||||
bd_le.le_list) {
|
||||
/* An ordered write buffer */
|
||||
if (bd1->bd_bh && !buffer_pinned(bd1->bd_bh)) {
|
||||
list_move(&bd1->bd_le.le_list, &started);
|
||||
if (bd1 == bd2) {
|
||||
bd2 = NULL;
|
||||
bd2 = list_prepare_entry(bd2,
|
||||
&sdp->sd_log_le_databuf,
|
||||
bd_le.le_list);
|
||||
}
|
||||
total_dbuf--;
|
||||
if (bd1->bd_bh) {
|
||||
get_bh(bd1->bd_bh);
|
||||
if (buffer_dirty(bd1->bd_bh)) {
|
||||
gfs2_log_unlock(sdp);
|
||||
wait_on_buffer(bd1->bd_bh);
|
||||
ll_rw_block(WRITE, 1,
|
||||
&bd1->bd_bh);
|
||||
gfs2_log_lock(sdp);
|
||||
}
|
||||
brelse(bd1->bd_bh);
|
||||
continue;
|
||||
}
|
||||
continue;
|
||||
} else if (bd1->bd_bh) { /* A journaled buffer */
|
||||
int magic;
|
||||
gfs2_log_unlock(sdp);
|
||||
if (!bh) {
|
||||
bh = gfs2_log_get_buf(sdp);
|
||||
sdp->sd_log_num_hdrs++;
|
||||
ld = (struct gfs2_log_descriptor *)
|
||||
bh->b_data;
|
||||
ptr = (__be64 *)(bh->b_data + offset);
|
||||
ld->ld_header.mh_magic =
|
||||
cpu_to_be32(GFS2_MAGIC);
|
||||
ld->ld_header.mh_type =
|
||||
cpu_to_be32(GFS2_METATYPE_LD);
|
||||
ld->ld_header.mh_format =
|
||||
cpu_to_be32(GFS2_FORMAT_LD);
|
||||
ld->ld_type =
|
||||
cpu_to_be32(GFS2_LOG_DESC_JDATA);
|
||||
ld->ld_length = cpu_to_be32(num + 1);
|
||||
ld->ld_data1 = cpu_to_be32(num);
|
||||
ld->ld_data2 = cpu_to_be32(0);
|
||||
memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
|
||||
}
|
||||
magic = gfs2_check_magic(bd1->bd_bh);
|
||||
*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
|
||||
*ptr++ = cpu_to_be64((__u64)magic);
|
||||
clear_buffer_escaped(bd1->bd_bh);
|
||||
if (unlikely(magic != 0))
|
||||
set_buffer_escaped(bd1->bd_bh);
|
||||
gfs2_log_lock(sdp);
|
||||
if (n++ > num)
|
||||
break;
|
||||
} else if (!bd1->bd_bh) {
|
||||
total_dbuf--;
|
||||
sdp->sd_log_num_databuf--;
|
||||
list_del_init(&bd1->bd_le.le_list);
|
||||
if (bd1 == bd2) {
|
||||
bd2 = NULL;
|
||||
bd2 = list_prepare_entry(bd2,
|
||||
&sdp->sd_log_le_databuf,
|
||||
bd_le.le_list);
|
||||
}
|
||||
kmem_cache_free(gfs2_bufdata_cachep, bd1);
|
||||
}
|
||||
}
|
||||
gfs2_log_unlock(sdp);
|
||||
if (bh) {
|
||||
set_buffer_dirty(bh);
|
||||
ll_rw_block(WRITE, 1, &bh);
|
||||
bh = NULL;
|
||||
}
|
||||
n = 0;
|
||||
gfs2_log_lock(sdp);
|
||||
list_for_each_entry_continue(bd2, &sdp->sd_log_le_databuf,
|
||||
bd_le.le_list) {
|
||||
if (!bd2->bd_bh)
|
||||
continue;
|
||||
/* copy buffer if it needs escaping */
|
||||
gfs2_log_unlock(sdp);
|
||||
if (unlikely(buffer_escaped(bd2->bd_bh))) {
|
||||
void *kaddr;
|
||||
struct page *page = bd2->bd_bh->b_page;
|
||||
bh = gfs2_log_get_buf(sdp);
|
||||
kaddr = kmap_atomic(page, KM_USER0);
|
||||
memcpy(bh->b_data,
|
||||
kaddr + bh_offset(bd2->bd_bh),
|
||||
sdp->sd_sb.sb_bsize);
|
||||
kunmap_atomic(page, KM_USER0);
|
||||
*(__be32 *)bh->b_data = 0;
|
||||
} else {
|
||||
bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
|
||||
}
|
||||
set_buffer_dirty(bh);
|
||||
ll_rw_block(WRITE, 1, &bh);
|
||||
gfs2_log_lock(sdp);
|
||||
if (++n >= num)
|
||||
break;
|
||||
}
|
||||
bh = NULL;
|
||||
total_dbuf -= num;
|
||||
total_jdata -= num;
|
||||
}
|
||||
gfs2_log_unlock(sdp);
|
||||
|
||||
/* Wait on all ordered buffers */
|
||||
while (!list_empty(&started)) {
|
||||
gfs2_log_lock(sdp);
|
||||
bd1 = list_entry(started.next, struct gfs2_bufdata,
|
||||
bd_le.le_list);
|
||||
list_del_init(&bd1->bd_le.le_list);
|
||||
sdp->sd_log_num_databuf--;
|
||||
bh = bd1->bd_bh;
|
||||
if (bh) {
|
||||
bh->b_private = NULL;
|
||||
get_bh(bh);
|
||||
gfs2_log_unlock(sdp);
|
||||
wait_on_buffer(bh);
|
||||
brelse(bh);
|
||||
} else
|
||||
gfs2_log_unlock(sdp);
|
||||
|
||||
kmem_cache_free(gfs2_bufdata_cachep, bd1);
|
||||
}
|
||||
|
||||
/* We've removed all the ordered write bufs here, so only jdata left */
|
||||
gfs2_assert_warn(sdp, sdp->sd_log_num_databuf == sdp->sd_log_num_jdata);
|
||||
}
|
||||
|
||||
static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
|
||||
struct gfs2_log_descriptor *ld,
|
||||
__be64 *ptr, int pass)
|
||||
{
|
||||
struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
|
||||
struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
|
||||
struct gfs2_glock *gl = ip->i_gl;
|
||||
unsigned int blks = be32_to_cpu(ld->ld_data1);
|
||||
struct buffer_head *bh_log, *bh_ip;
|
||||
u64 blkno;
|
||||
u64 esc;
|
||||
int error = 0;
|
||||
|
||||
if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
|
||||
return 0;
|
||||
|
||||
gfs2_replay_incr_blk(sdp, &start);
|
||||
for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
|
||||
blkno = be64_to_cpu(*ptr++);
|
||||
esc = be64_to_cpu(*ptr++);
|
||||
|
||||
sdp->sd_found_blocks++;
|
||||
|
||||
if (gfs2_revoke_check(sdp, blkno, start))
|
||||
continue;
|
||||
|
||||
error = gfs2_replay_read_block(jd, start, &bh_log);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
bh_ip = gfs2_meta_new(gl, blkno);
|
||||
memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);
|
||||
|
||||
/* Unescape */
|
||||
if (esc) {
|
||||
__be32 *eptr = (__be32 *)bh_ip->b_data;
|
||||
*eptr = cpu_to_be32(GFS2_MAGIC);
|
||||
}
|
||||
mark_buffer_dirty(bh_ip);
|
||||
|
||||
brelse(bh_log);
|
||||
brelse(bh_ip);
|
||||
if (error)
|
||||
break;
|
||||
|
||||
sdp->sd_replayed_blocks++;
|
||||
}
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
/* FIXME: sort out accounting for log blocks etc. */
|
||||
|
||||
static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
|
||||
{
|
||||
struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
|
||||
struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
|
||||
|
||||
if (error) {
|
||||
gfs2_meta_sync(ip->i_gl);
|
||||
return;
|
||||
}
|
||||
if (pass != 1)
|
||||
return;
|
||||
|
||||
/* data sync? */
|
||||
gfs2_meta_sync(ip->i_gl);
|
||||
|
||||
fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
|
||||
jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
|
||||
}
|
||||
|
||||
static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
|
||||
{
|
||||
struct list_head *head = &sdp->sd_log_le_databuf;
|
||||
struct gfs2_bufdata *bd;
|
||||
|
||||
while (!list_empty(head)) {
|
||||
bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
|
||||
list_del_init(&bd->bd_le.le_list);
|
||||
sdp->sd_log_num_databuf--;
|
||||
sdp->sd_log_num_jdata--;
|
||||
gfs2_unpin(sdp, bd->bd_bh, ai);
|
||||
}
|
||||
gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
|
||||
gfs2_assert_warn(sdp, !sdp->sd_log_num_jdata);
|
||||
}
|
||||
|
||||
|
||||
const struct gfs2_log_operations gfs2_glock_lops = {
|
||||
.lo_add = glock_lo_add,
|
||||
.lo_after_commit = glock_lo_after_commit,
|
||||
.lo_name = "glock",
|
||||
};
|
||||
|
||||
const struct gfs2_log_operations gfs2_buf_lops = {
|
||||
.lo_add = buf_lo_add,
|
||||
.lo_incore_commit = buf_lo_incore_commit,
|
||||
.lo_before_commit = buf_lo_before_commit,
|
||||
.lo_after_commit = buf_lo_after_commit,
|
||||
.lo_before_scan = buf_lo_before_scan,
|
||||
.lo_scan_elements = buf_lo_scan_elements,
|
||||
.lo_after_scan = buf_lo_after_scan,
|
||||
.lo_name = "buf",
|
||||
};
|
||||
|
||||
const struct gfs2_log_operations gfs2_revoke_lops = {
|
||||
.lo_add = revoke_lo_add,
|
||||
.lo_before_commit = revoke_lo_before_commit,
|
||||
.lo_before_scan = revoke_lo_before_scan,
|
||||
.lo_scan_elements = revoke_lo_scan_elements,
|
||||
.lo_after_scan = revoke_lo_after_scan,
|
||||
.lo_name = "revoke",
|
||||
};
|
||||
|
||||
const struct gfs2_log_operations gfs2_rg_lops = {
|
||||
.lo_add = rg_lo_add,
|
||||
.lo_after_commit = rg_lo_after_commit,
|
||||
.lo_name = "rg",
|
||||
};
|
||||
|
||||
const struct gfs2_log_operations gfs2_databuf_lops = {
|
||||
.lo_add = databuf_lo_add,
|
||||
.lo_incore_commit = buf_lo_incore_commit,
|
||||
.lo_before_commit = databuf_lo_before_commit,
|
||||
.lo_after_commit = databuf_lo_after_commit,
|
||||
.lo_scan_elements = databuf_lo_scan_elements,
|
||||
.lo_after_scan = databuf_lo_after_scan,
|
||||
.lo_name = "databuf",
|
||||
};
|
||||
|
||||
const struct gfs2_log_operations *gfs2_log_ops[] = {
|
||||
&gfs2_glock_lops,
|
||||
&gfs2_buf_lops,
|
||||
&gfs2_revoke_lops,
|
||||
&gfs2_rg_lops,
|
||||
&gfs2_databuf_lops,
|
||||
NULL,
|
||||
};
|
||||
|
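Not part of the commit: databuf_lo_before_commit() above zeroes the first four bytes of any journaled data block that happens to begin with the GFS2 magic number and records an escape flag in its tag; databuf_lo_scan_elements() puts the magic back on replay. A small userspace sketch of that escape/unescape idea, assuming the magic value 0x01161970 and using htonl() as a stand-in for cpu_to_be32().

#include <arpa/inet.h>		/* htonl()/ntohl() stand in for cpu_to_be32() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_GFS2_MAGIC 0x01161970u

int main(void)
{
	unsigned char block[4096];
	uint32_t magic = htonl(DEMO_GFS2_MAGIC);
	uint32_t first;
	int escaped;

	/* A data block that happens to start with the magic number. */
	memset(block, 0xab, sizeof(block));
	memcpy(block, &magic, sizeof(magic));

	/* Before commit: detect the collision and escape it in the log copy. */
	escaped = !memcmp(block, &magic, sizeof(magic));
	if (escaped)
		memset(block, 0, sizeof(magic));

	/* On replay: the tag's escape flag says to put the magic back. */
	if (escaped)
		memcpy(block, &magic, sizeof(magic));

	memcpy(&first, block, sizeof(first));
	printf("escaped=%d, first word restored to 0x%08x\n",
	       escaped, (unsigned int)ntohl(first));
	return 0;
}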
99
fs/gfs2/lops.h
Normal file
@@ -0,0 +1,99 @@
/*
|
||||
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This copyrighted material is made available to anyone wishing to use,
|
||||
* modify, copy, or redistribute it subject to the terms and conditions
|
||||
* of the GNU General Public License version 2.
|
||||
*/
|
||||
|
||||
#ifndef __LOPS_DOT_H__
|
||||
#define __LOPS_DOT_H__
|
||||
|
||||
#include <linux/list.h>
|
||||
#include "incore.h"
|
||||
|
||||
extern const struct gfs2_log_operations gfs2_glock_lops;
|
||||
extern const struct gfs2_log_operations gfs2_buf_lops;
|
||||
extern const struct gfs2_log_operations gfs2_revoke_lops;
|
||||
extern const struct gfs2_log_operations gfs2_rg_lops;
|
||||
extern const struct gfs2_log_operations gfs2_databuf_lops;
|
||||
|
||||
extern const struct gfs2_log_operations *gfs2_log_ops[];
|
||||
|
||||
static inline void lops_init_le(struct gfs2_log_element *le,
|
||||
const struct gfs2_log_operations *lops)
|
||||
{
|
||||
INIT_LIST_HEAD(&le->le_list);
|
||||
le->le_ops = lops;
|
||||
}
|
||||
|
||||
static inline void lops_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
|
||||
{
|
||||
if (le->le_ops->lo_add)
|
||||
le->le_ops->lo_add(sdp, le);
|
||||
}
|
||||
|
||||
static inline void lops_incore_commit(struct gfs2_sbd *sdp,
|
||||
struct gfs2_trans *tr)
|
||||
{
|
||||
int x;
|
||||
for (x = 0; gfs2_log_ops[x]; x++)
|
||||
if (gfs2_log_ops[x]->lo_incore_commit)
|
||||
gfs2_log_ops[x]->lo_incore_commit(sdp, tr);
|
||||
}
|
||||
|
||||
static inline void lops_before_commit(struct gfs2_sbd *sdp)
|
||||
{
|
||||
int x;
|
||||
for (x = 0; gfs2_log_ops[x]; x++)
|
||||
if (gfs2_log_ops[x]->lo_before_commit)
|
||||
gfs2_log_ops[x]->lo_before_commit(sdp);
|
||||
}
|
||||
|
||||
static inline void lops_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
|
||||
{
|
||||
int x;
|
||||
for (x = 0; gfs2_log_ops[x]; x++)
|
||||
if (gfs2_log_ops[x]->lo_after_commit)
|
||||
gfs2_log_ops[x]->lo_after_commit(sdp, ai);
|
||||
}
|
||||
|
||||
static inline void lops_before_scan(struct gfs2_jdesc *jd,
|
||||
struct gfs2_log_header *head,
|
||||
unsigned int pass)
|
||||
{
|
||||
int x;
|
||||
for (x = 0; gfs2_log_ops[x]; x++)
|
||||
if (gfs2_log_ops[x]->lo_before_scan)
|
||||
gfs2_log_ops[x]->lo_before_scan(jd, head, pass);
|
||||
}
|
||||
|
||||
static inline int lops_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
|
||||
struct gfs2_log_descriptor *ld,
|
||||
__be64 *ptr,
|
||||
unsigned int pass)
|
||||
{
|
||||
int x, error;
|
||||
for (x = 0; gfs2_log_ops[x]; x++)
|
||||
if (gfs2_log_ops[x]->lo_scan_elements) {
|
||||
error = gfs2_log_ops[x]->lo_scan_elements(jd, start,
|
||||
ld, ptr, pass);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void lops_after_scan(struct gfs2_jdesc *jd, int error,
|
||||
unsigned int pass)
|
||||
{
|
||||
int x;
|
||||
for (x = 0; gfs2_log_ops[x]; x++)
|
||||
if (gfs2_log_ops[x]->lo_before_scan)
|
||||
gfs2_log_ops[x]->lo_after_scan(jd, error, pass);
|
||||
}
|
||||
|
||||
#endif /* __LOPS_DOT_H__ */
|
||||
|
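Not part of the commit: lops.h iterates the NULL-terminated gfs2_log_ops[] table and calls each hook only if it is non-NULL. A minimal standalone sketch of that dispatch pattern; the demo_* names are invented and carry none of the real lo_* semantics.

#include <stdio.h>

/* Invented demo ops; only the NULL-terminated-array dispatch mirrors lops.h. */
struct demo_ops {
	const char *name;
	void (*before_commit)(void);
};

static void demo_buf_before_commit(void)    { printf("buf: before_commit\n"); }
static void demo_revoke_before_commit(void) { printf("revoke: before_commit\n"); }

static const struct demo_ops demo_glock_ops  = { "glock",  NULL };
static const struct demo_ops demo_buf_ops    = { "buf",    demo_buf_before_commit };
static const struct demo_ops demo_revoke_ops = { "revoke", demo_revoke_before_commit };

static const struct demo_ops *demo_log_ops[] = {
	&demo_glock_ops,
	&demo_buf_ops,
	&demo_revoke_ops,
	NULL,
};

int main(void)
{
	int x;

	/* Walk the table and call only the hooks that are provided. */
	for (x = 0; demo_log_ops[x]; x++)
		if (demo_log_ops[x]->before_commit)
			demo_log_ops[x]->before_commit();
	return 0;
}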
150
fs/gfs2/main.c
Normal file
@@ -0,0 +1,150 @@
/*
|
||||
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This copyrighted material is made available to anyone wishing to use,
|
||||
* modify, copy, or redistribute it subject to the terms and conditions
|
||||
* of the GNU General Public License version 2.
|
||||
*/
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/buffer_head.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/gfs2_ondisk.h>
|
||||
#include <linux/lm_interface.h>
|
||||
#include <asm/atomic.h>
|
||||
|
||||
#include "gfs2.h"
|
||||
#include "incore.h"
|
||||
#include "ops_fstype.h"
|
||||
#include "sys.h"
|
||||
#include "util.h"
|
||||
#include "glock.h"
|
||||
|
||||
static void gfs2_init_inode_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
|
||||
{
|
||||
struct gfs2_inode *ip = foo;
|
||||
if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
|
||||
SLAB_CTOR_CONSTRUCTOR) {
|
||||
inode_init_once(&ip->i_inode);
|
||||
spin_lock_init(&ip->i_spin);
|
||||
init_rwsem(&ip->i_rw_mutex);
|
||||
memset(ip->i_cache, 0, sizeof(ip->i_cache));
|
||||
}
|
||||
}
|
||||
|
||||
static void gfs2_init_glock_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
|
||||
{
|
||||
struct gfs2_glock *gl = foo;
|
||||
if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
|
||||
SLAB_CTOR_CONSTRUCTOR) {
|
||||
INIT_HLIST_NODE(&gl->gl_list);
|
||||
spin_lock_init(&gl->gl_spin);
|
||||
INIT_LIST_HEAD(&gl->gl_holders);
|
||||
INIT_LIST_HEAD(&gl->gl_waiters1);
|
||||
INIT_LIST_HEAD(&gl->gl_waiters2);
|
||||
INIT_LIST_HEAD(&gl->gl_waiters3);
|
||||
gl->gl_lvb = NULL;
|
||||
atomic_set(&gl->gl_lvb_count, 0);
|
||||
INIT_LIST_HEAD(&gl->gl_reclaim);
|
||||
INIT_LIST_HEAD(&gl->gl_ail_list);
|
||||
atomic_set(&gl->gl_ail_count, 0);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* init_gfs2_fs - Register GFS2 as a filesystem
|
||||
*
|
||||
* Returns: 0 on success, error code on failure
|
||||
*/
|
||||
|
||||
static int __init init_gfs2_fs(void)
|
||||
{
|
||||
int error;
|
||||
|
||||
error = gfs2_sys_init();
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
error = gfs2_glock_init();
|
||||
if (error)
|
||||
goto fail;
|
||||
|
||||
error = -ENOMEM;
|
||||
gfs2_glock_cachep = kmem_cache_create("gfs2_glock",
|
||||
sizeof(struct gfs2_glock),
|
||||
0, 0,
|
||||
gfs2_init_glock_once, NULL);
|
||||
if (!gfs2_glock_cachep)
|
||||
goto fail;
|
||||
|
||||
gfs2_inode_cachep = kmem_cache_create("gfs2_inode",
|
||||
sizeof(struct gfs2_inode),
|
||||
0, (SLAB_RECLAIM_ACCOUNT|
|
||||
SLAB_PANIC|SLAB_MEM_SPREAD),
|
||||
gfs2_init_inode_once, NULL);
|
||||
if (!gfs2_inode_cachep)
|
||||
goto fail;
|
||||
|
||||
gfs2_bufdata_cachep = kmem_cache_create("gfs2_bufdata",
|
||||
sizeof(struct gfs2_bufdata),
|
||||
0, 0, NULL, NULL);
|
||||
if (!gfs2_bufdata_cachep)
|
||||
goto fail;
|
||||
|
||||
error = register_filesystem(&gfs2_fs_type);
|
||||
if (error)
|
||||
goto fail;
|
||||
|
||||
error = register_filesystem(&gfs2meta_fs_type);
|
||||
if (error)
|
||||
goto fail_unregister;
|
||||
|
||||
printk("GFS2 (built %s %s) installed\n", __DATE__, __TIME__);
|
||||
|
||||
return 0;
|
||||
|
||||
fail_unregister:
|
||||
unregister_filesystem(&gfs2_fs_type);
|
||||
fail:
|
||||
if (gfs2_bufdata_cachep)
|
||||
kmem_cache_destroy(gfs2_bufdata_cachep);
|
||||
|
||||
if (gfs2_inode_cachep)
|
||||
kmem_cache_destroy(gfs2_inode_cachep);
|
||||
|
||||
if (gfs2_glock_cachep)
|
||||
kmem_cache_destroy(gfs2_glock_cachep);
|
||||
|
||||
gfs2_sys_uninit();
|
||||
return error;
|
||||
}
|
||||
|
||||
/**
|
||||
* exit_gfs2_fs - Unregister the file system
|
||||
*
|
||||
*/
|
||||
|
||||
static void __exit exit_gfs2_fs(void)
|
||||
{
|
||||
unregister_filesystem(&gfs2_fs_type);
|
||||
unregister_filesystem(&gfs2meta_fs_type);
|
||||
|
||||
kmem_cache_destroy(gfs2_bufdata_cachep);
|
||||
kmem_cache_destroy(gfs2_inode_cachep);
|
||||
kmem_cache_destroy(gfs2_glock_cachep);
|
||||
|
||||
gfs2_sys_uninit();
|
||||
}
|
||||
|
||||
MODULE_DESCRIPTION("Global File System");
|
||||
MODULE_AUTHOR("Red Hat, Inc.");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
module_init(init_gfs2_fs);
|
||||
module_exit(exit_gfs2_fs);
|
||||
|
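Not part of the commit: init_gfs2_fs() above creates several caches and registers two filesystem types, and on failure frees whatever it managed to set up. A small userspace sketch of that fail-label cleanup shape, with malloc() standing in for the cache and registration calls; the demo_* names are invented.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Invented demo resources; only the unwind shape mirrors init_gfs2_fs(). */
static void *demo_glock_cache;
static void *demo_inode_cache;

static int demo_init(void)
{
	int error = -ENOMEM;

	demo_glock_cache = malloc(64);
	if (!demo_glock_cache)
		goto fail;

	demo_inode_cache = malloc(64);
	if (!demo_inode_cache)
		goto fail;

	return 0;

fail:
	/* Tear down whatever was set up, checking each resource, as above. */
	if (demo_inode_cache)
		free(demo_inode_cache);
	if (demo_glock_cache)
		free(demo_glock_cache);
	return error;
}

int main(void)
{
	int error = demo_init();

	if (error)
		fprintf(stderr, "demo_init failed: %d\n", error);
	else
		printf("demo_init ok\n");
	return error ? 1 : 0;
}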
590
fs/gfs2/meta_io.c
Normal file
@@ -0,0 +1,590 @@
/*
|
||||
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This copyrighted material is made available to anyone wishing to use,
|
||||
* modify, copy, or redistribute it subject to the terms and conditions
|
||||
* of the GNU General Public License version 2.
|
||||
*/
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/buffer_head.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/writeback.h>
|
||||
#include <linux/swap.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/bio.h>
|
||||
#include <linux/gfs2_ondisk.h>
|
||||
#include <linux/lm_interface.h>
|
||||
|
||||
#include "gfs2.h"
|
||||
#include "incore.h"
|
||||
#include "glock.h"
|
||||
#include "glops.h"
|
||||
#include "inode.h"
|
||||
#include "log.h"
|
||||
#include "lops.h"
|
||||
#include "meta_io.h"
|
||||
#include "rgrp.h"
|
||||
#include "trans.h"
|
||||
#include "util.h"
|
||||
#include "ops_address.h"
|
||||
|
||||
static int aspace_get_block(struct inode *inode, sector_t lblock,
|
||||
struct buffer_head *bh_result, int create)
|
||||
{
|
||||
gfs2_assert_warn(inode->i_sb->s_fs_info, 0);
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static int gfs2_aspace_writepage(struct page *page,
|
||||
struct writeback_control *wbc)
|
||||
{
|
||||
return block_write_full_page(page, aspace_get_block, wbc);
|
||||
}
|
||||
|
||||
static const struct address_space_operations aspace_aops = {
|
||||
.writepage = gfs2_aspace_writepage,
|
||||
.releasepage = gfs2_releasepage,
|
||||
};
|
||||
|
||||
/**
|
||||
* gfs2_aspace_get - Create and initialize a struct inode structure
|
||||
* @sdp: the filesystem the aspace is in
|
||||
*
|
||||
* Right now a struct inode is just a struct inode. Maybe Linux
|
||||
* will supply a more lightweight address space construct (that works)
|
||||
* in the future.
|
||||
*
|
||||
* Make sure pages/buffers in this aspace aren't in high memory.
|
||||
*
|
||||
* Returns: the aspace
|
||||
*/
|
||||
|
||||
struct inode *gfs2_aspace_get(struct gfs2_sbd *sdp)
|
||||
{
|
||||
struct inode *aspace;
|
||||
|
||||
aspace = new_inode(sdp->sd_vfs);
|
||||
if (aspace) {
|
||||
mapping_set_gfp_mask(aspace->i_mapping, GFP_NOFS);
|
||||
aspace->i_mapping->a_ops = &aspace_aops;
|
||||
aspace->i_size = ~0ULL;
|
||||
aspace->i_private = NULL;
|
||||
insert_inode_hash(aspace);
|
||||
}
|
||||
return aspace;
|
||||
}
|
||||
|
||||
void gfs2_aspace_put(struct inode *aspace)
|
||||
{
|
||||
remove_inode_hash(aspace);
|
||||
iput(aspace);
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_meta_inval - Invalidate all buffers associated with a glock
|
||||
* @gl: the glock
|
||||
*
|
||||
*/
|
||||
|
||||
void gfs2_meta_inval(struct gfs2_glock *gl)
|
||||
{
|
||||
struct gfs2_sbd *sdp = gl->gl_sbd;
|
||||
struct inode *aspace = gl->gl_aspace;
|
||||
struct address_space *mapping = gl->gl_aspace->i_mapping;
|
||||
|
||||
gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
|
||||
|
||||
atomic_inc(&aspace->i_writecount);
|
||||
truncate_inode_pages(mapping, 0);
|
||||
atomic_dec(&aspace->i_writecount);
|
||||
|
||||
gfs2_assert_withdraw(sdp, !mapping->nrpages);
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_meta_sync - Sync all buffers associated with a glock
|
||||
* @gl: The glock
|
||||
*
|
||||
*/
|
||||
|
||||
void gfs2_meta_sync(struct gfs2_glock *gl)
|
||||
{
|
||||
struct address_space *mapping = gl->gl_aspace->i_mapping;
|
||||
int error;
|
||||
|
||||
filemap_fdatawrite(mapping);
|
||||
error = filemap_fdatawait(mapping);
|
||||
|
||||
if (error)
|
||||
gfs2_io_error(gl->gl_sbd);
|
||||
}
|
||||
|
||||
/**
|
||||
* getbuf - Get a buffer with a given address space
|
||||
* @sdp: the filesystem
|
||||
* @aspace: the address space
|
||||
* @blkno: the block number (filesystem scope)
|
||||
* @create: 1 if the buffer should be created
|
||||
*
|
||||
* Returns: the buffer
|
||||
*/
|
||||
|
||||
static struct buffer_head *getbuf(struct gfs2_sbd *sdp, struct inode *aspace,
|
||||
u64 blkno, int create)
|
||||
{
|
||||
struct page *page;
|
||||
struct buffer_head *bh;
|
||||
unsigned int shift;
|
||||
unsigned long index;
|
||||
unsigned int bufnum;
|
||||
|
||||
shift = PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift;
|
||||
index = blkno >> shift; /* convert block to page */
|
||||
bufnum = blkno - (index << shift); /* block buf index within page */
|
||||
|
||||
if (create) {
|
||||
for (;;) {
|
||||
page = grab_cache_page(aspace->i_mapping, index);
|
||||
if (page)
|
||||
break;
|
||||
yield();
|
||||
}
|
||||
} else {
|
||||
page = find_lock_page(aspace->i_mapping, index);
|
||||
if (!page)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (!page_has_buffers(page))
|
||||
create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);
|
||||
|
||||
/* Locate header for our buffer within our page */
|
||||
for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
|
||||
/* Do nothing */;
|
||||
get_bh(bh);
|
||||
|
||||
if (!buffer_mapped(bh))
|
||||
map_bh(bh, sdp->sd_vfs, blkno);
|
||||
|
||||
unlock_page(page);
|
||||
mark_page_accessed(page);
|
||||
page_cache_release(page);
|
||||
|
||||
return bh;
|
||||
}
|
||||
|
||||
static void meta_prep_new(struct buffer_head *bh)
|
||||
{
|
||||
struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;
|
||||
|
||||
lock_buffer(bh);
|
||||
clear_buffer_dirty(bh);
|
||||
set_buffer_uptodate(bh);
|
||||
unlock_buffer(bh);
|
||||
|
||||
mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_meta_new - Get a block
|
||||
* @gl: The glock associated with this block
|
||||
* @blkno: The block number
|
||||
*
|
||||
* Returns: The buffer
|
||||
*/
|
||||
|
||||
struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
|
||||
{
|
||||
struct buffer_head *bh;
|
||||
bh = getbuf(gl->gl_sbd, gl->gl_aspace, blkno, CREATE);
|
||||
meta_prep_new(bh);
|
||||
return bh;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_meta_read - Read a block from disk
|
||||
* @gl: The glock covering the block
|
||||
* @blkno: The block number
|
||||
* @flags: flags
|
||||
* @bhp: the place where the buffer is returned (NULL on failure)
|
||||
*
|
||||
* Returns: errno
|
||||
*/
|
||||
|
||||
int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
|
||||
struct buffer_head **bhp)
|
||||
{
|
||||
*bhp = getbuf(gl->gl_sbd, gl->gl_aspace, blkno, CREATE);
|
||||
if (!buffer_uptodate(*bhp))
|
||||
ll_rw_block(READ_META, 1, bhp);
|
||||
if (flags & DIO_WAIT) {
|
||||
int error = gfs2_meta_wait(gl->gl_sbd, *bhp);
|
||||
if (error) {
|
||||
brelse(*bhp);
|
||||
return error;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_meta_wait - Reread a block from disk
|
||||
* @sdp: the filesystem
|
||||
* @bh: The block to wait for
|
||||
*
|
||||
* Returns: errno
|
||||
*/
|
||||
|
||||
int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
|
||||
{
|
||||
if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
|
||||
return -EIO;
|
||||
|
||||
wait_on_buffer(bh);
|
||||
|
||||
if (!buffer_uptodate(bh)) {
|
||||
struct gfs2_trans *tr = current->journal_info;
|
||||
if (tr && tr->tr_touched)
|
||||
gfs2_io_error_bh(sdp, bh);
|
||||
return -EIO;
|
||||
}
|
||||
if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
|
||||
return -EIO;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_attach_bufdata - attach a struct gfs2_bufdata structure to a buffer
|
||||
* @gl: the glock the buffer belongs to
|
||||
* @bh: The buffer to be attached to
|
||||
* @meta: Flag to indicate whether it is metadata or not
|
||||
*/
|
||||
|
||||
void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
|
||||
int meta)
|
||||
{
|
||||
struct gfs2_bufdata *bd;
|
||||
|
||||
if (meta)
|
||||
lock_page(bh->b_page);
|
||||
|
||||
if (bh->b_private) {
|
||||
if (meta)
|
||||
unlock_page(bh->b_page);
|
||||
return;
|
||||
}
|
||||
|
||||
bd = kmem_cache_alloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL);
|
||||
memset(bd, 0, sizeof(struct gfs2_bufdata));
|
||||
bd->bd_bh = bh;
|
||||
bd->bd_gl = gl;
|
||||
|
||||
INIT_LIST_HEAD(&bd->bd_list_tr);
|
||||
if (meta)
|
||||
lops_init_le(&bd->bd_le, &gfs2_buf_lops);
|
||||
else
|
||||
lops_init_le(&bd->bd_le, &gfs2_databuf_lops);
|
||||
bh->b_private = bd;
|
||||
|
||||
if (meta)
|
||||
unlock_page(bh->b_page);
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_pin - Pin a buffer in memory
|
||||
* @sdp: the filesystem the buffer belongs to
|
||||
* @bh: The buffer to be pinned
|
||||
*
|
||||
*/
|
||||
|
||||
void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
|
||||
{
|
||||
struct gfs2_bufdata *bd = bh->b_private;
|
||||
|
||||
gfs2_assert_withdraw(sdp, test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));
|
||||
|
||||
if (test_set_buffer_pinned(bh))
|
||||
gfs2_assert_withdraw(sdp, 0);
|
||||
|
||||
wait_on_buffer(bh);
|
||||
|
||||
/* If this buffer is in the AIL and it has already been written
|
||||
to in-place disk block, remove it from the AIL. */
|
||||
|
||||
gfs2_log_lock(sdp);
|
||||
if (bd->bd_ail && !buffer_in_io(bh))
|
||||
list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
|
||||
gfs2_log_unlock(sdp);
|
||||
|
||||
clear_buffer_dirty(bh);
|
||||
wait_on_buffer(bh);
|
||||
|
||||
if (!buffer_uptodate(bh))
|
||||
gfs2_io_error_bh(sdp, bh);
|
||||
|
||||
get_bh(bh);
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_unpin - Unpin a buffer
|
||||
* @sdp: the filesystem the buffer belongs to
|
||||
* @bh: The buffer to unpin
|
||||
* @ai:
|
||||
*
|
||||
*/
|
||||
|
||||
void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
|
||||
struct gfs2_ail *ai)
|
||||
{
|
||||
struct gfs2_bufdata *bd = bh->b_private;
|
||||
|
||||
gfs2_assert_withdraw(sdp, buffer_uptodate(bh));
|
||||
|
||||
if (!buffer_pinned(bh))
|
||||
gfs2_assert_withdraw(sdp, 0);
|
||||
|
||||
mark_buffer_dirty(bh);
|
||||
clear_buffer_pinned(bh);
|
||||
|
||||
gfs2_log_lock(sdp);
|
||||
if (bd->bd_ail) {
|
||||
list_del(&bd->bd_ail_st_list);
|
||||
brelse(bh);
|
||||
} else {
|
||||
struct gfs2_glock *gl = bd->bd_gl;
|
||||
list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
|
||||
atomic_inc(&gl->gl_ail_count);
|
||||
}
|
||||
bd->bd_ail = ai;
|
||||
list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
|
||||
gfs2_log_unlock(sdp);
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_meta_wipe - make inode's buffers so they aren't dirty/pinned anymore
|
||||
* @ip: the inode who owns the buffers
|
||||
* @bstart: the first buffer in the run
|
||||
* @blen: the number of buffers in the run
|
||||
*
|
||||
*/
|
||||
|
||||
void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
|
||||
{
|
||||
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
|
||||
struct inode *aspace = ip->i_gl->gl_aspace;
|
||||
struct buffer_head *bh;
|
||||
|
||||
while (blen) {
|
||||
bh = getbuf(sdp, aspace, bstart, NO_CREATE);
|
||||
if (bh) {
|
||||
struct gfs2_bufdata *bd = bh->b_private;
|
||||
|
||||
if (test_clear_buffer_pinned(bh)) {
|
||||
struct gfs2_trans *tr = current->journal_info;
|
||||
gfs2_log_lock(sdp);
|
||||
list_del_init(&bd->bd_le.le_list);
|
||||
gfs2_assert_warn(sdp, sdp->sd_log_num_buf);
|
||||
sdp->sd_log_num_buf--;
|
||||
gfs2_log_unlock(sdp);
|
||||
tr->tr_num_buf_rm++;
|
||||
brelse(bh);
|
||||
}
|
||||
if (bd) {
|
||||
gfs2_log_lock(sdp);
|
||||
if (bd->bd_ail) {
|
||||
u64 blkno = bh->b_blocknr;
|
||||
bd->bd_ail = NULL;
|
||||
list_del(&bd->bd_ail_st_list);
|
||||
list_del(&bd->bd_ail_gl_list);
|
||||
atomic_dec(&bd->bd_gl->gl_ail_count);
|
||||
brelse(bh);
|
||||
gfs2_log_unlock(sdp);
|
||||
gfs2_trans_add_revoke(sdp, blkno);
|
||||
} else
|
||||
gfs2_log_unlock(sdp);
|
||||
}
|
||||
|
||||
lock_buffer(bh);
|
||||
clear_buffer_dirty(bh);
|
||||
clear_buffer_uptodate(bh);
|
||||
unlock_buffer(bh);
|
||||
|
||||
brelse(bh);
|
||||
}
|
||||
|
||||
bstart++;
|
||||
blen--;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_meta_cache_flush - get rid of any references on buffers for this inode
|
||||
* @ip: The GFS2 inode
|
||||
*
|
||||
* This releases buffers that are in the most-recently-used array of
|
||||
* blocks used for indirect block addressing for this inode.
|
||||
*/
|
||||
|
||||
void gfs2_meta_cache_flush(struct gfs2_inode *ip)
|
||||
{
|
||||
struct buffer_head **bh_slot;
|
||||
unsigned int x;
|
||||
|
||||
spin_lock(&ip->i_spin);
|
||||
|
||||
for (x = 0; x < GFS2_MAX_META_HEIGHT; x++) {
|
||||
bh_slot = &ip->i_cache[x];
|
||||
if (!*bh_slot)
|
||||
break;
|
||||
brelse(*bh_slot);
|
||||
*bh_slot = NULL;
|
||||
}
|
||||
|
||||
spin_unlock(&ip->i_spin);
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_meta_indirect_buffer - Get a metadata buffer
|
||||
* @ip: The GFS2 inode
|
||||
* @height: The level of this buf in the metadata (indir addr) tree (if any)
|
||||
* @num: The block number (device relative) of the buffer
|
||||
* @new: Non-zero if we may create a new buffer
|
||||
* @bhp: the buffer is returned here
|
||||
*
|
||||
* Try to use the gfs2_inode's MRU metadata tree cache.
|
||||
*
|
||||
* Returns: errno
|
||||
*/
|
||||
|
||||
int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
|
||||
int new, struct buffer_head **bhp)
|
||||
{
|
||||
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
|
||||
struct gfs2_glock *gl = ip->i_gl;
|
||||
struct buffer_head *bh = NULL, **bh_slot = ip->i_cache + height;
|
||||
int in_cache = 0;
|
||||
|
||||
spin_lock(&ip->i_spin);
|
||||
if (*bh_slot && (*bh_slot)->b_blocknr == num) {
|
||||
bh = *bh_slot;
|
||||
get_bh(bh);
|
||||
in_cache = 1;
|
||||
}
|
||||
spin_unlock(&ip->i_spin);
|
||||
|
||||
if (!bh)
|
||||
bh = getbuf(gl->gl_sbd, gl->gl_aspace, num, CREATE);
|
||||
|
||||
if (!bh)
|
||||
return -ENOBUFS;
|
||||
|
||||
if (new) {
|
||||
if (gfs2_assert_warn(sdp, height))
|
||||
goto err;
|
||||
meta_prep_new(bh);
|
||||
gfs2_trans_add_bh(ip->i_gl, bh, 1);
|
||||
gfs2_metatype_set(bh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
|
||||
gfs2_buffer_clear_tail(bh, sizeof(struct gfs2_meta_header));
|
||||
} else {
|
||||
u32 mtype = height ? GFS2_METATYPE_IN : GFS2_METATYPE_DI;
|
||||
if (!buffer_uptodate(bh)) {
|
||||
ll_rw_block(READ_META, 1, &bh);
|
||||
if (gfs2_meta_wait(sdp, bh))
|
||||
goto err;
|
||||
}
|
||||
if (gfs2_metatype_check(sdp, bh, mtype))
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (!in_cache) {
|
||||
spin_lock(&ip->i_spin);
|
||||
if (*bh_slot)
|
||||
brelse(*bh_slot);
|
||||
*bh_slot = bh;
|
||||
get_bh(bh);
|
||||
spin_unlock(&ip->i_spin);
|
||||
}
|
||||
|
||||
*bhp = bh;
|
||||
return 0;
|
||||
err:
|
||||
brelse(bh);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_meta_ra - start readahead on an extent of a file
|
||||
* @gl: the glock the blocks belong to
|
||||
* @dblock: the starting disk block
|
||||
* @extlen: the number of blocks in the extent
|
||||
*
|
||||
* returns: the first buffer in the extent
|
||||
*/
|
||||
|
||||
struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
|
||||
{
|
||||
struct gfs2_sbd *sdp = gl->gl_sbd;
|
||||
struct inode *aspace = gl->gl_aspace;
|
||||
struct buffer_head *first_bh, *bh;
|
||||
u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
|
||||
sdp->sd_sb.sb_bsize_shift;
|
||||
|
||||
BUG_ON(!extlen);
|
||||
|
||||
if (max_ra < 1)
|
||||
max_ra = 1;
|
||||
if (extlen > max_ra)
|
||||
extlen = max_ra;
|
||||
|
||||
first_bh = getbuf(sdp, aspace, dblock, CREATE);
|
||||
|
||||
if (buffer_uptodate(first_bh))
|
||||
goto out;
|
||||
if (!buffer_locked(first_bh))
|
||||
ll_rw_block(READ_META, 1, &first_bh);
|
||||
|
||||
dblock++;
|
||||
extlen--;
|
||||
|
||||
while (extlen) {
|
||||
bh = getbuf(sdp, aspace, dblock, CREATE);
|
||||
|
||||
if (!buffer_uptodate(bh) && !buffer_locked(bh))
|
||||
ll_rw_block(READA, 1, &bh);
|
||||
brelse(bh);
|
||||
dblock++;
|
||||
extlen--;
|
||||
if (!buffer_locked(first_bh) && buffer_uptodate(first_bh))
|
||||
goto out;
|
||||
}
|
||||
|
||||
wait_on_buffer(first_bh);
|
||||
out:
|
||||
return first_bh;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_meta_syncfs - sync all the buffers in a filesystem
|
||||
* @sdp: the filesystem
|
||||
*
|
||||
*/
|
||||
|
||||
void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
|
||||
{
|
||||
gfs2_log_flush(sdp, NULL);
|
||||
for (;;) {
|
||||
gfs2_ail1_start(sdp, DIO_ALL);
|
||||
if (gfs2_ail1_empty(sdp, DIO_ALL))
|
||||
break;
|
||||
msleep(10);
|
||||
}
|
||||
}
|
||||
|
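Not part of the commit: getbuf() above maps a filesystem block number to a page index and a buffer slot within that page using the difference between the page and block size shifts. A worked example with assumed sizes (4096-byte pages, 1024-byte filesystem blocks).

#include <stdio.h>

int main(void)
{
	/* Assumed sizes: 4 KiB pages (shift 12), 1 KiB blocks (shift 10). */
	unsigned int page_shift = 12;
	unsigned int bsize_shift = 10;
	unsigned long long blkno = 12345;

	unsigned int shift = page_shift - bsize_shift;	/* 2 -> 4 blocks per page */
	unsigned long long index = blkno >> shift;	/* page index in the aspace */
	unsigned int bufnum = blkno - (index << shift);	/* buffer slot within the page */

	printf("block %llu lives in page %llu, buffer %u of %u\n",
	       blkno, index, bufnum, 1u << shift);
	return 0;
}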
78
fs/gfs2/meta_io.h
Normal file
@@ -0,0 +1,78 @@
/*
|
||||
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This copyrighted material is made available to anyone wishing to use,
|
||||
* modify, copy, or redistribute it subject to the terms and conditions
|
||||
* of the GNU General Public License version 2.
|
||||
*/
|
||||
|
||||
#ifndef __DIO_DOT_H__
|
||||
#define __DIO_DOT_H__
|
||||
|
||||
#include <linux/buffer_head.h>
|
||||
#include <linux/string.h>
|
||||
#include "incore.h"
|
||||
|
||||
static inline void gfs2_buffer_clear(struct buffer_head *bh)
|
||||
{
|
||||
memset(bh->b_data, 0, bh->b_size);
|
||||
}
|
||||
|
||||
static inline void gfs2_buffer_clear_tail(struct buffer_head *bh, int head)
|
||||
{
|
||||
BUG_ON(head > bh->b_size);
|
||||
memset(bh->b_data + head, 0, bh->b_size - head);
|
||||
}
|
||||
|
||||
static inline void gfs2_buffer_copy_tail(struct buffer_head *to_bh,
|
||||
int to_head,
|
||||
struct buffer_head *from_bh,
|
||||
int from_head)
|
||||
{
|
||||
BUG_ON(from_head < to_head);
|
||||
memcpy(to_bh->b_data + to_head, from_bh->b_data + from_head,
|
||||
from_bh->b_size - from_head);
|
||||
memset(to_bh->b_data + to_bh->b_size + to_head - from_head,
|
||||
0, from_head - to_head);
|
||||
}
|
||||
|
||||
struct inode *gfs2_aspace_get(struct gfs2_sbd *sdp);
|
||||
void gfs2_aspace_put(struct inode *aspace);
|
||||
|
||||
void gfs2_meta_inval(struct gfs2_glock *gl);
|
||||
void gfs2_meta_sync(struct gfs2_glock *gl);
|
||||
|
||||
struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno);
|
||||
int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno,
|
||||
int flags, struct buffer_head **bhp);
|
||||
int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh);
|
||||
|
||||
void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
|
||||
int meta);
|
||||
void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
|
||||
void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
|
||||
struct gfs2_ail *ai);
|
||||
|
||||
void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen);
|
||||
|
||||
void gfs2_meta_cache_flush(struct gfs2_inode *ip);
|
||||
int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
|
||||
int new, struct buffer_head **bhp);
|
||||
|
||||
static inline int gfs2_meta_inode_buffer(struct gfs2_inode *ip,
|
||||
struct buffer_head **bhp)
|
||||
{
|
||||
return gfs2_meta_indirect_buffer(ip, 0, ip->i_num.no_addr, 0, bhp);
|
||||
}
|
||||
|
||||
struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen);
|
||||
void gfs2_meta_syncfs(struct gfs2_sbd *sdp);
|
||||
|
||||
#define buffer_busy(bh) \
|
||||
((bh)->b_state & ((1ul << BH_Dirty) | (1ul << BH_Lock) | (1ul << BH_Pinned)))
|
||||
#define buffer_in_io(bh) \
|
||||
((bh)->b_state & ((1ul << BH_Dirty) | (1ul << BH_Lock)))
|
||||
|
||||
#endif /* __DIO_DOT_H__ */
|
||||
|
214
fs/gfs2/mount.c
Normal file
@@ -0,0 +1,214 @@
/*
|
||||
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This copyrighted material is made available to anyone wishing to use,
|
||||
* modify, copy, or redistribute it subject to the terms and conditions
|
||||
* of the GNU General Public License version 2.
|
||||
*/
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/buffer_head.h>
|
||||
#include <linux/gfs2_ondisk.h>
|
||||
#include <linux/lm_interface.h>
|
||||
|
||||
#include "gfs2.h"
|
||||
#include "incore.h"
|
||||
#include "mount.h"
|
||||
#include "sys.h"
|
||||
#include "util.h"
|
||||
|
||||
/**
|
||||
* gfs2_mount_args - Parse mount options
|
||||
* @sdp: the filesystem superblock
|
||||
* @data_arg: the raw comma-separated mount option string
|
||||
* @remount: non-zero if this is a remount rather than an initial mount
|
||||
*
|
||||
* Returns: errno
|
||||
*/
|
||||
|
||||
int gfs2_mount_args(struct gfs2_sbd *sdp, char *data_arg, int remount)
|
||||
{
|
||||
struct gfs2_args *args = &sdp->sd_args;
|
||||
char *data = data_arg;
|
||||
char *options, *o, *v;
|
||||
int error = 0;
|
||||
|
||||
if (!remount) {
|
||||
/* If someone preloaded options, use those instead */
|
||||
spin_lock(&gfs2_sys_margs_lock);
|
||||
if (gfs2_sys_margs) {
|
||||
data = gfs2_sys_margs;
|
||||
gfs2_sys_margs = NULL;
|
||||
}
|
||||
spin_unlock(&gfs2_sys_margs_lock);
|
||||
|
||||
/* Set some defaults */
|
||||
args->ar_num_glockd = GFS2_GLOCKD_DEFAULT;
|
||||
args->ar_quota = GFS2_QUOTA_DEFAULT;
|
||||
args->ar_data = GFS2_DATA_DEFAULT;
|
||||
}
|
||||
|
||||
/* Split the options into tokens with the "," character and
|
||||
process them */
|
||||
|
||||
for (options = data; (o = strsep(&options, ",")); ) {
|
||||
if (!*o)
|
||||
continue;
|
||||
|
||||
v = strchr(o, '=');
|
||||
if (v)
|
||||
*v++ = 0;
|
||||
|
||||
if (!strcmp(o, "lockproto")) {
|
||||
if (!v)
|
||||
goto need_value;
|
||||
if (remount && strcmp(v, args->ar_lockproto))
|
||||
goto cant_remount;
|
||||
strncpy(args->ar_lockproto, v, GFS2_LOCKNAME_LEN);
|
||||
args->ar_lockproto[GFS2_LOCKNAME_LEN - 1] = 0;
|
||||
}
|
||||
|
||||
else if (!strcmp(o, "locktable")) {
|
||||
if (!v)
|
||||
goto need_value;
|
||||
if (remount && strcmp(v, args->ar_locktable))
|
||||
goto cant_remount;
|
||||
strncpy(args->ar_locktable, v, GFS2_LOCKNAME_LEN);
|
||||
args->ar_locktable[GFS2_LOCKNAME_LEN - 1] = 0;
|
||||
}
|
||||
|
||||
else if (!strcmp(o, "hostdata")) {
|
||||
if (!v)
|
||||
goto need_value;
|
||||
if (remount && strcmp(v, args->ar_hostdata))
|
||||
goto cant_remount;
|
||||
strncpy(args->ar_hostdata, v, GFS2_LOCKNAME_LEN);
|
||||
args->ar_hostdata[GFS2_LOCKNAME_LEN - 1] = 0;
|
||||
}
|
||||
|
||||
else if (!strcmp(o, "spectator")) {
|
||||
if (remount && !args->ar_spectator)
|
||||
goto cant_remount;
|
||||
args->ar_spectator = 1;
|
||||
sdp->sd_vfs->s_flags |= MS_RDONLY;
|
||||
}
|
||||
|
||||
else if (!strcmp(o, "ignore_local_fs")) {
|
||||
if (remount && !args->ar_ignore_local_fs)
|
||||
goto cant_remount;
|
||||
args->ar_ignore_local_fs = 1;
|
||||
}
|
||||
|
||||
else if (!strcmp(o, "localflocks")) {
|
||||
if (remount && !args->ar_localflocks)
|
||||
goto cant_remount;
|
||||
args->ar_localflocks = 1;
|
||||
}
|
||||
|
||||
else if (!strcmp(o, "localcaching")) {
|
||||
if (remount && !args->ar_localcaching)
|
||||
goto cant_remount;
|
||||
args->ar_localcaching = 1;
|
||||
}
|
||||
|
||||
else if (!strcmp(o, "debug"))
|
||||
args->ar_debug = 1;
|
||||
|
||||
else if (!strcmp(o, "nodebug"))
|
||||
args->ar_debug = 0;
|
||||
|
||||
else if (!strcmp(o, "upgrade")) {
|
||||
if (remount && !args->ar_upgrade)
|
||||
goto cant_remount;
|
||||
args->ar_upgrade = 1;
|
||||
}
|
||||
|
||||
else if (!strcmp(o, "num_glockd")) {
|
||||
unsigned int x;
|
||||
if (!v)
|
||||
goto need_value;
|
||||
sscanf(v, "%u", &x);
|
||||
if (remount && x != args->ar_num_glockd)
|
||||
goto cant_remount;
|
||||
if (!x || x > GFS2_GLOCKD_MAX) {
|
||||
fs_info(sdp, "0 < num_glockd <= %u (not %u)\n",
|
||||
GFS2_GLOCKD_MAX, x);
|
||||
error = -EINVAL;
|
||||
break;
|
||||
}
|
||||
args->ar_num_glockd = x;
|
||||
}
|
||||
|
||||
else if (!strcmp(o, "acl")) {
|
||||
args->ar_posix_acl = 1;
|
||||
sdp->sd_vfs->s_flags |= MS_POSIXACL;
|
||||
}
|
||||
|
||||
else if (!strcmp(o, "noacl")) {
|
||||
args->ar_posix_acl = 0;
|
||||
sdp->sd_vfs->s_flags &= ~MS_POSIXACL;
|
||||
}
|
||||
|
||||
else if (!strcmp(o, "quota")) {
|
||||
if (!v)
|
||||
goto need_value;
|
||||
if (!strcmp(v, "off"))
|
||||
args->ar_quota = GFS2_QUOTA_OFF;
|
||||
else if (!strcmp(v, "account"))
|
||||
args->ar_quota = GFS2_QUOTA_ACCOUNT;
|
||||
else if (!strcmp(v, "on"))
|
||||
args->ar_quota = GFS2_QUOTA_ON;
|
||||
else {
|
||||
fs_info(sdp, "invalid value for quota\n");
|
||||
error = -EINVAL;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
else if (!strcmp(o, "suiddir"))
|
||||
args->ar_suiddir = 1;
|
||||
|
||||
else if (!strcmp(o, "nosuiddir"))
|
||||
args->ar_suiddir = 0;
|
||||
|
||||
else if (!strcmp(o, "data")) {
|
||||
if (!v)
|
||||
goto need_value;
|
||||
if (!strcmp(v, "writeback"))
|
||||
args->ar_data = GFS2_DATA_WRITEBACK;
|
||||
else if (!strcmp(v, "ordered"))
|
||||
args->ar_data = GFS2_DATA_ORDERED;
|
||||
else {
|
||||
fs_info(sdp, "invalid value for data\n");
|
||||
error = -EINVAL;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
else {
|
||||
fs_info(sdp, "unknown option: %s\n", o);
|
||||
error = -EINVAL;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (error)
|
||||
fs_info(sdp, "invalid mount option(s)\n");
|
||||
|
||||
if (data != data_arg)
|
||||
kfree(data);
|
||||
|
||||
return error;
|
||||
|
||||
need_value:
|
||||
fs_info(sdp, "need value for option %s\n", o);
|
||||
return -EINVAL;
|
||||
|
||||
cant_remount:
|
||||
fs_info(sdp, "can't remount with option %s\n", o);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
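gfs2_mount_args() above tokenizes the option string with strsep() on "," and splits each "name=value" token at the first '='. A minimal user-space sketch of that tokenizing loop; strsep() is a BSD/glibc extension, and the option names below are placeholders rather than the full GFS2 set:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char data[] = "lockproto=lock_nolock,debug,quota=on";
	char *options = data, *o, *v;

	while ((o = strsep(&options, ",")) != NULL) {
		if (!*o)
			continue;               /* skip empty tokens like ",," */

		v = strchr(o, '=');
		if (v)
			*v++ = '\0';            /* split "name=value" in place */

		if (v)
			printf("option %-10s = %s\n", o, v);
		else
			printf("flag   %s\n", o);
	}
	return 0;
}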
17
fs/gfs2/mount.h
Normal file
17
fs/gfs2/mount.h
Normal file
|
@@ -0,0 +1,17 @@
|
|||
/*
|
||||
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This copyrighted material is made available to anyone wishing to use,
|
||||
* modify, copy, or redistribute it subject to the terms and conditions
|
||||
* of the GNU General Public License version 2.
|
||||
*/
|
||||
|
||||
#ifndef __MOUNT_DOT_H__
|
||||
#define __MOUNT_DOT_H__
|
||||
|
||||
struct gfs2_sbd;
|
||||
|
||||
int gfs2_mount_args(struct gfs2_sbd *sdp, char *data_arg, int remount);
|
||||
|
||||
#endif /* __MOUNT_DOT_H__ */
|
308
fs/gfs2/ondisk.c
Normal file
308
fs/gfs2/ondisk.c
Normal file
|
@@ -0,0 +1,308 @@
|
|||
/*
|
||||
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This copyrighted material is made available to anyone wishing to use,
|
||||
* modify, copy, or redistribute it subject to the terms and conditions
|
||||
* of the GNU General Public License version 2.
|
||||
*/
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/buffer_head.h>
|
||||
|
||||
#include "gfs2.h"
|
||||
#include <linux/gfs2_ondisk.h>
|
||||
|
||||
#define pv(struct, member, fmt) printk(KERN_INFO " "#member" = "fmt"\n", \
|
||||
struct->member);
|
||||
|
||||
/*
|
||||
* gfs2_xxx_in - read in an xxx struct
|
||||
* first arg: the cpu-order structure
|
||||
* buf: the disk-order buffer
|
||||
*
|
||||
* gfs2_xxx_out - write out an xxx struct
|
||||
* first arg: the cpu-order structure
|
||||
* buf: the disk-order buffer
|
||||
*
|
||||
* gfs2_xxx_print - print out an xxx struct
|
||||
* first arg: the cpu-order structure
|
||||
*/
|
||||
|
||||
void gfs2_inum_in(struct gfs2_inum *no, const void *buf)
|
||||
{
|
||||
const struct gfs2_inum *str = buf;
|
||||
|
||||
no->no_formal_ino = be64_to_cpu(str->no_formal_ino);
|
||||
no->no_addr = be64_to_cpu(str->no_addr);
|
||||
}
|
||||
|
||||
void gfs2_inum_out(const struct gfs2_inum *no, void *buf)
|
||||
{
|
||||
struct gfs2_inum *str = buf;
|
||||
|
||||
str->no_formal_ino = cpu_to_be64(no->no_formal_ino);
|
||||
str->no_addr = cpu_to_be64(no->no_addr);
|
||||
}
|
||||
|
||||
static void gfs2_inum_print(const struct gfs2_inum *no)
|
||||
{
|
||||
printk(KERN_INFO " no_formal_ino = %llu\n", (unsigned long long)no->no_formal_ino);
|
||||
printk(KERN_INFO " no_addr = %llu\n", (unsigned long long)no->no_addr);
|
||||
}
|
||||
|
||||
static void gfs2_meta_header_in(struct gfs2_meta_header *mh, const void *buf)
|
||||
{
|
||||
const struct gfs2_meta_header *str = buf;
|
||||
|
||||
mh->mh_magic = be32_to_cpu(str->mh_magic);
|
||||
mh->mh_type = be32_to_cpu(str->mh_type);
|
||||
mh->mh_format = be32_to_cpu(str->mh_format);
|
||||
}
|
||||
|
||||
static void gfs2_meta_header_out(const struct gfs2_meta_header *mh, void *buf)
|
||||
{
|
||||
struct gfs2_meta_header *str = buf;
|
||||
|
||||
str->mh_magic = cpu_to_be32(mh->mh_magic);
|
||||
str->mh_type = cpu_to_be32(mh->mh_type);
|
||||
str->mh_format = cpu_to_be32(mh->mh_format);
|
||||
}
|
||||
|
||||
static void gfs2_meta_header_print(const struct gfs2_meta_header *mh)
|
||||
{
|
||||
pv(mh, mh_magic, "0x%.8X");
|
||||
pv(mh, mh_type, "%u");
|
||||
pv(mh, mh_format, "%u");
|
||||
}
|
||||
|
||||
void gfs2_sb_in(struct gfs2_sb *sb, const void *buf)
|
||||
{
|
||||
const struct gfs2_sb *str = buf;
|
||||
|
||||
gfs2_meta_header_in(&sb->sb_header, buf);
|
||||
|
||||
sb->sb_fs_format = be32_to_cpu(str->sb_fs_format);
|
||||
sb->sb_multihost_format = be32_to_cpu(str->sb_multihost_format);
|
||||
sb->sb_bsize = be32_to_cpu(str->sb_bsize);
|
||||
sb->sb_bsize_shift = be32_to_cpu(str->sb_bsize_shift);
|
||||
|
||||
gfs2_inum_in(&sb->sb_master_dir, (char *)&str->sb_master_dir);
|
||||
gfs2_inum_in(&sb->sb_root_dir, (char *)&str->sb_root_dir);
|
||||
|
||||
memcpy(sb->sb_lockproto, str->sb_lockproto, GFS2_LOCKNAME_LEN);
|
||||
memcpy(sb->sb_locktable, str->sb_locktable, GFS2_LOCKNAME_LEN);
|
||||
}
|
||||
|
||||
void gfs2_rindex_in(struct gfs2_rindex *ri, const void *buf)
|
||||
{
|
||||
const struct gfs2_rindex *str = buf;
|
||||
|
||||
ri->ri_addr = be64_to_cpu(str->ri_addr);
|
||||
ri->ri_length = be32_to_cpu(str->ri_length);
|
||||
ri->ri_data0 = be64_to_cpu(str->ri_data0);
|
||||
ri->ri_data = be32_to_cpu(str->ri_data);
|
||||
ri->ri_bitbytes = be32_to_cpu(str->ri_bitbytes);
|
||||
|
||||
}
|
||||
|
||||
void gfs2_rindex_print(const struct gfs2_rindex *ri)
|
||||
{
|
||||
printk(KERN_INFO " ri_addr = %llu\n", (unsigned long long)ri->ri_addr);
|
||||
pv(ri, ri_length, "%u");
|
||||
|
||||
printk(KERN_INFO " ri_data0 = %llu\n", (unsigned long long)ri->ri_data0);
|
||||
pv(ri, ri_data, "%u");
|
||||
|
||||
pv(ri, ri_bitbytes, "%u");
|
||||
}
|
||||
|
||||
void gfs2_rgrp_in(struct gfs2_rgrp *rg, const void *buf)
|
||||
{
|
||||
const struct gfs2_rgrp *str = buf;
|
||||
|
||||
gfs2_meta_header_in(&rg->rg_header, buf);
|
||||
rg->rg_flags = be32_to_cpu(str->rg_flags);
|
||||
rg->rg_free = be32_to_cpu(str->rg_free);
|
||||
rg->rg_dinodes = be32_to_cpu(str->rg_dinodes);
|
||||
rg->rg_igeneration = be64_to_cpu(str->rg_igeneration);
|
||||
}
|
||||
|
||||
void gfs2_rgrp_out(const struct gfs2_rgrp *rg, void *buf)
|
||||
{
|
||||
struct gfs2_rgrp *str = buf;
|
||||
|
||||
gfs2_meta_header_out(&rg->rg_header, buf);
|
||||
str->rg_flags = cpu_to_be32(rg->rg_flags);
|
||||
str->rg_free = cpu_to_be32(rg->rg_free);
|
||||
str->rg_dinodes = cpu_to_be32(rg->rg_dinodes);
|
||||
str->__pad = cpu_to_be32(0);
|
||||
str->rg_igeneration = cpu_to_be64(rg->rg_igeneration);
|
||||
memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
|
||||
}
|
||||
|
||||
void gfs2_quota_in(struct gfs2_quota *qu, const void *buf)
|
||||
{
|
||||
const struct gfs2_quota *str = buf;
|
||||
|
||||
qu->qu_limit = be64_to_cpu(str->qu_limit);
|
||||
qu->qu_warn = be64_to_cpu(str->qu_warn);
|
||||
qu->qu_value = be64_to_cpu(str->qu_value);
|
||||
}
|
||||
|
||||
void gfs2_dinode_in(struct gfs2_dinode *di, const void *buf)
|
||||
{
|
||||
const struct gfs2_dinode *str = buf;
|
||||
|
||||
gfs2_meta_header_in(&di->di_header, buf);
|
||||
gfs2_inum_in(&di->di_num, &str->di_num);
|
||||
|
||||
di->di_mode = be32_to_cpu(str->di_mode);
|
||||
di->di_uid = be32_to_cpu(str->di_uid);
|
||||
di->di_gid = be32_to_cpu(str->di_gid);
|
||||
di->di_nlink = be32_to_cpu(str->di_nlink);
|
||||
di->di_size = be64_to_cpu(str->di_size);
|
||||
di->di_blocks = be64_to_cpu(str->di_blocks);
|
||||
di->di_atime = be64_to_cpu(str->di_atime);
|
||||
di->di_mtime = be64_to_cpu(str->di_mtime);
|
||||
di->di_ctime = be64_to_cpu(str->di_ctime);
|
||||
di->di_major = be32_to_cpu(str->di_major);
|
||||
di->di_minor = be32_to_cpu(str->di_minor);
|
||||
|
||||
di->di_goal_meta = be64_to_cpu(str->di_goal_meta);
|
||||
di->di_goal_data = be64_to_cpu(str->di_goal_data);
|
||||
di->di_generation = be64_to_cpu(str->di_generation);
|
||||
|
||||
di->di_flags = be32_to_cpu(str->di_flags);
|
||||
di->di_payload_format = be32_to_cpu(str->di_payload_format);
|
||||
di->di_height = be16_to_cpu(str->di_height);
|
||||
|
||||
di->di_depth = be16_to_cpu(str->di_depth);
|
||||
di->di_entries = be32_to_cpu(str->di_entries);
|
||||
|
||||
di->di_eattr = be64_to_cpu(str->di_eattr);
|
||||
|
||||
}
|
||||
|
||||
void gfs2_dinode_out(const struct gfs2_dinode *di, void *buf)
|
||||
{
|
||||
struct gfs2_dinode *str = buf;
|
||||
|
||||
gfs2_meta_header_out(&di->di_header, buf);
|
||||
gfs2_inum_out(&di->di_num, (char *)&str->di_num);
|
||||
|
||||
str->di_mode = cpu_to_be32(di->di_mode);
|
||||
str->di_uid = cpu_to_be32(di->di_uid);
|
||||
str->di_gid = cpu_to_be32(di->di_gid);
|
||||
str->di_nlink = cpu_to_be32(di->di_nlink);
|
||||
str->di_size = cpu_to_be64(di->di_size);
|
||||
str->di_blocks = cpu_to_be64(di->di_blocks);
|
||||
str->di_atime = cpu_to_be64(di->di_atime);
|
||||
str->di_mtime = cpu_to_be64(di->di_mtime);
|
||||
str->di_ctime = cpu_to_be64(di->di_ctime);
|
||||
str->di_major = cpu_to_be32(di->di_major);
|
||||
str->di_minor = cpu_to_be32(di->di_minor);
|
||||
|
||||
str->di_goal_meta = cpu_to_be64(di->di_goal_meta);
|
||||
str->di_goal_data = cpu_to_be64(di->di_goal_data);
|
||||
str->di_generation = cpu_to_be64(di->di_generation);
|
||||
|
||||
str->di_flags = cpu_to_be32(di->di_flags);
|
||||
str->di_payload_format = cpu_to_be32(di->di_payload_format);
|
||||
str->di_height = cpu_to_be16(di->di_height);
|
||||
|
||||
str->di_depth = cpu_to_be16(di->di_depth);
|
||||
str->di_entries = cpu_to_be32(di->di_entries);
|
||||
|
||||
str->di_eattr = cpu_to_be64(di->di_eattr);
|
||||
|
||||
}
|
||||
|
||||
void gfs2_dinode_print(const struct gfs2_dinode *di)
|
||||
{
|
||||
gfs2_meta_header_print(&di->di_header);
|
||||
gfs2_inum_print(&di->di_num);
|
||||
|
||||
pv(di, di_mode, "0%o");
|
||||
pv(di, di_uid, "%u");
|
||||
pv(di, di_gid, "%u");
|
||||
pv(di, di_nlink, "%u");
|
||||
printk(KERN_INFO " di_size = %llu\n", (unsigned long long)di->di_size);
|
||||
printk(KERN_INFO " di_blocks = %llu\n", (unsigned long long)di->di_blocks);
|
||||
printk(KERN_INFO " di_atime = %lld\n", (long long)di->di_atime);
|
||||
printk(KERN_INFO " di_mtime = %lld\n", (long long)di->di_mtime);
|
||||
printk(KERN_INFO " di_ctime = %lld\n", (long long)di->di_ctime);
|
||||
pv(di, di_major, "%u");
|
||||
pv(di, di_minor, "%u");
|
||||
|
||||
printk(KERN_INFO " di_goal_meta = %llu\n", (unsigned long long)di->di_goal_meta);
|
||||
printk(KERN_INFO " di_goal_data = %llu\n", (unsigned long long)di->di_goal_data);
|
||||
|
||||
pv(di, di_flags, "0x%.8X");
|
||||
pv(di, di_payload_format, "%u");
|
||||
pv(di, di_height, "%u");
|
||||
|
||||
pv(di, di_depth, "%u");
|
||||
pv(di, di_entries, "%u");
|
||||
|
||||
printk(KERN_INFO " di_eattr = %llu\n", (unsigned long long)di->di_eattr);
|
||||
}
|
||||
|
||||
void gfs2_log_header_in(struct gfs2_log_header *lh, const void *buf)
|
||||
{
|
||||
const struct gfs2_log_header *str = buf;
|
||||
|
||||
gfs2_meta_header_in(&lh->lh_header, buf);
|
||||
lh->lh_sequence = be64_to_cpu(str->lh_sequence);
|
||||
lh->lh_flags = be32_to_cpu(str->lh_flags);
|
||||
lh->lh_tail = be32_to_cpu(str->lh_tail);
|
||||
lh->lh_blkno = be32_to_cpu(str->lh_blkno);
|
||||
lh->lh_hash = be32_to_cpu(str->lh_hash);
|
||||
}
|
||||
|
||||
void gfs2_inum_range_in(struct gfs2_inum_range *ir, const void *buf)
|
||||
{
|
||||
const struct gfs2_inum_range *str = buf;
|
||||
|
||||
ir->ir_start = be64_to_cpu(str->ir_start);
|
||||
ir->ir_length = be64_to_cpu(str->ir_length);
|
||||
}
|
||||
|
||||
void gfs2_inum_range_out(const struct gfs2_inum_range *ir, void *buf)
|
||||
{
|
||||
struct gfs2_inum_range *str = buf;
|
||||
|
||||
str->ir_start = cpu_to_be64(ir->ir_start);
|
||||
str->ir_length = cpu_to_be64(ir->ir_length);
|
||||
}
|
||||
|
||||
void gfs2_statfs_change_in(struct gfs2_statfs_change *sc, const void *buf)
|
||||
{
|
||||
const struct gfs2_statfs_change *str = buf;
|
||||
|
||||
sc->sc_total = be64_to_cpu(str->sc_total);
|
||||
sc->sc_free = be64_to_cpu(str->sc_free);
|
||||
sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
|
||||
}
|
||||
|
||||
void gfs2_statfs_change_out(const struct gfs2_statfs_change *sc, void *buf)
|
||||
{
|
||||
struct gfs2_statfs_change *str = buf;
|
||||
|
||||
str->sc_total = cpu_to_be64(sc->sc_total);
|
||||
str->sc_free = cpu_to_be64(sc->sc_free);
|
||||
str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
|
||||
}
|
||||
|
||||
void gfs2_quota_change_in(struct gfs2_quota_change *qc, const void *buf)
|
||||
{
|
||||
const struct gfs2_quota_change *str = buf;
|
||||
|
||||
qc->qc_change = be64_to_cpu(str->qc_change);
|
||||
qc->qc_flags = be32_to_cpu(str->qc_flags);
|
||||
qc->qc_id = be32_to_cpu(str->qc_id);
|
||||
}
|
||||
|
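Each _in()/_out() pair above converts field by field between the big-endian on-disk layout and a native-endian in-core copy. The same pattern in user space for a made-up two-field structure, with glibc's be64toh()/htobe64() standing in for be64_to_cpu()/cpu_to_be64() (an illustration only, not a real GFS2 on-disk type):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct demo_ondisk {                    /* big-endian, as stored on disk */
	uint64_t d_addr;
	uint64_t d_len;
};

struct demo_incore {                    /* native CPU order, as used in memory */
	uint64_t addr;
	uint64_t len;
};

static void demo_in(struct demo_incore *mem, const struct demo_ondisk *str)
{
	mem->addr = be64toh(str->d_addr);
	mem->len  = be64toh(str->d_len);
}

static void demo_out(const struct demo_incore *mem, struct demo_ondisk *str)
{
	str->d_addr = htobe64(mem->addr);
	str->d_len  = htobe64(mem->len);
}

int main(void)
{
	struct demo_incore in = { .addr = 0x1234, .len = 42 }, out;
	struct demo_ondisk disk;

	demo_out(&in, &disk);                   /* in-core -> disk order */
	demo_in(&out, &disk);                   /* disk order -> in-core */
	printf("addr=%llu len=%llu\n",
	       (unsigned long long)out.addr, (unsigned long long)out.len);
	return 0;
}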
790
fs/gfs2/ops_address.c
Normal file
790
fs/gfs2/ops_address.c
Normal file
|
@@ -0,0 +1,790 @@
|
|||
/*
|
||||
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This copyrighted material is made available to anyone wishing to use,
|
||||
* modify, copy, or redistribute it subject to the terms and conditions
|
||||
* of the GNU General Public License version 2.
|
||||
*/
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/buffer_head.h>
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/pagevec.h>
|
||||
#include <linux/mpage.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/gfs2_ondisk.h>
|
||||
#include <linux/lm_interface.h>
|
||||
|
||||
#include "gfs2.h"
|
||||
#include "incore.h"
|
||||
#include "bmap.h"
|
||||
#include "glock.h"
|
||||
#include "inode.h"
|
||||
#include "log.h"
|
||||
#include "meta_io.h"
|
||||
#include "ops_address.h"
|
||||
#include "quota.h"
|
||||
#include "trans.h"
|
||||
#include "rgrp.h"
|
||||
#include "ops_file.h"
|
||||
#include "util.h"
|
||||
#include "glops.h"
|
||||
|
||||
|
||||
static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
|
||||
unsigned int from, unsigned int to)
|
||||
{
|
||||
struct buffer_head *head = page_buffers(page);
|
||||
unsigned int bsize = head->b_size;
|
||||
struct buffer_head *bh;
|
||||
unsigned int start, end;
|
||||
|
||||
for (bh = head, start = 0; bh != head || !start;
|
||||
bh = bh->b_this_page, start = end) {
|
||||
end = start + bsize;
|
||||
if (end <= from || start >= to)
|
||||
continue;
|
||||
gfs2_trans_add_bh(ip->i_gl, bh, 0);
|
||||
}
|
||||
}
|
||||
|
||||
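gfs2_page_add_databufs() above walks the page's buffer heads and picks only those whose byte range overlaps the written region [from, to). A small user-space sketch of that overlap walk with example sizes (4096-byte page, 1024-byte buffers; not the real on-disk geometry):

#include <stdio.h>

int main(void)
{
	const unsigned page_size = 4096, bsize = 1024;
	const unsigned from = 1500, to = 3100;  /* written byte range */
	unsigned start, end;

	for (start = 0; start < page_size; start = end) {
		end = start + bsize;
		if (end <= from || start >= to)
			continue;       /* buffer entirely outside the write */
		printf("buffer [%u,%u) overlaps the write\n", start, end);
	}
	return 0;
}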
/**
|
||||
* gfs2_get_block - Fills in a buffer head with details about a block
|
||||
* @inode: The inode
|
||||
* @lblock: The block number to look up
|
||||
* @bh_result: The buffer head to return the result in
|
||||
* @create: Non-zero if we may add block to the file
|
||||
*
|
||||
* Returns: errno
|
||||
*/
|
||||
|
||||
int gfs2_get_block(struct inode *inode, sector_t lblock,
|
||||
struct buffer_head *bh_result, int create)
|
||||
{
|
||||
return gfs2_block_map(inode, lblock, create, bh_result, 32);
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_get_block_noalloc - Fills in a buffer head with details about a block
|
||||
* @inode: The inode
|
||||
* @lblock: The block number to look up
|
||||
* @bh_result: The buffer head to return the result in
|
||||
* @create: Non-zero if we may add block to the file
|
||||
*
|
||||
* Returns: errno
|
||||
*/
|
||||
|
||||
static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
|
||||
struct buffer_head *bh_result, int create)
|
||||
{
|
||||
int error;
|
||||
|
||||
error = gfs2_block_map(inode, lblock, 0, bh_result, 1);
|
||||
if (error)
|
||||
return error;
|
||||
if (bh_result->b_blocknr == 0)
|
||||
return -EIO;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
|
||||
struct buffer_head *bh_result, int create)
|
||||
{
|
||||
return gfs2_block_map(inode, lblock, 0, bh_result, 32);
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_writepage - Write complete page
|
||||
* @page: Page to write
|
||||
*
|
||||
* Returns: errno
|
||||
*
|
||||
* Some of this is copied from block_write_full_page() although we still
|
||||
* call it to do most of the work.
|
||||
*/
|
||||
|
||||
static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
|
||||
{
|
||||
struct inode *inode = page->mapping->host;
|
||||
struct gfs2_inode *ip = GFS2_I(inode);
|
||||
struct gfs2_sbd *sdp = GFS2_SB(inode);
|
||||
loff_t i_size = i_size_read(inode);
|
||||
pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
|
||||
unsigned offset;
|
||||
int error;
|
||||
int done_trans = 0;
|
||||
|
||||
if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl))) {
|
||||
unlock_page(page);
|
||||
return -EIO;
|
||||
}
|
||||
if (current->journal_info)
|
||||
goto out_ignore;
|
||||
|
||||
/* Is the page fully outside i_size? (truncate in progress) */
|
||||
offset = i_size & (PAGE_CACHE_SIZE-1);
|
||||
if (page->index > end_index || (page->index == end_index && !offset)) {
|
||||
page->mapping->a_ops->invalidatepage(page, 0);
|
||||
unlock_page(page);
|
||||
return 0; /* don't care */
|
||||
}
|
||||
|
||||
if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED || gfs2_is_jdata(ip)) {
|
||||
error = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
|
||||
if (error)
|
||||
goto out_ignore;
|
||||
if (!page_has_buffers(page)) {
|
||||
create_empty_buffers(page, inode->i_sb->s_blocksize,
|
||||
(1 << BH_Dirty)|(1 << BH_Uptodate));
|
||||
}
|
||||
gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
|
||||
done_trans = 1;
|
||||
}
|
||||
error = block_write_full_page(page, gfs2_get_block_noalloc, wbc);
|
||||
if (done_trans)
|
||||
gfs2_trans_end(sdp);
|
||||
gfs2_meta_cache_flush(ip);
|
||||
return error;
|
||||
|
||||
out_ignore:
|
||||
redirty_page_for_writepage(wbc, page);
|
||||
unlock_page(page);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int zero_readpage(struct page *page)
|
||||
{
|
||||
void *kaddr;
|
||||
|
||||
kaddr = kmap_atomic(page, KM_USER0);
|
||||
memset(kaddr, 0, PAGE_CACHE_SIZE);
|
||||
kunmap_atomic(page, KM_USER0);
|
||||
|
||||
SetPageUptodate(page);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* stuffed_readpage - Fill in a Linux page with stuffed file data
|
||||
* @ip: the inode
|
||||
* @page: the page
|
||||
*
|
||||
* Returns: errno
|
||||
*/
|
||||
|
||||
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
|
||||
{
|
||||
struct buffer_head *dibh;
|
||||
void *kaddr;
|
||||
int error;
|
||||
|
||||
/* Only the first page of a stuffed file might contain data */
|
||||
if (unlikely(page->index))
|
||||
return zero_readpage(page);
|
||||
|
||||
error = gfs2_meta_inode_buffer(ip, &dibh);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
kaddr = kmap_atomic(page, KM_USER0);
|
||||
memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
|
||||
ip->i_di.di_size);
|
||||
memset(kaddr + ip->i_di.di_size, 0, PAGE_CACHE_SIZE - ip->i_di.di_size);
|
||||
kunmap_atomic(page, KM_USER0);
|
||||
|
||||
brelse(dibh);
|
||||
|
||||
SetPageUptodate(page);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* gfs2_readpage - readpage with locking
|
||||
* @file: The file to read a page for. N.B. This may be NULL if we are
|
||||
* reading an internal file.
|
||||
* @page: The page to read
|
||||
*
|
||||
* Returns: errno
|
||||
*/
|
||||
|
||||
static int gfs2_readpage(struct file *file, struct page *page)
|
||||
{
|
||||
struct gfs2_inode *ip = GFS2_I(page->mapping->host);
|
||||
struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
|
||||
struct gfs2_file *gf = NULL;
|
||||
struct gfs2_holder gh;
|
||||
int error;
|
||||
int do_unlock = 0;
|
||||
|
||||
if (likely(file != &gfs2_internal_file_sentinel)) {
|
||||
if (file) {
|
||||
gf = file->private_data;
|
||||
if (test_bit(GFF_EXLOCK, &gf->f_flags))
|
||||
/* gfs2_sharewrite_nopage has grabbed the ip->i_gl already */
|
||||
goto skip_lock;
|
||||
}
|
||||
gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|GL_AOP, &gh);
|
||||
do_unlock = 1;
|
||||
error = gfs2_glock_nq_m_atime(1, &gh);
|
||||
if (unlikely(error))
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
skip_lock:
|
||||
if (gfs2_is_stuffed(ip)) {
|
||||
error = stuffed_readpage(ip, page);
|
||||
unlock_page(page);
|
||||
} else
|
||||
error = mpage_readpage(page, gfs2_get_block);
|
||||
|
||||
if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
|
||||
error = -EIO;
|
||||
|
||||
if (do_unlock) {
|
||||
gfs2_glock_dq_m(1, &gh);
|
||||
gfs2_holder_uninit(&gh);
|
||||
}
|
||||
out:
|
||||
return error;
|
||||
out_unlock:
|
||||
unlock_page(page);
|
||||
if (do_unlock)
|
||||
gfs2_holder_uninit(&gh);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_readpages - Read a bunch of pages at once
|
||||
*
|
||||
* Some notes:
|
||||
* 1. This is only for readahead, so we can simply ignore anything
|
||||
* which is slightly inconvenient (such as locking conflicts between
|
||||
* the page lock and the glock) and return having done no I/O. It's
|
||||
* obviously not something we'd want to do on too regular a basis.
|
||||
* Any I/O we ignore at this time will be done via readpage later.
|
||||
* 2. We have to handle stuffed files here too.
|
||||
* 3. mpage_readpages() does most of the heavy lifting in the common case.
|
||||
* 4. gfs2_get_block() is relied upon to set BH_Boundary in the right places.
|
||||
* 5. We use LM_FLAG_TRY_1CB here, so effectively we get lock-ahead as
|
||||
* well as read-ahead.
|
||||
*/
|
||||
static int gfs2_readpages(struct file *file, struct address_space *mapping,
|
||||
struct list_head *pages, unsigned nr_pages)
|
||||
{
|
||||
struct inode *inode = mapping->host;
|
||||
struct gfs2_inode *ip = GFS2_I(inode);
|
||||
struct gfs2_sbd *sdp = GFS2_SB(inode);
|
||||
struct gfs2_holder gh;
|
||||
unsigned page_idx;
|
||||
int ret;
|
||||
int do_unlock = 0;
|
||||
|
||||
if (likely(file != &gfs2_internal_file_sentinel)) {
|
||||
if (file) {
|
||||
struct gfs2_file *gf = file->private_data;
|
||||
if (test_bit(GFF_EXLOCK, &gf->f_flags))
|
||||
goto skip_lock;
|
||||
}
|
||||
gfs2_holder_init(ip->i_gl, LM_ST_SHARED,
|
||||
LM_FLAG_TRY_1CB|GL_ATIME|GL_AOP, &gh);
|
||||
do_unlock = 1;
|
||||
ret = gfs2_glock_nq_m_atime(1, &gh);
|
||||
if (ret == GLR_TRYFAILED)
|
||||
goto out_noerror;
|
||||
if (unlikely(ret))
|
||||
goto out_unlock;
|
||||
}
|
||||
skip_lock:
|
||||
if (gfs2_is_stuffed(ip)) {
|
||||
struct pagevec lru_pvec;
|
||||
pagevec_init(&lru_pvec, 0);
|
||||
for (page_idx = 0; page_idx < nr_pages; page_idx++) {
|
||||
struct page *page = list_entry(pages->prev, struct page, lru);
|
||||
prefetchw(&page->flags);
|
||||
list_del(&page->lru);
|
||||
if (!add_to_page_cache(page, mapping,
|
||||
page->index, GFP_KERNEL)) {
|
||||
ret = stuffed_readpage(ip, page);
|
||||
unlock_page(page);
|
||||
if (!pagevec_add(&lru_pvec, page))
|
||||
__pagevec_lru_add(&lru_pvec);
|
||||
} else {
|
||||
page_cache_release(page);
|
||||
}
|
||||
}
|
||||
pagevec_lru_add(&lru_pvec);
|
||||
ret = 0;
|
||||
} else {
|
||||
/* What we really want to do .... */
|
||||
ret = mpage_readpages(mapping, pages, nr_pages, gfs2_get_block);
|
||||
}
|
||||
|
||||
if (do_unlock) {
|
||||
gfs2_glock_dq_m(1, &gh);
|
||||
gfs2_holder_uninit(&gh);
|
||||
}
|
||||
out:
|
||||
if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
|
||||
ret = -EIO;
|
||||
return ret;
|
||||
out_noerror:
|
||||
ret = 0;
|
||||
out_unlock:
|
||||
/* unlock all pages, we can't do any I/O right now */
|
||||
for (page_idx = 0; page_idx < nr_pages; page_idx++) {
|
||||
struct page *page = list_entry(pages->prev, struct page, lru);
|
||||
list_del(&page->lru);
|
||||
unlock_page(page);
|
||||
page_cache_release(page);
|
||||
}
|
||||
if (do_unlock)
|
||||
gfs2_holder_uninit(&gh);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_prepare_write - Prepare to write a page to a file
|
||||
* @file: The file to write to
|
||||
* @page: The page which is to be prepared for writing
|
||||
* @from: From (byte range within page)
|
||||
* @to: To (byte range within page)
|
||||
*
|
||||
* Returns: errno
|
||||
*/
|
||||
|
||||
static int gfs2_prepare_write(struct file *file, struct page *page,
|
||||
unsigned from, unsigned to)
|
||||
{
|
||||
struct gfs2_inode *ip = GFS2_I(page->mapping->host);
|
||||
struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
|
||||
unsigned int data_blocks, ind_blocks, rblocks;
|
||||
int alloc_required;
|
||||
int error = 0;
|
||||
loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + from;
|
||||
loff_t end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
|
||||
struct gfs2_alloc *al;
|
||||
|
||||
gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME|GL_AOP, &ip->i_gh);
|
||||
error = gfs2_glock_nq_m_atime(1, &ip->i_gh);
|
||||
if (error)
|
||||
goto out_uninit;
|
||||
|
||||
gfs2_write_calc_reserv(ip, to - from, &data_blocks, &ind_blocks);
|
||||
|
||||
error = gfs2_write_alloc_required(ip, pos, from - to, &alloc_required);
|
||||
if (error)
|
||||
goto out_unlock;
|
||||
|
||||
|
||||
if (alloc_required) {
|
||||
al = gfs2_alloc_get(ip);
|
||||
|
||||
error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
|
||||
if (error)
|
||||
goto out_alloc_put;
|
||||
|
||||
error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid);
|
||||
if (error)
|
||||
goto out_qunlock;
|
||||
|
||||
al->al_requested = data_blocks + ind_blocks;
|
||||
error = gfs2_inplace_reserve(ip);
|
||||
if (error)
|
||||
goto out_qunlock;
|
||||
}
|
||||
|
||||
rblocks = RES_DINODE + ind_blocks;
|
||||
if (gfs2_is_jdata(ip))
|
||||
rblocks += data_blocks ? data_blocks : 1;
|
||||
if (ind_blocks || data_blocks)
|
||||
rblocks += RES_STATFS + RES_QUOTA;
|
||||
|
||||
error = gfs2_trans_begin(sdp, rblocks, 0);
|
||||
if (error)
|
||||
goto out;
|
||||
|
||||
if (gfs2_is_stuffed(ip)) {
|
||||
if (end > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
|
||||
error = gfs2_unstuff_dinode(ip, page);
|
||||
if (error == 0)
|
||||
goto prepare_write;
|
||||
} else if (!PageUptodate(page))
|
||||
error = stuffed_readpage(ip, page);
|
||||
goto out;
|
||||
}
|
||||
|
||||
prepare_write:
|
||||
error = block_prepare_write(page, from, to, gfs2_get_block);
|
||||
|
||||
out:
|
||||
if (error) {
|
||||
gfs2_trans_end(sdp);
|
||||
if (alloc_required) {
|
||||
gfs2_inplace_release(ip);
|
||||
out_qunlock:
|
||||
gfs2_quota_unlock(ip);
|
||||
out_alloc_put:
|
||||
gfs2_alloc_put(ip);
|
||||
}
|
||||
out_unlock:
|
||||
gfs2_glock_dq_m(1, &ip->i_gh);
|
||||
out_uninit:
|
||||
gfs2_holder_uninit(&ip->i_gh);
|
||||
}
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_commit_write - Commit write to a file
|
||||
* @file: The file to write to
|
||||
* @page: The page containing the data
|
||||
* @from: From (byte range within page)
|
||||
* @to: To (byte range within page)
|
||||
*
|
||||
* Returns: errno
|
||||
*/
|
||||
|
||||
static int gfs2_commit_write(struct file *file, struct page *page,
|
||||
unsigned from, unsigned to)
|
||||
{
|
||||
struct inode *inode = page->mapping->host;
|
||||
struct gfs2_inode *ip = GFS2_I(inode);
|
||||
struct gfs2_sbd *sdp = GFS2_SB(inode);
|
||||
int error = -EOPNOTSUPP;
|
||||
struct buffer_head *dibh;
|
||||
struct gfs2_alloc *al = &ip->i_alloc;
|
||||
struct gfs2_dinode *di;
|
||||
|
||||
if (gfs2_assert_withdraw(sdp, gfs2_glock_is_locked_by_me(ip->i_gl)))
|
||||
goto fail_nounlock;
|
||||
|
||||
error = gfs2_meta_inode_buffer(ip, &dibh);
|
||||
if (error)
|
||||
goto fail_endtrans;
|
||||
|
||||
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
|
||||
di = (struct gfs2_dinode *)dibh->b_data;
|
||||
|
||||
if (gfs2_is_stuffed(ip)) {
|
||||
u64 file_size;
|
||||
void *kaddr;
|
||||
|
||||
file_size = ((u64)page->index << PAGE_CACHE_SHIFT) + to;
|
||||
|
||||
kaddr = kmap_atomic(page, KM_USER0);
|
||||
memcpy(dibh->b_data + sizeof(struct gfs2_dinode) + from,
|
||||
kaddr + from, to - from);
|
||||
kunmap_atomic(page, KM_USER0);
|
||||
|
||||
SetPageUptodate(page);
|
||||
|
||||
if (inode->i_size < file_size)
|
||||
i_size_write(inode, file_size);
|
||||
} else {
|
||||
if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED ||
|
||||
gfs2_is_jdata(ip))
|
||||
gfs2_page_add_databufs(ip, page, from, to);
|
||||
error = generic_commit_write(file, page, from, to);
|
||||
if (error)
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if (ip->i_di.di_size < inode->i_size) {
|
||||
ip->i_di.di_size = inode->i_size;
|
||||
di->di_size = cpu_to_be64(inode->i_size);
|
||||
}
|
||||
|
||||
di->di_mode = cpu_to_be32(inode->i_mode);
|
||||
di->di_atime = cpu_to_be64(inode->i_atime.tv_sec);
|
||||
di->di_mtime = cpu_to_be64(inode->i_mtime.tv_sec);
|
||||
di->di_ctime = cpu_to_be64(inode->i_ctime.tv_sec);
|
||||
|
||||
brelse(dibh);
|
||||
gfs2_trans_end(sdp);
|
||||
if (al->al_requested) {
|
||||
gfs2_inplace_release(ip);
|
||||
gfs2_quota_unlock(ip);
|
||||
gfs2_alloc_put(ip);
|
||||
}
|
||||
gfs2_glock_dq_m(1, &ip->i_gh);
|
||||
gfs2_holder_uninit(&ip->i_gh);
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
brelse(dibh);
|
||||
fail_endtrans:
|
||||
gfs2_trans_end(sdp);
|
||||
if (al->al_requested) {
|
||||
gfs2_inplace_release(ip);
|
||||
gfs2_quota_unlock(ip);
|
||||
gfs2_alloc_put(ip);
|
||||
}
|
||||
gfs2_glock_dq_m(1, &ip->i_gh);
|
||||
gfs2_holder_uninit(&ip->i_gh);
|
||||
fail_nounlock:
|
||||
ClearPageUptodate(page);
|
||||
return error;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_bmap - Block map function
|
||||
* @mapping: Address space info
|
||||
* @lblock: The block to map
|
||||
*
|
||||
* Returns: The disk address for the block or 0 on hole or error
|
||||
*/
|
||||
|
||||
static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
|
||||
{
|
||||
struct gfs2_inode *ip = GFS2_I(mapping->host);
|
||||
struct gfs2_holder i_gh;
|
||||
sector_t dblock = 0;
|
||||
int error;
|
||||
|
||||
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
|
||||
if (error)
|
||||
return 0;
|
||||
|
||||
if (!gfs2_is_stuffed(ip))
|
||||
dblock = generic_block_bmap(mapping, lblock, gfs2_get_block);
|
||||
|
||||
gfs2_glock_dq_uninit(&i_gh);
|
||||
|
||||
return dblock;
|
||||
}
|
||||
|
||||
static void discard_buffer(struct gfs2_sbd *sdp, struct buffer_head *bh)
|
||||
{
|
||||
struct gfs2_bufdata *bd;
|
||||
|
||||
gfs2_log_lock(sdp);
|
||||
bd = bh->b_private;
|
||||
if (bd) {
|
||||
bd->bd_bh = NULL;
|
||||
bh->b_private = NULL;
|
||||
}
|
||||
gfs2_log_unlock(sdp);
|
||||
|
||||
lock_buffer(bh);
|
||||
clear_buffer_dirty(bh);
|
||||
bh->b_bdev = NULL;
|
||||
clear_buffer_mapped(bh);
|
||||
clear_buffer_req(bh);
|
||||
clear_buffer_new(bh);
|
||||
clear_buffer_delay(bh);
|
||||
unlock_buffer(bh);
|
||||
}
|
||||
|
||||
static void gfs2_invalidatepage(struct page *page, unsigned long offset)
|
||||
{
|
||||
struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
|
||||
struct buffer_head *head, *bh, *next;
|
||||
unsigned int curr_off = 0;
|
||||
|
||||
BUG_ON(!PageLocked(page));
|
||||
if (!page_has_buffers(page))
|
||||
return;
|
||||
|
||||
bh = head = page_buffers(page);
|
||||
do {
|
||||
unsigned int next_off = curr_off + bh->b_size;
|
||||
next = bh->b_this_page;
|
||||
|
||||
if (offset <= curr_off)
|
||||
discard_buffer(sdp, bh);
|
||||
|
||||
curr_off = next_off;
|
||||
bh = next;
|
||||
} while (bh != head);
|
||||
|
||||
if (!offset)
|
||||
try_to_release_page(page, 0);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
|
||||
const struct iovec *iov, loff_t offset,
|
||||
unsigned long nr_segs)
|
||||
{
|
||||
struct file *file = iocb->ki_filp;
|
||||
struct inode *inode = file->f_mapping->host;
|
||||
struct gfs2_inode *ip = GFS2_I(inode);
|
||||
struct gfs2_holder gh;
|
||||
int rv;
|
||||
|
||||
if (rw == READ)
|
||||
mutex_lock(&inode->i_mutex);
|
||||
/*
|
||||
* Shared lock, even if it's a write, since we do no allocation
|
||||
* on this path. All we need to change is the atime.
|
||||
*/
|
||||
gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
|
||||
rv = gfs2_glock_nq_m_atime(1, &gh);
|
||||
if (rv)
|
||||
goto out;
|
||||
|
||||
if (offset > i_size_read(inode))
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* Should we return an error here? I can't see that O_DIRECT for
|
||||
* a journaled file makes any sense. For now we'll silently fall
|
||||
* back to buffered I/O; we do the same for stuffed
|
||||
* files since they are (a) small and (b) unaligned.
|
||||
*/
|
||||
if (gfs2_is_jdata(ip))
|
||||
goto out;
|
||||
|
||||
if (gfs2_is_stuffed(ip))
|
||||
goto out;
|
||||
|
||||
rv = blockdev_direct_IO_own_locking(rw, iocb, inode,
|
||||
inode->i_sb->s_bdev,
|
||||
iov, offset, nr_segs,
|
||||
gfs2_get_block_direct, NULL);
|
||||
out:
|
||||
gfs2_glock_dq_m(1, &gh);
|
||||
gfs2_holder_uninit(&gh);
|
||||
if (rw == READ)
|
||||
mutex_unlock(&inode->i_mutex);
|
||||
|
||||
return rv;
|
||||
}
|
||||
|
||||
/**
|
||||
* stuck_releasepage - We're stuck in gfs2_releasepage(). Print stuff out.
|
||||
* @bh: the buffer we're stuck on
|
||||
*
|
||||
*/
|
||||
|
||||
static void stuck_releasepage(struct buffer_head *bh)
|
||||
{
|
||||
struct inode *inode = bh->b_page->mapping->host;
|
||||
struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
|
||||
struct gfs2_bufdata *bd = bh->b_private;
|
||||
struct gfs2_glock *gl;
|
||||
static unsigned limit = 0;
|
||||
|
||||
if (limit > 3)
|
||||
return;
|
||||
limit++;
|
||||
|
||||
fs_warn(sdp, "stuck in gfs2_releasepage() %p\n", inode);
|
||||
fs_warn(sdp, "blkno = %llu, bh->b_count = %d\n",
|
||||
(unsigned long long)bh->b_blocknr, atomic_read(&bh->b_count));
|
||||
fs_warn(sdp, "pinned = %u\n", buffer_pinned(bh));
|
||||
fs_warn(sdp, "bh->b_private = %s\n", (bd) ? "!NULL" : "NULL");
|
||||
|
||||
if (!bd)
|
||||
return;
|
||||
|
||||
gl = bd->bd_gl;
|
||||
|
||||
fs_warn(sdp, "gl = (%u, %llu)\n",
|
||||
gl->gl_name.ln_type, (unsigned long long)gl->gl_name.ln_number);
|
||||
|
||||
fs_warn(sdp, "bd_list_tr = %s, bd_le.le_list = %s\n",
|
||||
(list_empty(&bd->bd_list_tr)) ? "no" : "yes",
|
||||
(list_empty(&bd->bd_le.le_list)) ? "no" : "yes");
|
||||
|
||||
if (gl->gl_ops == &gfs2_inode_glops) {
|
||||
struct gfs2_inode *ip = gl->gl_object;
|
||||
unsigned int x;
|
||||
|
||||
if (!ip)
|
||||
return;
|
||||
|
||||
fs_warn(sdp, "ip = %llu %llu\n",
|
||||
(unsigned long long)ip->i_num.no_formal_ino,
|
||||
(unsigned long long)ip->i_num.no_addr);
|
||||
|
||||
for (x = 0; x < GFS2_MAX_META_HEIGHT; x++)
|
||||
fs_warn(sdp, "ip->i_cache[%u] = %s\n",
|
||||
x, (ip->i_cache[x]) ? "!NULL" : "NULL");
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_releasepage - free the metadata associated with a page
|
||||
* @page: the page that's being released
|
||||
* @gfp_mask: passed from Linux VFS, ignored by us
|
||||
*
|
||||
* Call try_to_free_buffers() if the buffers in this page can be
|
||||
* released.
|
||||
*
|
||||
* Returns: 0
|
||||
*/
|
||||
|
||||
int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
|
||||
{
|
||||
struct inode *aspace = page->mapping->host;
|
||||
struct gfs2_sbd *sdp = aspace->i_sb->s_fs_info;
|
||||
struct buffer_head *bh, *head;
|
||||
struct gfs2_bufdata *bd;
|
||||
unsigned long t = jiffies + gfs2_tune_get(sdp, gt_stall_secs) * HZ;
|
||||
|
||||
if (!page_has_buffers(page))
|
||||
goto out;
|
||||
|
||||
head = bh = page_buffers(page);
|
||||
do {
|
||||
while (atomic_read(&bh->b_count)) {
|
||||
if (!atomic_read(&aspace->i_writecount))
|
||||
return 0;
|
||||
|
||||
if (time_after_eq(jiffies, t)) {
|
||||
stuck_releasepage(bh);
|
||||
/* should we withdraw here? */
|
||||
return 0;
|
||||
}
|
||||
|
||||
yield();
|
||||
}
|
||||
|
||||
gfs2_assert_warn(sdp, !buffer_pinned(bh));
|
||||
gfs2_assert_warn(sdp, !buffer_dirty(bh));
|
||||
|
||||
gfs2_log_lock(sdp);
|
||||
bd = bh->b_private;
|
||||
if (bd) {
|
||||
gfs2_assert_warn(sdp, bd->bd_bh == bh);
|
||||
gfs2_assert_warn(sdp, list_empty(&bd->bd_list_tr));
|
||||
gfs2_assert_warn(sdp, !bd->bd_ail);
|
||||
bd->bd_bh = NULL;
|
||||
if (!list_empty(&bd->bd_le.le_list))
|
||||
bd = NULL;
|
||||
bh->b_private = NULL;
|
||||
}
|
||||
gfs2_log_unlock(sdp);
|
||||
if (bd)
|
||||
kmem_cache_free(gfs2_bufdata_cachep, bd);
|
||||
|
||||
bh = bh->b_this_page;
|
||||
} while (bh != head);
|
||||
|
||||
out:
|
||||
return try_to_free_buffers(page);
|
||||
}
|
||||
|
||||
const struct address_space_operations gfs2_file_aops = {
|
||||
.writepage = gfs2_writepage,
|
||||
.readpage = gfs2_readpage,
|
||||
.readpages = gfs2_readpages,
|
||||
.sync_page = block_sync_page,
|
||||
.prepare_write = gfs2_prepare_write,
|
||||
.commit_write = gfs2_commit_write,
|
||||
.bmap = gfs2_bmap,
|
||||
.invalidatepage = gfs2_invalidatepage,
|
||||
.releasepage = gfs2_releasepage,
|
||||
.direct_IO = gfs2_direct_IO,
|
||||
};
|
||||
|
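Several paths above (stuffed_readpage(), gfs2_prepare_write(), gfs2_commit_write()) handle "stuffed" inodes, where the file data lives inline in the dinode block right after the header, so at most block-size minus header-size bytes fit before the inode has to be unstuffed. A user-space sketch of that copy/zero step and capacity check, with illustrative sizes rather than the real GFS2 structure sizes:

#include <stdio.h>
#include <string.h>

#define DEMO_BSIZE   4096u      /* pretend filesystem block size */
#define DEMO_HDR      232u      /* pretend sizeof(struct gfs2_dinode) */
#define DEMO_PAGE    4096u      /* pretend PAGE_CACHE_SIZE */

int main(void)
{
	unsigned char block[DEMO_BSIZE], page[DEMO_PAGE];
	unsigned di_size = 100;                 /* bytes of inline file data */

	memset(block, 'D', sizeof(block));      /* pretend dinode + data */

	/* like stuffed_readpage(): copy the inline data, zero-fill the rest */
	memcpy(page, block + DEMO_HDR, di_size);
	memset(page + di_size, 0, DEMO_PAGE - di_size);

	/* like gfs2_prepare_write(): unstuff once a write would overflow
	 * the space left after the header */
	printf("inline capacity: %u bytes\n", DEMO_BSIZE - DEMO_HDR);
	printf("write ending at 4000 needs unstuffing: %d\n",
	       4000u > DEMO_BSIZE - DEMO_HDR);
	return 0;
}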
22
fs/gfs2/ops_address.h
Normal file
22
fs/gfs2/ops_address.h
Normal file
|
@@ -0,0 +1,22 @@
|
|||
/*
|
||||
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This copyrighted material is made available to anyone wishing to use,
|
||||
* modify, copy, or redistribute it subject to the terms and conditions
|
||||
* of the GNU General Public License version 2.
|
||||
*/
|
||||
|
||||
#ifndef __OPS_ADDRESS_DOT_H__
|
||||
#define __OPS_ADDRESS_DOT_H__
|
||||
|
||||
#include <linux/fs.h>
|
||||
#include <linux/buffer_head.h>
|
||||
#include <linux/mm.h>
|
||||
|
||||
extern const struct address_space_operations gfs2_file_aops;
|
||||
extern int gfs2_get_block(struct inode *inode, sector_t lblock,
|
||||
struct buffer_head *bh_result, int create);
|
||||
extern int gfs2_releasepage(struct page *page, gfp_t gfp_mask);
|
||||
|
||||
#endif /* __OPS_ADDRESS_DOT_H__ */
|
119
fs/gfs2/ops_dentry.c
Normal file
119
fs/gfs2/ops_dentry.c
Normal file
|
@@ -0,0 +1,119 @@
|
|||
/*
|
||||
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This copyrighted material is made available to anyone wishing to use,
|
||||
* modify, copy, or redistribute it subject to the terms and conditions
|
||||
* of the GNU General Public License version 2.
|
||||
*/
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/buffer_head.h>
|
||||
#include <linux/smp_lock.h>
|
||||
#include <linux/gfs2_ondisk.h>
|
||||
#include <linux/crc32.h>
|
||||
#include <linux/lm_interface.h>
|
||||
|
||||
#include "gfs2.h"
|
||||
#include "incore.h"
|
||||
#include "dir.h"
|
||||
#include "glock.h"
|
||||
#include "ops_dentry.h"
|
||||
#include "util.h"
|
||||
|
||||
/**
|
||||
* gfs2_drevalidate - Check directory lookup consistency
|
||||
* @dentry: the mapping to check
|
||||
* @nd: the nameidata for the lookup (unused here)
|
||||
*
|
||||
* Check to make sure the lookup necessary to arrive at this inode from its
|
||||
* parent is still good.
|
||||
*
|
||||
* Returns: 1 if the dentry is ok, 0 if it isn't
|
||||
*/
|
||||
|
||||
static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
|
||||
{
|
||||
struct dentry *parent = dget_parent(dentry);
|
||||
struct gfs2_sbd *sdp = GFS2_SB(parent->d_inode);
|
||||
struct gfs2_inode *dip = GFS2_I(parent->d_inode);
|
||||
struct inode *inode = dentry->d_inode;
|
||||
struct gfs2_holder d_gh;
|
||||
struct gfs2_inode *ip;
|
||||
struct gfs2_inum inum;
|
||||
unsigned int type;
|
||||
int error;
|
||||
|
||||
if (inode && is_bad_inode(inode))
|
||||
goto invalid;
|
||||
|
||||
if (sdp->sd_args.ar_localcaching)
|
||||
goto valid;
|
||||
|
||||
error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
|
||||
if (error)
|
||||
goto fail;
|
||||
|
||||
error = gfs2_dir_search(parent->d_inode, &dentry->d_name, &inum, &type);
|
||||
switch (error) {
|
||||
case 0:
|
||||
if (!inode)
|
||||
goto invalid_gunlock;
|
||||
break;
|
||||
case -ENOENT:
|
||||
if (!inode)
|
||||
goto valid_gunlock;
|
||||
goto invalid_gunlock;
|
||||
default:
|
||||
goto fail_gunlock;
|
||||
}
|
||||
|
||||
ip = GFS2_I(inode);
|
||||
|
||||
if (!gfs2_inum_equal(&ip->i_num, &inum))
|
||||
goto invalid_gunlock;
|
||||
|
||||
if (IF2DT(ip->i_di.di_mode) != type) {
|
||||
gfs2_consist_inode(dip);
|
||||
goto fail_gunlock;
|
||||
}
|
||||
|
||||
valid_gunlock:
|
||||
gfs2_glock_dq_uninit(&d_gh);
|
||||
valid:
|
||||
dput(parent);
|
||||
return 1;
|
||||
|
||||
invalid_gunlock:
|
||||
gfs2_glock_dq_uninit(&d_gh);
|
||||
invalid:
|
||||
if (inode && S_ISDIR(inode->i_mode)) {
|
||||
if (have_submounts(dentry))
|
||||
goto valid;
|
||||
shrink_dcache_parent(dentry);
|
||||
}
|
||||
d_drop(dentry);
|
||||
dput(parent);
|
||||
return 0;
|
||||
|
||||
fail_gunlock:
|
||||
gfs2_glock_dq_uninit(&d_gh);
|
||||
fail:
|
||||
dput(parent);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int gfs2_dhash(struct dentry *dentry, struct qstr *str)
|
||||
{
|
||||
str->hash = gfs2_disk_hash(str->name, str->len);
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct dentry_operations gfs2_dops = {
|
||||
.d_revalidate = gfs2_drevalidate,
|
||||
.d_hash = gfs2_dhash,
|
||||
};
|
||||
|
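The ->d_hash hook above computes the dentry name hash with the same function used for on-disk directory entries, so cached and on-disk lookups hash names identically. As a purely illustrative stand-in, a user-space name hash of the same shape (FNV-1a here; the real gfs2_disk_hash() is CRC32-based and produces different values):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t name_hash(const char *name, size_t len)
{
	uint32_t h = 2166136261u;               /* FNV offset basis */
	size_t i;

	for (i = 0; i < len; i++) {
		h ^= (unsigned char)name[i];
		h *= 16777619u;                 /* FNV prime */
	}
	return h;
}

int main(void)
{
	const char *name = "example";

	printf("hash(\"%s\") = 0x%08x\n", name, name_hash(name, strlen(name)));
	return 0;
}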
17
fs/gfs2/ops_dentry.h
Normal file
17
fs/gfs2/ops_dentry.h
Normal file
|
@@ -0,0 +1,17 @@
|
|||
/*
|
||||
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This copyrighted material is made available to anyone wishing to use,
|
||||
* modify, copy, or redistribute it subject to the terms and conditions
|
||||
* of the GNU General Public License version 2.
|
||||
*/
|
||||
|
||||
#ifndef __OPS_DENTRY_DOT_H__
|
||||
#define __OPS_DENTRY_DOT_H__
|
||||
|
||||
#include <linux/dcache.h>
|
||||
|
||||
extern struct dentry_operations gfs2_dops;
|
||||
|
||||
#endif /* __OPS_DENTRY_DOT_H__ */
|
298
fs/gfs2/ops_export.c
Normal file
298
fs/gfs2/ops_export.c
Normal file
|
@@ -0,0 +1,298 @@
|
|||
/*
|
||||
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This copyrighted material is made available to anyone wishing to use,
|
||||
* modify, copy, or redistribute it subject to the terms and conditions
|
||||
* of the GNU General Public License version 2.
|
||||
*/
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/buffer_head.h>
|
||||
#include <linux/gfs2_ondisk.h>
|
||||
#include <linux/crc32.h>
|
||||
#include <linux/lm_interface.h>
|
||||
|
||||
#include "gfs2.h"
|
||||
#include "incore.h"
|
||||
#include "dir.h"
|
||||
#include "glock.h"
|
||||
#include "glops.h"
|
||||
#include "inode.h"
|
||||
#include "ops_export.h"
|
||||
#include "rgrp.h"
|
||||
#include "util.h"
|
||||
|
||||
static struct dentry *gfs2_decode_fh(struct super_block *sb,
|
||||
__u32 *fh,
|
||||
int fh_len,
|
||||
int fh_type,
|
||||
int (*acceptable)(void *context,
|
||||
struct dentry *dentry),
|
||||
void *context)
|
||||
{
|
||||
struct gfs2_fh_obj fh_obj;
|
||||
struct gfs2_inum *this, parent;
|
||||
|
||||
if (fh_type != fh_len)
|
||||
return NULL;
|
||||
|
||||
this = &fh_obj.this;
|
||||
fh_obj.imode = DT_UNKNOWN;
|
||||
memset(&parent, 0, sizeof(struct gfs2_inum));
|
||||
|
||||
switch (fh_type) {
|
||||
case GFS2_LARGE_FH_SIZE:
|
||||
parent.no_formal_ino = ((u64)be32_to_cpu(fh[4])) << 32;
|
||||
parent.no_formal_ino |= be32_to_cpu(fh[5]);
|
||||
parent.no_addr = ((u64)be32_to_cpu(fh[6])) << 32;
|
||||
parent.no_addr |= be32_to_cpu(fh[7]);
|
||||
fh_obj.imode = be32_to_cpu(fh[8]);
|
||||
case GFS2_SMALL_FH_SIZE:
|
||||
this->no_formal_ino = ((u64)be32_to_cpu(fh[0])) << 32;
|
||||
this->no_formal_ino |= be32_to_cpu(fh[1]);
|
||||
this->no_addr = ((u64)be32_to_cpu(fh[2])) << 32;
|
||||
this->no_addr |= be32_to_cpu(fh[3]);
|
||||
break;
|
||||
default:
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return gfs2_export_ops.find_exported_dentry(sb, &fh_obj, &parent,
|
||||
acceptable, context);
|
||||
}
|
||||
|
||||
static int gfs2_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
|
||||
int connectable)
|
||||
{
|
||||
struct inode *inode = dentry->d_inode;
|
||||
struct super_block *sb = inode->i_sb;
|
||||
struct gfs2_inode *ip = GFS2_I(inode);
|
||||
|
||||
if (*len < GFS2_SMALL_FH_SIZE ||
|
||||
(connectable && *len < GFS2_LARGE_FH_SIZE))
|
||||
return 255;
|
||||
|
||||
fh[0] = ip->i_num.no_formal_ino >> 32;
|
||||
fh[0] = cpu_to_be32(fh[0]);
|
||||
fh[1] = ip->i_num.no_formal_ino & 0xFFFFFFFF;
|
||||
fh[1] = cpu_to_be32(fh[1]);
|
||||
fh[2] = ip->i_num.no_addr >> 32;
|
||||
fh[2] = cpu_to_be32(fh[2]);
|
||||
fh[3] = ip->i_num.no_addr & 0xFFFFFFFF;
|
||||
fh[3] = cpu_to_be32(fh[3]);
|
||||
*len = GFS2_SMALL_FH_SIZE;
|
||||
|
||||
if (!connectable || inode == sb->s_root->d_inode)
|
||||
return *len;
|
||||
|
||||
spin_lock(&dentry->d_lock);
|
||||
inode = dentry->d_parent->d_inode;
|
||||
ip = GFS2_I(inode);
|
||||
igrab(inode);
|
||||
spin_unlock(&dentry->d_lock);
|
||||
|
||||
fh[4] = ip->i_num.no_formal_ino >> 32;
|
||||
fh[4] = cpu_to_be32(fh[4]);
|
||||
fh[5] = ip->i_num.no_formal_ino & 0xFFFFFFFF;
|
||||
fh[5] = cpu_to_be32(fh[5]);
|
||||
fh[6] = ip->i_num.no_addr >> 32;
|
||||
fh[6] = cpu_to_be32(fh[6]);
|
||||
fh[7] = ip->i_num.no_addr & 0xFFFFFFFF;
|
||||
fh[7] = cpu_to_be32(fh[7]);
|
||||
|
||||
fh[8] = cpu_to_be32(inode->i_mode);
|
||||
fh[9] = 0; /* pad to double word */
|
||||
*len = GFS2_LARGE_FH_SIZE;
|
||||
|
||||
iput(inode);
|
||||
|
||||
return *len;
|
||||
}
|
||||
|
||||
struct get_name_filldir {
|
||||
struct gfs2_inum inum;
|
||||
char *name;
|
||||
};
|
||||
|
||||
static int get_name_filldir(void *opaque, const char *name, unsigned int length,
|
||||
u64 offset, struct gfs2_inum *inum,
|
||||
unsigned int type)
|
||||
{
|
||||
struct get_name_filldir *gnfd = (struct get_name_filldir *)opaque;
|
||||
|
||||
if (!gfs2_inum_equal(inum, &gnfd->inum))
|
||||
return 0;
|
||||
|
||||
memcpy(gnfd->name, name, length);
|
||||
gnfd->name[length] = 0;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int gfs2_get_name(struct dentry *parent, char *name,
|
||||
struct dentry *child)
|
||||
{
|
||||
struct inode *dir = parent->d_inode;
|
||||
struct inode *inode = child->d_inode;
|
||||
struct gfs2_inode *dip, *ip;
|
||||
struct get_name_filldir gnfd;
|
||||
struct gfs2_holder gh;
|
||||
u64 offset = 0;
|
||||
int error;
|
||||
|
||||
if (!dir)
|
||||
return -EINVAL;
|
||||
|
||||
if (!S_ISDIR(dir->i_mode) || !inode)
|
||||
return -EINVAL;
|
||||
|
||||
dip = GFS2_I(dir);
|
||||
ip = GFS2_I(inode);
|
||||
|
||||
*name = 0;
|
||||
gnfd.inum = ip->i_num;
|
||||
gnfd.name = name;
|
||||
|
||||
error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &gh);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
error = gfs2_dir_read(dir, &offset, &gnfd, get_name_filldir);
|
||||
|
||||
gfs2_glock_dq_uninit(&gh);
|
||||
|
||||
if (!error && !*name)
|
||||
error = -ENOENT;
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
static struct dentry *gfs2_get_parent(struct dentry *child)
|
||||
{
|
||||
struct qstr dotdot;
|
||||
struct inode *inode;
|
||||
struct dentry *dentry;
|
||||
|
||||
gfs2_str2qstr(&dotdot, "..");
|
||||
inode = gfs2_lookupi(child->d_inode, &dotdot, 1, NULL);
|
||||
|
||||
if (!inode)
|
||||
return ERR_PTR(-ENOENT);
|
||||
/*
|
||||
* In case of an error, @inode carries the error value, and we
|
||||
* have to return that as a(n invalid) pointer to dentry.
|
||||
*/
|
||||
if (IS_ERR(inode))
|
||||
return ERR_PTR(PTR_ERR(inode));
|
||||
|
||||
dentry = d_alloc_anon(inode);
|
||||
if (!dentry) {
|
||||
iput(inode);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
return dentry;
|
||||
}
|
||||
|
||||
static struct dentry *gfs2_get_dentry(struct super_block *sb, void *inum_obj)
|
||||
{
|
||||
struct gfs2_sbd *sdp = sb->s_fs_info;
|
||||
struct gfs2_fh_obj *fh_obj = (struct gfs2_fh_obj *)inum_obj;
|
||||
struct gfs2_inum *inum = &fh_obj->this;
|
||||
struct gfs2_holder i_gh, ri_gh, rgd_gh;
|
||||
struct gfs2_rgrpd *rgd;
|
||||
struct inode *inode;
|
||||
struct dentry *dentry;
|
||||
int error;
|
||||
|
||||
/* System files? */
|
||||
|
||||
inode = gfs2_ilookup(sb, inum);
|
||||
if (inode) {
|
||||
if (GFS2_I(inode)->i_num.no_formal_ino != inum->no_formal_ino) {
|
||||
iput(inode);
|
||||
return ERR_PTR(-ESTALE);
|
||||
}
|
||||
goto out_inode;
|
||||
}
|
||||
|
||||
error = gfs2_glock_nq_num(sdp, inum->no_addr, &gfs2_inode_glops,
|
||||
LM_ST_SHARED, LM_FLAG_ANY | GL_LOCAL_EXCL,
|
||||
&i_gh);
|
||||
if (error)
|
||||
return ERR_PTR(error);
|
||||
|
||||
error = gfs2_rindex_hold(sdp, &ri_gh);
|
||||
if (error)
|
||||
goto fail;
|
||||
|
||||
error = -EINVAL;
|
||||
rgd = gfs2_blk2rgrpd(sdp, inum->no_addr);
|
||||
if (!rgd)
|
||||
goto fail_rindex;
|
||||
|
||||
error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh);
|
||||
if (error)
|
||||
goto fail_rindex;
|
||||
|
||||
error = -ESTALE;
|
||||
if (gfs2_get_block_type(rgd, inum->no_addr) != GFS2_BLKST_DINODE)
|
||||
goto fail_rgd;
|
||||
|
||||
gfs2_glock_dq_uninit(&rgd_gh);
|
||||
gfs2_glock_dq_uninit(&ri_gh);
|
||||
|
||||
inode = gfs2_inode_lookup(sb, inum, fh_obj->imode);
|
||||
if (!inode)
|
||||
goto fail;
|
||||
if (IS_ERR(inode)) {
|
||||
error = PTR_ERR(inode);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
error = gfs2_inode_refresh(GFS2_I(inode));
|
||||
if (error) {
|
||||
iput(inode);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
error = -EIO;
|
||||
if (GFS2_I(inode)->i_di.di_flags & GFS2_DIF_SYSTEM) {
|
||||
iput(inode);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
gfs2_glock_dq_uninit(&i_gh);
|
||||
|
||||
out_inode:
|
||||
dentry = d_alloc_anon(inode);
|
||||
if (!dentry) {
|
||||
iput(inode);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
return dentry;
|
||||
|
||||
fail_rgd:
|
||||
gfs2_glock_dq_uninit(&rgd_gh);
|
||||
|
||||
fail_rindex:
|
||||
gfs2_glock_dq_uninit(&ri_gh);
|
||||
|
||||
fail:
|
||||
gfs2_glock_dq_uninit(&i_gh);
|
||||
return ERR_PTR(error);
|
||||
}
|
||||
|
||||
struct export_operations gfs2_export_ops = {
|
||||
.decode_fh = gfs2_decode_fh,
|
||||
.encode_fh = gfs2_encode_fh,
|
||||
.get_name = gfs2_get_name,
|
||||
.get_parent = gfs2_get_parent,
|
||||
.get_dentry = gfs2_get_dentry,
|
||||
};
|
||||
|
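gfs2_encode_fh()/gfs2_decode_fh() above pack each 64-bit inode number into two big-endian 32-bit words of the NFS file handle and reassemble it on decode. A user-space sketch of that split/rejoin, with htonl()/ntohl() standing in for cpu_to_be32()/be32_to_cpu() and an arbitrary example value:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

static void pack64(uint32_t fh[2], uint64_t v)
{
	fh[0] = htonl((uint32_t)(v >> 32));             /* high word first */
	fh[1] = htonl((uint32_t)(v & 0xFFFFFFFFu));     /* then low word */
}

static uint64_t unpack64(const uint32_t fh[2])
{
	return ((uint64_t)ntohl(fh[0]) << 32) | ntohl(fh[1]);
}

int main(void)
{
	uint32_t fh[2];
	uint64_t no_addr = 0x123456789abcdef0ULL;       /* example value */

	pack64(fh, no_addr);
	printf("round trip ok: %d\n", unpack64(fh) == no_addr);
	return 0;
}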
22
fs/gfs2/ops_export.h
Normal file
22
fs/gfs2/ops_export.h
Normal file
|
@@ -0,0 +1,22 @@
|
|||
/*
|
||||
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This copyrighted material is made available to anyone wishing to use,
|
||||
* modify, copy, or redistribute it subject to the terms and conditions
|
||||
* of the GNU General Public License version 2.
|
||||
*/
|
||||
|
||||
#ifndef __OPS_EXPORT_DOT_H__
|
||||
#define __OPS_EXPORT_DOT_H__
|
||||
|
||||
#define GFS2_SMALL_FH_SIZE 4
|
||||
#define GFS2_LARGE_FH_SIZE 10
|
||||
|
||||
extern struct export_operations gfs2_export_ops;
|
||||
struct gfs2_fh_obj {
|
||||
struct gfs2_inum this;
|
||||
__u32 imode;
|
||||
};
|
||||
|
||||
#endif /* __OPS_EXPORT_DOT_H__ */
|
661
fs/gfs2/ops_file.c
Normal file
661
fs/gfs2/ops_file.c
Normal file
|
@@ -0,0 +1,661 @@
|
|||
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/ext2_fs.h>
#include <linux/crc32.h>
#include <linux/lm_interface.h>
#include <asm/uaccess.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "log.h"
#include "meta_io.h"
#include "ops_file.h"
#include "ops_vm.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "eaops.h"

/* For regular, non-NFS */
struct filldir_reg {
	struct gfs2_sbd *fdr_sbd;
	int fdr_prefetch;

	filldir_t fdr_filldir;
	void *fdr_opaque;
};

/*
 * Most fields left uninitialised to catch anybody who tries to
 * use them. f_flags set to prevent file_accessed() from touching
 * any other part of this. Its use is purely as a flag so that we
 * know (in readpage()) whether or not do to locking.
 */
struct file gfs2_internal_file_sentinel = {
	.f_flags = O_NOATIME|O_RDONLY,
};

static int gfs2_read_actor(read_descriptor_t *desc, struct page *page,
|
||||
unsigned long offset, unsigned long size)
|
||||
{
|
||||
char *kaddr;
|
||||
unsigned long count = desc->count;
|
||||
|
||||
if (size > count)
|
||||
size = count;
|
||||
|
||||
kaddr = kmap(page);
|
||||
memcpy(desc->arg.buf, kaddr + offset, size);
|
||||
kunmap(page);
|
||||
|
||||
desc->count = count - size;
|
||||
desc->written += size;
|
||||
desc->arg.buf += size;
|
||||
return size;
|
||||
}
|
||||
|
||||
int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state,
|
||||
char *buf, loff_t *pos, unsigned size)
|
||||
{
|
||||
struct inode *inode = &ip->i_inode;
|
||||
read_descriptor_t desc;
|
||||
desc.written = 0;
|
||||
desc.arg.buf = buf;
|
||||
desc.count = size;
|
||||
desc.error = 0;
|
||||
do_generic_mapping_read(inode->i_mapping, ra_state,
|
||||
&gfs2_internal_file_sentinel, pos, &desc,
|
||||
gfs2_read_actor);
|
||||
return desc.written ? desc.written : desc.error;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_llseek - seek to a location in a file
|
||||
* @file: the file
|
||||
* @offset: the offset
|
||||
* @origin: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
|
||||
*
|
||||
* SEEK_END requires the glock for the file because it references the
|
||||
* file's size.
|
||||
*
|
||||
* Returns: The new offset, or errno
|
||||
*/
|
||||
|
||||
static loff_t gfs2_llseek(struct file *file, loff_t offset, int origin)
|
||||
{
|
||||
struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
|
||||
struct gfs2_holder i_gh;
|
||||
loff_t error;
|
||||
|
||||
if (origin == 2) {
|
||||
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
|
||||
&i_gh);
|
||||
if (!error) {
|
||||
error = remote_llseek(file, offset, origin);
|
||||
gfs2_glock_dq_uninit(&i_gh);
|
||||
}
|
||||
} else
|
||||
error = remote_llseek(file, offset, origin);
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
/**
|
||||
* filldir_func - Report a directory entry to the caller of gfs2_dir_read()
|
||||
* @opaque: opaque data used by the function
|
||||
* @name: the name of the directory entry
|
||||
* @length: the length of the name
|
||||
* @offset: the entry's offset in the directory
|
||||
* @inum: the inode number the entry points to
|
||||
* @type: the type of inode the entry points to
|
||||
*
|
||||
* Returns: 0 on success, 1 if buffer full
|
||||
*/
|
||||
|
||||
static int filldir_func(void *opaque, const char *name, unsigned int length,
|
||||
u64 offset, struct gfs2_inum *inum,
|
||||
unsigned int type)
|
||||
{
|
||||
struct filldir_reg *fdr = (struct filldir_reg *)opaque;
|
||||
struct gfs2_sbd *sdp = fdr->fdr_sbd;
|
||||
int error;
|
||||
|
||||
error = fdr->fdr_filldir(fdr->fdr_opaque, name, length, offset,
|
||||
inum->no_addr, type);
|
||||
if (error)
|
||||
return 1;
|
||||
|
||||
if (fdr->fdr_prefetch && !(length == 1 && *name == '.')) {
|
||||
gfs2_glock_prefetch_num(sdp, inum->no_addr, &gfs2_inode_glops,
|
||||
LM_ST_SHARED, LM_FLAG_TRY | LM_FLAG_ANY);
|
||||
gfs2_glock_prefetch_num(sdp, inum->no_addr, &gfs2_iopen_glops,
|
||||
LM_ST_SHARED, LM_FLAG_TRY);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_readdir - Read directory entries from a directory
|
||||
* @file: The directory to read from
|
||||
* @dirent: Buffer for dirents
|
||||
* @filldir: Function used to do the copying
|
||||
*
|
||||
* Returns: errno
|
||||
*/
|
||||
|
||||
static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
|
||||
{
|
||||
struct inode *dir = file->f_mapping->host;
|
||||
struct gfs2_inode *dip = GFS2_I(dir);
|
||||
struct filldir_reg fdr;
|
||||
struct gfs2_holder d_gh;
|
||||
u64 offset = file->f_pos;
|
||||
int error;
|
||||
|
||||
fdr.fdr_sbd = GFS2_SB(dir);
|
||||
fdr.fdr_prefetch = 1;
|
||||
fdr.fdr_filldir = filldir;
|
||||
fdr.fdr_opaque = dirent;
|
||||
|
||||
gfs2_holder_init(dip->i_gl, LM_ST_SHARED, GL_ATIME, &d_gh);
|
||||
error = gfs2_glock_nq_atime(&d_gh);
|
||||
if (error) {
|
||||
gfs2_holder_uninit(&d_gh);
|
||||
return error;
|
||||
}
|
||||
|
||||
error = gfs2_dir_read(dir, &offset, &fdr, filldir_func);
|
||||
|
||||
gfs2_glock_dq_uninit(&d_gh);
|
||||
|
||||
file->f_pos = offset;
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
/**
|
||||
* fsflags_cvt
|
||||
* @table: A table of 32 u32 flags
|
||||
* @val: a 32 bit value to convert
|
||||
*
|
||||
* This function can be used to convert between fsflags values and
|
||||
* GFS2's own flags values.
|
||||
*
|
||||
* Returns: the converted flags
|
||||
*/
|
||||
static u32 fsflags_cvt(const u32 *table, u32 val)
|
||||
{
|
||||
u32 res = 0;
|
||||
while(val) {
|
||||
if (val & 1)
|
||||
res |= *table;
|
||||
table++;
|
||||
val >>= 1;
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
static const u32 fsflags_to_gfs2[32] = {
|
||||
[3] = GFS2_DIF_SYNC,
|
||||
[4] = GFS2_DIF_IMMUTABLE,
|
||||
[5] = GFS2_DIF_APPENDONLY,
|
||||
[7] = GFS2_DIF_NOATIME,
|
||||
[12] = GFS2_DIF_EXHASH,
|
||||
[14] = GFS2_DIF_JDATA,
|
||||
[20] = GFS2_DIF_DIRECTIO,
|
||||
};
|
||||
|
||||
static const u32 gfs2_to_fsflags[32] = {
|
||||
[gfs2fl_Sync] = FS_SYNC_FL,
|
||||
[gfs2fl_Immutable] = FS_IMMUTABLE_FL,
|
||||
[gfs2fl_AppendOnly] = FS_APPEND_FL,
|
||||
[gfs2fl_NoAtime] = FS_NOATIME_FL,
|
||||
[gfs2fl_ExHash] = FS_INDEX_FL,
|
||||
[gfs2fl_Jdata] = FS_JOURNAL_DATA_FL,
|
||||
[gfs2fl_Directio] = FS_DIRECTIO_FL,
|
||||
[gfs2fl_InheritDirectio] = FS_DIRECTIO_FL,
|
||||
[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
|
||||
};
|
||||
|
||||
static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
|
||||
{
|
||||
struct inode *inode = filp->f_dentry->d_inode;
|
||||
struct gfs2_inode *ip = GFS2_I(inode);
|
||||
struct gfs2_holder gh;
|
||||
int error;
|
||||
u32 fsflags;
|
||||
|
||||
gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
|
||||
error = gfs2_glock_nq_m_atime(1, &gh);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_di.di_flags);
|
||||
if (put_user(fsflags, ptr))
|
||||
error = -EFAULT;
|
||||
|
||||
gfs2_glock_dq_m(1, &gh);
|
||||
gfs2_holder_uninit(&gh);
|
||||
return error;
|
||||
}
|
||||
|
||||
/* Flags that can be set by user space */
|
||||
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA| \
|
||||
GFS2_DIF_DIRECTIO| \
|
||||
GFS2_DIF_IMMUTABLE| \
|
||||
GFS2_DIF_APPENDONLY| \
|
||||
GFS2_DIF_NOATIME| \
|
||||
GFS2_DIF_SYNC| \
|
||||
GFS2_DIF_SYSTEM| \
|
||||
GFS2_DIF_INHERIT_DIRECTIO| \
|
||||
GFS2_DIF_INHERIT_JDATA)
|
||||
|
||||
/**
|
||||
* gfs2_set_flags - set flags on an inode
|
||||
* @inode: The inode
|
||||
* @flags: The flags to set
|
||||
* @mask: Indicates which flags are valid
|
||||
*
|
||||
*/
|
||||
static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
|
||||
{
|
||||
struct inode *inode = filp->f_dentry->d_inode;
|
||||
struct gfs2_inode *ip = GFS2_I(inode);
|
||||
struct gfs2_sbd *sdp = GFS2_SB(inode);
|
||||
struct buffer_head *bh;
|
||||
struct gfs2_holder gh;
|
||||
int error;
|
||||
u32 new_flags, flags;
|
||||
|
||||
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
flags = ip->i_di.di_flags;
|
||||
new_flags = (flags & ~mask) | (reqflags & mask);
|
||||
if ((new_flags ^ flags) == 0)
|
||||
goto out;
|
||||
|
||||
if (S_ISDIR(inode->i_mode)) {
|
||||
if ((new_flags ^ flags) & GFS2_DIF_JDATA)
|
||||
new_flags ^= (GFS2_DIF_JDATA|GFS2_DIF_INHERIT_JDATA);
|
||||
if ((new_flags ^ flags) & GFS2_DIF_DIRECTIO)
|
||||
new_flags ^= (GFS2_DIF_DIRECTIO|GFS2_DIF_INHERIT_DIRECTIO);
|
||||
}
|
||||
|
||||
error = -EINVAL;
|
||||
if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
|
||||
goto out;
|
||||
|
||||
error = -EPERM;
|
||||
if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
|
||||
goto out;
|
||||
if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
|
||||
goto out;
|
||||
if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
|
||||
!capable(CAP_LINUX_IMMUTABLE))
|
||||
goto out;
|
||||
if (!IS_IMMUTABLE(inode)) {
|
||||
error = permission(inode, MAY_WRITE, NULL);
|
||||
if (error)
|
||||
goto out;
|
||||
}
|
||||
|
||||
error = gfs2_trans_begin(sdp, RES_DINODE, 0);
|
||||
if (error)
|
||||
goto out;
|
||||
error = gfs2_meta_inode_buffer(ip, &bh);
|
||||
if (error)
|
||||
goto out_trans_end;
|
||||
gfs2_trans_add_bh(ip->i_gl, bh, 1);
|
||||
ip->i_di.di_flags = new_flags;
|
||||
gfs2_dinode_out(&ip->i_di, bh->b_data);
|
||||
brelse(bh);
|
||||
out_trans_end:
|
||||
gfs2_trans_end(sdp);
|
||||
out:
|
||||
gfs2_glock_dq_uninit(&gh);
|
||||
return error;
|
||||
}
|
||||
|
||||
static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
|
||||
{
|
||||
u32 fsflags, gfsflags;
|
||||
if (get_user(fsflags, ptr))
|
||||
return -EFAULT;
|
||||
gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
|
||||
return do_gfs2_set_flags(filp, gfsflags, ~0);
|
||||
}
|
||||
|
||||
static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
switch(cmd) {
|
||||
case FS_IOC_GETFLAGS:
|
||||
return gfs2_get_flags(filp, (u32 __user *)arg);
|
||||
case FS_IOC_SETFLAGS:
|
||||
return gfs2_set_flags(filp, (u32 __user *)arg);
|
||||
}
|
||||
return -ENOTTY;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* gfs2_mmap -
|
||||
* @file: The file to map
|
||||
* @vma: The VMA which described the mapping
|
||||
*
|
||||
* Returns: 0 or error code
|
||||
*/
|
||||
|
||||
static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
{
|
||||
struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
|
||||
struct gfs2_holder i_gh;
|
||||
int error;
|
||||
|
||||
gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &i_gh);
|
||||
error = gfs2_glock_nq_atime(&i_gh);
|
||||
if (error) {
|
||||
gfs2_holder_uninit(&i_gh);
|
||||
return error;
|
||||
}
|
||||
|
||||
/* This is VM_MAYWRITE instead of VM_WRITE because a call
|
||||
to mprotect() can turn on VM_WRITE later. */
|
||||
|
||||
if ((vma->vm_flags & (VM_MAYSHARE | VM_MAYWRITE)) ==
|
||||
(VM_MAYSHARE | VM_MAYWRITE))
|
||||
vma->vm_ops = &gfs2_vm_ops_sharewrite;
|
||||
else
|
||||
vma->vm_ops = &gfs2_vm_ops_private;
|
||||
|
||||
gfs2_glock_dq_uninit(&i_gh);
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_open - open a file
|
||||
* @inode: the inode to open
|
||||
* @file: the struct file for this opening
|
||||
*
|
||||
* Returns: errno
|
||||
*/
|
||||
|
||||
static int gfs2_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct gfs2_inode *ip = GFS2_I(inode);
|
||||
struct gfs2_holder i_gh;
|
||||
struct gfs2_file *fp;
|
||||
int error;
|
||||
|
||||
fp = kzalloc(sizeof(struct gfs2_file), GFP_KERNEL);
|
||||
if (!fp)
|
||||
return -ENOMEM;
|
||||
|
||||
mutex_init(&fp->f_fl_mutex);
|
||||
|
||||
gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
|
||||
file->private_data = fp;
|
||||
|
||||
if (S_ISREG(ip->i_di.di_mode)) {
|
||||
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
|
||||
&i_gh);
|
||||
if (error)
|
||||
goto fail;
|
||||
|
||||
if (!(file->f_flags & O_LARGEFILE) &&
|
||||
ip->i_di.di_size > MAX_NON_LFS) {
|
||||
error = -EFBIG;
|
||||
goto fail_gunlock;
|
||||
}
|
||||
|
||||
/* Listen to the Direct I/O flag */
|
||||
|
||||
if (ip->i_di.di_flags & GFS2_DIF_DIRECTIO)
|
||||
file->f_flags |= O_DIRECT;
|
||||
|
||||
gfs2_glock_dq_uninit(&i_gh);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
fail_gunlock:
|
||||
gfs2_glock_dq_uninit(&i_gh);
|
||||
fail:
|
||||
file->private_data = NULL;
|
||||
kfree(fp);
|
||||
return error;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_close - called to close a struct file
|
||||
* @inode: the inode the struct file belongs to
|
||||
* @file: the struct file being closed
|
||||
*
|
||||
* Returns: errno
|
||||
*/
|
||||
|
||||
static int gfs2_close(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
|
||||
struct gfs2_file *fp;
|
||||
|
||||
fp = file->private_data;
|
||||
file->private_data = NULL;
|
||||
|
||||
if (gfs2_assert_warn(sdp, fp))
|
||||
return -EIO;
|
||||
|
||||
kfree(fp);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_fsync - sync the dirty data for a file (across the cluster)
|
||||
* @file: the file that points to the dentry (we ignore this)
|
||||
* @dentry: the dentry that points to the inode to sync
|
||||
*
|
||||
* Returns: errno
|
||||
*/
|
||||
|
||||
static int gfs2_fsync(struct file *file, struct dentry *dentry, int datasync)
|
||||
{
|
||||
struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
|
||||
|
||||
gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_lock - acquire/release a posix lock on a file
|
||||
* @file: the file pointer
|
||||
* @cmd: either modify or retrieve lock state, possibly wait
|
||||
* @fl: type and range of lock
|
||||
*
|
||||
* Returns: errno
|
||||
*/
|
||||
|
||||
static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
|
||||
{
|
||||
struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
|
||||
struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
|
||||
struct lm_lockname name =
|
||||
{ .ln_number = ip->i_num.no_addr,
|
||||
.ln_type = LM_TYPE_PLOCK };
|
||||
|
||||
if (!(fl->fl_flags & FL_POSIX))
|
||||
return -ENOLCK;
|
||||
if ((ip->i_di.di_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
|
||||
return -ENOLCK;
|
||||
|
||||
if (sdp->sd_args.ar_localflocks) {
|
||||
if (IS_GETLK(cmd)) {
|
||||
struct file_lock tmp;
|
||||
int ret;
|
||||
ret = posix_test_lock(file, fl, &tmp);
|
||||
fl->fl_type = F_UNLCK;
|
||||
if (ret)
|
||||
memcpy(fl, &tmp, sizeof(struct file_lock));
|
||||
return 0;
|
||||
} else {
|
||||
return posix_lock_file_wait(file, fl);
|
||||
}
|
||||
}
|
||||
|
||||
if (IS_GETLK(cmd))
|
||||
return gfs2_lm_plock_get(sdp, &name, file, fl);
|
||||
else if (fl->fl_type == F_UNLCK)
|
||||
return gfs2_lm_punlock(sdp, &name, file, fl);
|
||||
else
|
||||
return gfs2_lm_plock(sdp, &name, file, cmd, fl);
|
||||
}
|
||||
|
||||
static int do_flock(struct file *file, int cmd, struct file_lock *fl)
|
||||
{
|
||||
struct gfs2_file *fp = file->private_data;
|
||||
struct gfs2_holder *fl_gh = &fp->f_fl_gh;
|
||||
struct gfs2_inode *ip = GFS2_I(file->f_dentry->d_inode);
|
||||
struct gfs2_glock *gl;
|
||||
unsigned int state;
|
||||
int flags;
|
||||
int error = 0;
|
||||
|
||||
state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
|
||||
flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;
|
||||
|
||||
mutex_lock(&fp->f_fl_mutex);
|
||||
|
||||
gl = fl_gh->gh_gl;
|
||||
if (gl) {
|
||||
if (fl_gh->gh_state == state)
|
||||
goto out;
|
||||
gfs2_glock_hold(gl);
|
||||
flock_lock_file_wait(file,
|
||||
&(struct file_lock){.fl_type = F_UNLCK});
|
||||
gfs2_glock_dq_uninit(fl_gh);
|
||||
} else {
|
||||
error = gfs2_glock_get(GFS2_SB(&ip->i_inode),
|
||||
ip->i_num.no_addr, &gfs2_flock_glops,
|
||||
CREATE, &gl);
|
||||
if (error)
|
||||
goto out;
|
||||
}
|
||||
|
||||
gfs2_holder_init(gl, state, flags, fl_gh);
|
||||
gfs2_glock_put(gl);
|
||||
|
||||
error = gfs2_glock_nq(fl_gh);
|
||||
if (error) {
|
||||
gfs2_holder_uninit(fl_gh);
|
||||
if (error == GLR_TRYFAILED)
|
||||
error = -EAGAIN;
|
||||
} else {
|
||||
error = flock_lock_file_wait(file, fl);
|
||||
gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
|
||||
}
|
||||
|
||||
out:
|
||||
mutex_unlock(&fp->f_fl_mutex);
|
||||
return error;
|
||||
}
|
||||
|
||||
static void do_unflock(struct file *file, struct file_lock *fl)
|
||||
{
|
||||
struct gfs2_file *fp = file->private_data;
|
||||
struct gfs2_holder *fl_gh = &fp->f_fl_gh;
|
||||
|
||||
mutex_lock(&fp->f_fl_mutex);
|
||||
flock_lock_file_wait(file, fl);
|
||||
if (fl_gh->gh_gl)
|
||||
gfs2_glock_dq_uninit(fl_gh);
|
||||
mutex_unlock(&fp->f_fl_mutex);
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_flock - acquire/release a flock lock on a file
|
||||
* @file: the file pointer
|
||||
* @cmd: either modify or retrieve lock state, possibly wait
|
||||
* @fl: type and range of lock
|
||||
*
|
||||
* Returns: errno
|
||||
*/
|
||||
|
||||
static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
|
||||
{
|
||||
struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
|
||||
struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
|
||||
|
||||
if (!(fl->fl_flags & FL_FLOCK))
|
||||
return -ENOLCK;
|
||||
if ((ip->i_di.di_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
|
||||
return -ENOLCK;
|
||||
|
||||
if (sdp->sd_args.ar_localflocks)
|
||||
return flock_lock_file_wait(file, fl);
|
||||
|
||||
if (fl->fl_type == F_UNLCK) {
|
||||
do_unflock(file, fl);
|
||||
return 0;
|
||||
} else {
|
||||
return do_flock(file, cmd, fl);
|
||||
}
|
||||
}
|
||||
|
||||
const struct file_operations gfs2_file_fops = {
	.llseek = gfs2_llseek,
	.read = do_sync_read,
	.aio_read = generic_file_aio_read,
	.write = do_sync_write,
	.aio_write = generic_file_aio_write,
	.unlocked_ioctl = gfs2_ioctl,
	.mmap = gfs2_mmap,
	.open = gfs2_open,
	.release = gfs2_close,
	.fsync = gfs2_fsync,
	.lock = gfs2_lock,
	.sendfile = generic_file_sendfile,
	.flock = gfs2_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = generic_file_splice_write,
};

const struct file_operations gfs2_dir_fops = {
	.readdir = gfs2_readdir,
	.unlocked_ioctl = gfs2_ioctl,
	.open = gfs2_open,
	.release = gfs2_close,
	.fsync = gfs2_fsync,
	.lock = gfs2_lock,
	.flock = gfs2_flock,
};

fs/gfs2/ops_file.h | 24 (new file)
@@ -0,0 +1,24 @@
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#ifndef __OPS_FILE_DOT_H__
#define __OPS_FILE_DOT_H__

#include <linux/fs.h>
struct gfs2_inode;

extern struct file gfs2_internal_file_sentinel;
extern int gfs2_internal_read(struct gfs2_inode *ip,
			      struct file_ra_state *ra_state,
			      char *buf, loff_t *pos, unsigned size);

extern const struct file_operations gfs2_file_fops;
extern const struct file_operations gfs2_dir_fops;

#endif /* __OPS_FILE_DOT_H__ */
fs/gfs2/ops_fstype.c | 928 (new file)
@@ -0,0 +1,928 @@
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "daemon.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "mount.h"
#include "ops_export.h"
#include "ops_fstype.h"
#include "ops_super.h"
#include "recovery.h"
#include "rgrp.h"
#include "super.h"
#include "sys.h"
#include "util.h"

#define DO 0
#define UNDO 1

extern struct dentry_operations gfs2_dops;

static struct gfs2_sbd *init_sbd(struct super_block *sb)
|
||||
{
|
||||
struct gfs2_sbd *sdp;
|
||||
|
||||
sdp = kzalloc(sizeof(struct gfs2_sbd), GFP_KERNEL);
|
||||
if (!sdp)
|
||||
return NULL;
|
||||
|
||||
sb->s_fs_info = sdp;
|
||||
sdp->sd_vfs = sb;
|
||||
|
||||
gfs2_tune_init(&sdp->sd_tune);
|
||||
|
||||
INIT_LIST_HEAD(&sdp->sd_reclaim_list);
|
||||
spin_lock_init(&sdp->sd_reclaim_lock);
|
||||
init_waitqueue_head(&sdp->sd_reclaim_wq);
|
||||
|
||||
mutex_init(&sdp->sd_inum_mutex);
|
||||
spin_lock_init(&sdp->sd_statfs_spin);
|
||||
mutex_init(&sdp->sd_statfs_mutex);
|
||||
|
||||
spin_lock_init(&sdp->sd_rindex_spin);
|
||||
mutex_init(&sdp->sd_rindex_mutex);
|
||||
INIT_LIST_HEAD(&sdp->sd_rindex_list);
|
||||
INIT_LIST_HEAD(&sdp->sd_rindex_mru_list);
|
||||
INIT_LIST_HEAD(&sdp->sd_rindex_recent_list);
|
||||
|
||||
INIT_LIST_HEAD(&sdp->sd_jindex_list);
|
||||
spin_lock_init(&sdp->sd_jindex_spin);
|
||||
mutex_init(&sdp->sd_jindex_mutex);
|
||||
|
||||
INIT_LIST_HEAD(&sdp->sd_quota_list);
|
||||
spin_lock_init(&sdp->sd_quota_spin);
|
||||
mutex_init(&sdp->sd_quota_mutex);
|
||||
|
||||
spin_lock_init(&sdp->sd_log_lock);
|
||||
|
||||
INIT_LIST_HEAD(&sdp->sd_log_le_gl);
|
||||
INIT_LIST_HEAD(&sdp->sd_log_le_buf);
|
||||
INIT_LIST_HEAD(&sdp->sd_log_le_revoke);
|
||||
INIT_LIST_HEAD(&sdp->sd_log_le_rg);
|
||||
INIT_LIST_HEAD(&sdp->sd_log_le_databuf);
|
||||
|
||||
mutex_init(&sdp->sd_log_reserve_mutex);
|
||||
INIT_LIST_HEAD(&sdp->sd_ail1_list);
|
||||
INIT_LIST_HEAD(&sdp->sd_ail2_list);
|
||||
|
||||
init_rwsem(&sdp->sd_log_flush_lock);
|
||||
INIT_LIST_HEAD(&sdp->sd_log_flush_list);
|
||||
|
||||
INIT_LIST_HEAD(&sdp->sd_revoke_list);
|
||||
|
||||
mutex_init(&sdp->sd_freeze_lock);
|
||||
|
||||
return sdp;
|
||||
}
|
||||
|
||||
static void init_vfs(struct super_block *sb, unsigned noatime)
|
||||
{
|
||||
struct gfs2_sbd *sdp = sb->s_fs_info;
|
||||
|
||||
sb->s_magic = GFS2_MAGIC;
|
||||
sb->s_op = &gfs2_super_ops;
|
||||
sb->s_export_op = &gfs2_export_ops;
|
||||
sb->s_maxbytes = MAX_LFS_FILESIZE;
|
||||
|
||||
if (sb->s_flags & (MS_NOATIME | MS_NODIRATIME))
|
||||
set_bit(noatime, &sdp->sd_flags);
|
||||
|
||||
/* Don't let the VFS update atimes. GFS2 handles this itself. */
|
||||
sb->s_flags |= MS_NOATIME | MS_NODIRATIME;
|
||||
}
|
||||
|
||||
static int init_names(struct gfs2_sbd *sdp, int silent)
|
||||
{
|
||||
struct page *page;
|
||||
char *proto, *table;
|
||||
int error = 0;
|
||||
|
||||
proto = sdp->sd_args.ar_lockproto;
|
||||
table = sdp->sd_args.ar_locktable;
|
||||
|
||||
/* Try to autodetect */
|
||||
|
||||
if (!proto[0] || !table[0]) {
|
||||
struct gfs2_sb *sb;
|
||||
page = gfs2_read_super(sdp->sd_vfs, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift);
|
||||
if (!page)
|
||||
return -ENOBUFS;
|
||||
sb = kmap(page);
|
||||
gfs2_sb_in(&sdp->sd_sb, sb);
|
||||
kunmap(page);
|
||||
__free_page(page);
|
||||
|
||||
error = gfs2_check_sb(sdp, &sdp->sd_sb, silent);
|
||||
if (error)
|
||||
goto out;
|
||||
|
||||
if (!proto[0])
|
||||
proto = sdp->sd_sb.sb_lockproto;
|
||||
if (!table[0])
|
||||
table = sdp->sd_sb.sb_locktable;
|
||||
}
|
||||
|
||||
if (!table[0])
|
||||
table = sdp->sd_vfs->s_id;
|
||||
|
||||
snprintf(sdp->sd_proto_name, GFS2_FSNAME_LEN, "%s", proto);
|
||||
snprintf(sdp->sd_table_name, GFS2_FSNAME_LEN, "%s", table);
|
||||
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
||||
static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh,
|
||||
int undo)
|
||||
{
|
||||
struct task_struct *p;
|
||||
int error = 0;
|
||||
|
||||
if (undo)
|
||||
goto fail_trans;
|
||||
|
||||
p = kthread_run(gfs2_scand, sdp, "gfs2_scand");
|
||||
error = IS_ERR(p);
|
||||
if (error) {
|
||||
fs_err(sdp, "can't start scand thread: %d\n", error);
|
||||
return error;
|
||||
}
|
||||
sdp->sd_scand_process = p;
|
||||
|
||||
for (sdp->sd_glockd_num = 0;
|
||||
sdp->sd_glockd_num < sdp->sd_args.ar_num_glockd;
|
||||
sdp->sd_glockd_num++) {
|
||||
p = kthread_run(gfs2_glockd, sdp, "gfs2_glockd");
|
||||
error = IS_ERR(p);
|
||||
if (error) {
|
||||
fs_err(sdp, "can't start glockd thread: %d\n", error);
|
||||
goto fail;
|
||||
}
|
||||
sdp->sd_glockd_process[sdp->sd_glockd_num] = p;
|
||||
}
|
||||
|
||||
error = gfs2_glock_nq_num(sdp,
|
||||
GFS2_MOUNT_LOCK, &gfs2_nondisk_glops,
|
||||
LM_ST_EXCLUSIVE, LM_FLAG_NOEXP | GL_NOCACHE,
|
||||
mount_gh);
|
||||
if (error) {
|
||||
fs_err(sdp, "can't acquire mount glock: %d\n", error);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
error = gfs2_glock_nq_num(sdp,
|
||||
GFS2_LIVE_LOCK, &gfs2_nondisk_glops,
|
||||
LM_ST_SHARED,
|
||||
LM_FLAG_NOEXP | GL_EXACT,
|
||||
&sdp->sd_live_gh);
|
||||
if (error) {
|
||||
fs_err(sdp, "can't acquire live glock: %d\n", error);
|
||||
goto fail_mount;
|
||||
}
|
||||
|
||||
error = gfs2_glock_get(sdp, GFS2_RENAME_LOCK, &gfs2_nondisk_glops,
|
||||
CREATE, &sdp->sd_rename_gl);
|
||||
if (error) {
|
||||
fs_err(sdp, "can't create rename glock: %d\n", error);
|
||||
goto fail_live;
|
||||
}
|
||||
|
||||
error = gfs2_glock_get(sdp, GFS2_TRANS_LOCK, &gfs2_trans_glops,
|
||||
CREATE, &sdp->sd_trans_gl);
|
||||
if (error) {
|
||||
fs_err(sdp, "can't create transaction glock: %d\n", error);
|
||||
goto fail_rename;
|
||||
}
|
||||
set_bit(GLF_STICKY, &sdp->sd_trans_gl->gl_flags);
|
||||
|
||||
return 0;
|
||||
|
||||
fail_trans:
|
||||
gfs2_glock_put(sdp->sd_trans_gl);
|
||||
fail_rename:
|
||||
gfs2_glock_put(sdp->sd_rename_gl);
|
||||
fail_live:
|
||||
gfs2_glock_dq_uninit(&sdp->sd_live_gh);
|
||||
fail_mount:
|
||||
gfs2_glock_dq_uninit(mount_gh);
|
||||
fail:
|
||||
while (sdp->sd_glockd_num--)
|
||||
kthread_stop(sdp->sd_glockd_process[sdp->sd_glockd_num]);
|
||||
|
||||
kthread_stop(sdp->sd_scand_process);
|
||||
return error;
|
||||
}
|
||||
|
||||
static struct inode *gfs2_lookup_root(struct super_block *sb,
|
||||
struct gfs2_inum *inum)
|
||||
{
|
||||
return gfs2_inode_lookup(sb, inum, DT_DIR);
|
||||
}
|
||||
|
||||
static int init_sb(struct gfs2_sbd *sdp, int silent, int undo)
|
||||
{
|
||||
struct super_block *sb = sdp->sd_vfs;
|
||||
struct gfs2_holder sb_gh;
|
||||
struct gfs2_inum *inum;
|
||||
struct inode *inode;
|
||||
int error = 0;
|
||||
|
||||
if (undo) {
|
||||
if (sb->s_root) {
|
||||
dput(sb->s_root);
|
||||
sb->s_root = NULL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
error = gfs2_glock_nq_num(sdp, GFS2_SB_LOCK, &gfs2_meta_glops,
|
||||
LM_ST_SHARED, 0, &sb_gh);
|
||||
if (error) {
|
||||
fs_err(sdp, "can't acquire superblock glock: %d\n", error);
|
||||
return error;
|
||||
}
|
||||
|
||||
error = gfs2_read_sb(sdp, sb_gh.gh_gl, silent);
|
||||
if (error) {
|
||||
fs_err(sdp, "can't read superblock: %d\n", error);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Set up the buffer cache and SB for real */
|
||||
if (sdp->sd_sb.sb_bsize < bdev_hardsect_size(sb->s_bdev)) {
|
||||
error = -EINVAL;
|
||||
fs_err(sdp, "FS block size (%u) is too small for device "
|
||||
"block size (%u)\n",
|
||||
sdp->sd_sb.sb_bsize, bdev_hardsect_size(sb->s_bdev));
|
||||
goto out;
|
||||
}
|
||||
if (sdp->sd_sb.sb_bsize > PAGE_SIZE) {
|
||||
error = -EINVAL;
|
||||
fs_err(sdp, "FS block size (%u) is too big for machine "
|
||||
"page size (%u)\n",
|
||||
sdp->sd_sb.sb_bsize, (unsigned int)PAGE_SIZE);
|
||||
goto out;
|
||||
}
|
||||
sb_set_blocksize(sb, sdp->sd_sb.sb_bsize);
|
||||
|
||||
/* Get the root inode */
|
||||
inum = &sdp->sd_sb.sb_root_dir;
|
||||
if (sb->s_type == &gfs2meta_fs_type)
|
||||
inum = &sdp->sd_sb.sb_master_dir;
|
||||
inode = gfs2_lookup_root(sb, inum);
|
||||
if (IS_ERR(inode)) {
|
||||
error = PTR_ERR(inode);
|
||||
fs_err(sdp, "can't read in root inode: %d\n", error);
|
||||
goto out;
|
||||
}
|
||||
|
||||
sb->s_root = d_alloc_root(inode);
|
||||
if (!sb->s_root) {
|
||||
fs_err(sdp, "can't get root dentry\n");
|
||||
error = -ENOMEM;
|
||||
iput(inode);
|
||||
}
|
||||
sb->s_root->d_op = &gfs2_dops;
|
||||
out:
|
||||
gfs2_glock_dq_uninit(&sb_gh);
|
||||
return error;
|
||||
}
|
||||
|
||||
static int init_journal(struct gfs2_sbd *sdp, int undo)
|
||||
{
|
||||
struct gfs2_holder ji_gh;
|
||||
struct task_struct *p;
|
||||
struct gfs2_inode *ip;
|
||||
int jindex = 1;
|
||||
int error = 0;
|
||||
|
||||
if (undo) {
|
||||
jindex = 0;
|
||||
goto fail_recoverd;
|
||||
}
|
||||
|
||||
sdp->sd_jindex = gfs2_lookup_simple(sdp->sd_master_dir, "jindex");
|
||||
if (IS_ERR(sdp->sd_jindex)) {
|
||||
fs_err(sdp, "can't lookup journal index: %d\n", error);
|
||||
return PTR_ERR(sdp->sd_jindex);
|
||||
}
|
||||
ip = GFS2_I(sdp->sd_jindex);
|
||||
set_bit(GLF_STICKY, &ip->i_gl->gl_flags);
|
||||
|
||||
/* Load in the journal index special file */
|
||||
|
||||
error = gfs2_jindex_hold(sdp, &ji_gh);
|
||||
if (error) {
|
||||
fs_err(sdp, "can't read journal index: %d\n", error);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
error = -EINVAL;
|
||||
if (!gfs2_jindex_size(sdp)) {
|
||||
fs_err(sdp, "no journals!\n");
|
||||
goto fail_jindex;
|
||||
}
|
||||
|
||||
if (sdp->sd_args.ar_spectator) {
|
||||
sdp->sd_jdesc = gfs2_jdesc_find(sdp, 0);
|
||||
sdp->sd_log_blks_free = sdp->sd_jdesc->jd_blocks;
|
||||
} else {
|
||||
if (sdp->sd_lockstruct.ls_jid >= gfs2_jindex_size(sdp)) {
|
||||
fs_err(sdp, "can't mount journal #%u\n",
|
||||
sdp->sd_lockstruct.ls_jid);
|
||||
fs_err(sdp, "there are only %u journals (0 - %u)\n",
|
||||
gfs2_jindex_size(sdp),
|
||||
gfs2_jindex_size(sdp) - 1);
|
||||
goto fail_jindex;
|
||||
}
|
||||
sdp->sd_jdesc = gfs2_jdesc_find(sdp, sdp->sd_lockstruct.ls_jid);
|
||||
|
||||
error = gfs2_glock_nq_num(sdp, sdp->sd_lockstruct.ls_jid,
|
||||
&gfs2_journal_glops,
|
||||
LM_ST_EXCLUSIVE, LM_FLAG_NOEXP,
|
||||
&sdp->sd_journal_gh);
|
||||
if (error) {
|
||||
fs_err(sdp, "can't acquire journal glock: %d\n", error);
|
||||
goto fail_jindex;
|
||||
}
|
||||
|
||||
ip = GFS2_I(sdp->sd_jdesc->jd_inode);
|
||||
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
|
||||
LM_FLAG_NOEXP | GL_EXACT,
|
||||
&sdp->sd_jinode_gh);
|
||||
if (error) {
|
||||
fs_err(sdp, "can't acquire journal inode glock: %d\n",
|
||||
error);
|
||||
goto fail_journal_gh;
|
||||
}
|
||||
|
||||
error = gfs2_jdesc_check(sdp->sd_jdesc);
|
||||
if (error) {
|
||||
fs_err(sdp, "my journal (%u) is bad: %d\n",
|
||||
sdp->sd_jdesc->jd_jid, error);
|
||||
goto fail_jinode_gh;
|
||||
}
|
||||
sdp->sd_log_blks_free = sdp->sd_jdesc->jd_blocks;
|
||||
}
|
||||
|
||||
if (sdp->sd_lockstruct.ls_first) {
|
||||
unsigned int x;
|
||||
for (x = 0; x < sdp->sd_journals; x++) {
|
||||
error = gfs2_recover_journal(gfs2_jdesc_find(sdp, x));
|
||||
if (error) {
|
||||
fs_err(sdp, "error recovering journal %u: %d\n",
|
||||
x, error);
|
||||
goto fail_jinode_gh;
|
||||
}
|
||||
}
|
||||
|
||||
gfs2_lm_others_may_mount(sdp);
|
||||
} else if (!sdp->sd_args.ar_spectator) {
|
||||
error = gfs2_recover_journal(sdp->sd_jdesc);
|
||||
if (error) {
|
||||
fs_err(sdp, "error recovering my journal: %d\n", error);
|
||||
goto fail_jinode_gh;
|
||||
}
|
||||
}
|
||||
|
||||
set_bit(SDF_JOURNAL_CHECKED, &sdp->sd_flags);
|
||||
gfs2_glock_dq_uninit(&ji_gh);
|
||||
jindex = 0;
|
||||
|
||||
p = kthread_run(gfs2_recoverd, sdp, "gfs2_recoverd");
|
||||
error = IS_ERR(p);
|
||||
if (error) {
|
||||
fs_err(sdp, "can't start recoverd thread: %d\n", error);
|
||||
goto fail_jinode_gh;
|
||||
}
|
||||
sdp->sd_recoverd_process = p;
|
||||
|
||||
return 0;
|
||||
|
||||
fail_recoverd:
|
||||
kthread_stop(sdp->sd_recoverd_process);
|
||||
fail_jinode_gh:
|
||||
if (!sdp->sd_args.ar_spectator)
|
||||
gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
|
||||
fail_journal_gh:
|
||||
if (!sdp->sd_args.ar_spectator)
|
||||
gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
|
||||
fail_jindex:
|
||||
gfs2_jindex_free(sdp);
|
||||
if (jindex)
|
||||
gfs2_glock_dq_uninit(&ji_gh);
|
||||
fail:
|
||||
iput(sdp->sd_jindex);
|
||||
return error;
|
||||
}
|
||||
|
||||
|
||||
static int init_inodes(struct gfs2_sbd *sdp, int undo)
|
||||
{
|
||||
int error = 0;
|
||||
struct gfs2_inode *ip;
|
||||
struct inode *inode;
|
||||
|
||||
if (undo)
|
||||
goto fail_qinode;
|
||||
|
||||
inode = gfs2_lookup_root(sdp->sd_vfs, &sdp->sd_sb.sb_master_dir);
|
||||
if (IS_ERR(inode)) {
|
||||
error = PTR_ERR(inode);
|
||||
fs_err(sdp, "can't read in master directory: %d\n", error);
|
||||
goto fail;
|
||||
}
|
||||
sdp->sd_master_dir = inode;
|
||||
|
||||
error = init_journal(sdp, undo);
|
||||
if (error)
|
||||
goto fail_master;
|
||||
|
||||
/* Read in the master inode number inode */
|
||||
sdp->sd_inum_inode = gfs2_lookup_simple(sdp->sd_master_dir, "inum");
|
||||
if (IS_ERR(sdp->sd_inum_inode)) {
|
||||
error = PTR_ERR(sdp->sd_inum_inode);
|
||||
fs_err(sdp, "can't read in inum inode: %d\n", error);
|
||||
goto fail_journal;
|
||||
}
|
||||
|
||||
|
||||
/* Read in the master statfs inode */
|
||||
sdp->sd_statfs_inode = gfs2_lookup_simple(sdp->sd_master_dir, "statfs");
|
||||
if (IS_ERR(sdp->sd_statfs_inode)) {
|
||||
error = PTR_ERR(sdp->sd_statfs_inode);
|
||||
fs_err(sdp, "can't read in statfs inode: %d\n", error);
|
||||
goto fail_inum;
|
||||
}
|
||||
|
||||
/* Read in the resource index inode */
|
||||
sdp->sd_rindex = gfs2_lookup_simple(sdp->sd_master_dir, "rindex");
|
||||
if (IS_ERR(sdp->sd_rindex)) {
|
||||
error = PTR_ERR(sdp->sd_rindex);
|
||||
fs_err(sdp, "can't get resource index inode: %d\n", error);
|
||||
goto fail_statfs;
|
||||
}
|
||||
ip = GFS2_I(sdp->sd_rindex);
|
||||
set_bit(GLF_STICKY, &ip->i_gl->gl_flags);
|
||||
sdp->sd_rindex_vn = ip->i_gl->gl_vn - 1;
|
||||
|
||||
/* Read in the quota inode */
|
||||
sdp->sd_quota_inode = gfs2_lookup_simple(sdp->sd_master_dir, "quota");
|
||||
if (IS_ERR(sdp->sd_quota_inode)) {
|
||||
error = PTR_ERR(sdp->sd_quota_inode);
|
||||
fs_err(sdp, "can't get quota file inode: %d\n", error);
|
||||
goto fail_rindex;
|
||||
}
|
||||
return 0;
|
||||
|
||||
fail_qinode:
|
||||
iput(sdp->sd_quota_inode);
|
||||
fail_rindex:
|
||||
gfs2_clear_rgrpd(sdp);
|
||||
iput(sdp->sd_rindex);
|
||||
fail_statfs:
|
||||
iput(sdp->sd_statfs_inode);
|
||||
fail_inum:
|
||||
iput(sdp->sd_inum_inode);
|
||||
fail_journal:
|
||||
init_journal(sdp, UNDO);
|
||||
fail_master:
|
||||
iput(sdp->sd_master_dir);
|
||||
fail:
|
||||
return error;
|
||||
}
|
||||
|
||||
static int init_per_node(struct gfs2_sbd *sdp, int undo)
|
||||
{
|
||||
struct inode *pn = NULL;
|
||||
char buf[30];
|
||||
int error = 0;
|
||||
struct gfs2_inode *ip;
|
||||
|
||||
if (sdp->sd_args.ar_spectator)
|
||||
return 0;
|
||||
|
||||
if (undo)
|
||||
goto fail_qc_gh;
|
||||
|
||||
pn = gfs2_lookup_simple(sdp->sd_master_dir, "per_node");
|
||||
if (IS_ERR(pn)) {
|
||||
error = PTR_ERR(pn);
|
||||
fs_err(sdp, "can't find per_node directory: %d\n", error);
|
||||
return error;
|
||||
}
|
||||
|
||||
sprintf(buf, "inum_range%u", sdp->sd_jdesc->jd_jid);
|
||||
sdp->sd_ir_inode = gfs2_lookup_simple(pn, buf);
|
||||
if (IS_ERR(sdp->sd_ir_inode)) {
|
||||
error = PTR_ERR(sdp->sd_ir_inode);
|
||||
fs_err(sdp, "can't find local \"ir\" file: %d\n", error);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
sprintf(buf, "statfs_change%u", sdp->sd_jdesc->jd_jid);
|
||||
sdp->sd_sc_inode = gfs2_lookup_simple(pn, buf);
|
||||
if (IS_ERR(sdp->sd_sc_inode)) {
|
||||
error = PTR_ERR(sdp->sd_sc_inode);
|
||||
fs_err(sdp, "can't find local \"sc\" file: %d\n", error);
|
||||
goto fail_ir_i;
|
||||
}
|
||||
|
||||
sprintf(buf, "quota_change%u", sdp->sd_jdesc->jd_jid);
|
||||
sdp->sd_qc_inode = gfs2_lookup_simple(pn, buf);
|
||||
if (IS_ERR(sdp->sd_qc_inode)) {
|
||||
error = PTR_ERR(sdp->sd_qc_inode);
|
||||
fs_err(sdp, "can't find local \"qc\" file: %d\n", error);
|
||||
goto fail_ut_i;
|
||||
}
|
||||
|
||||
iput(pn);
|
||||
pn = NULL;
|
||||
|
||||
ip = GFS2_I(sdp->sd_ir_inode);
|
||||
error = gfs2_glock_nq_init(ip->i_gl,
|
||||
LM_ST_EXCLUSIVE, 0,
|
||||
&sdp->sd_ir_gh);
|
||||
if (error) {
|
||||
fs_err(sdp, "can't lock local \"ir\" file: %d\n", error);
|
||||
goto fail_qc_i;
|
||||
}
|
||||
|
||||
ip = GFS2_I(sdp->sd_sc_inode);
|
||||
error = gfs2_glock_nq_init(ip->i_gl,
|
||||
LM_ST_EXCLUSIVE, 0,
|
||||
&sdp->sd_sc_gh);
|
||||
if (error) {
|
||||
fs_err(sdp, "can't lock local \"sc\" file: %d\n", error);
|
||||
goto fail_ir_gh;
|
||||
}
|
||||
|
||||
ip = GFS2_I(sdp->sd_qc_inode);
|
||||
error = gfs2_glock_nq_init(ip->i_gl,
|
||||
LM_ST_EXCLUSIVE, 0,
|
||||
&sdp->sd_qc_gh);
|
||||
if (error) {
|
||||
fs_err(sdp, "can't lock local \"qc\" file: %d\n", error);
|
||||
goto fail_ut_gh;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
fail_qc_gh:
|
||||
gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
|
||||
fail_ut_gh:
|
||||
gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
|
||||
fail_ir_gh:
|
||||
gfs2_glock_dq_uninit(&sdp->sd_ir_gh);
|
||||
fail_qc_i:
|
||||
iput(sdp->sd_qc_inode);
|
||||
fail_ut_i:
|
||||
iput(sdp->sd_sc_inode);
|
||||
fail_ir_i:
|
||||
iput(sdp->sd_ir_inode);
|
||||
fail:
|
||||
if (pn)
|
||||
iput(pn);
|
||||
return error;
|
||||
}
|
||||
|
||||
static int init_threads(struct gfs2_sbd *sdp, int undo)
|
||||
{
|
||||
struct task_struct *p;
|
||||
int error = 0;
|
||||
|
||||
if (undo)
|
||||
goto fail_quotad;
|
||||
|
||||
sdp->sd_log_flush_time = jiffies;
|
||||
sdp->sd_jindex_refresh_time = jiffies;
|
||||
|
||||
p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
|
||||
error = IS_ERR(p);
|
||||
if (error) {
|
||||
fs_err(sdp, "can't start logd thread: %d\n", error);
|
||||
return error;
|
||||
}
|
||||
sdp->sd_logd_process = p;
|
||||
|
||||
sdp->sd_statfs_sync_time = jiffies;
|
||||
sdp->sd_quota_sync_time = jiffies;
|
||||
|
||||
p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
|
||||
error = IS_ERR(p);
|
||||
if (error) {
|
||||
fs_err(sdp, "can't start quotad thread: %d\n", error);
|
||||
goto fail;
|
||||
}
|
||||
sdp->sd_quotad_process = p;
|
||||
|
||||
return 0;
|
||||
|
||||
|
||||
fail_quotad:
|
||||
kthread_stop(sdp->sd_quotad_process);
|
||||
fail:
|
||||
kthread_stop(sdp->sd_logd_process);
|
||||
return error;
|
||||
}
|
||||
|
||||
/**
|
||||
* fill_super - Read in superblock
|
||||
* @sb: The VFS superblock
|
||||
* @data: Mount options
|
||||
* @silent: Don't complain if it's not a GFS2 filesystem
|
||||
*
|
||||
* Returns: errno
|
||||
*/
|
||||
|
||||
static int fill_super(struct super_block *sb, void *data, int silent)
|
||||
{
|
||||
struct gfs2_sbd *sdp;
|
||||
struct gfs2_holder mount_gh;
|
||||
int error;
|
||||
|
||||
sdp = init_sbd(sb);
|
||||
if (!sdp) {
|
||||
printk(KERN_WARNING "GFS2: can't alloc struct gfs2_sbd\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
error = gfs2_mount_args(sdp, (char *)data, 0);
|
||||
if (error) {
|
||||
printk(KERN_WARNING "GFS2: can't parse mount arguments\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
init_vfs(sb, SDF_NOATIME);
|
||||
|
||||
/* Set up the buffer cache and fill in some fake block size values
|
||||
to allow us to read-in the on-disk superblock. */
|
||||
sdp->sd_sb.sb_bsize = sb_min_blocksize(sb, GFS2_BASIC_BLOCK);
|
||||
sdp->sd_sb.sb_bsize_shift = sb->s_blocksize_bits;
|
||||
sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
|
||||
GFS2_BASIC_BLOCK_SHIFT;
|
||||
sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift;
|
||||
|
||||
error = init_names(sdp, silent);
|
||||
if (error)
|
||||
goto fail;
|
||||
|
||||
error = gfs2_sys_fs_add(sdp);
|
||||
if (error)
|
||||
goto fail;
|
||||
|
||||
error = gfs2_lm_mount(sdp, silent);
|
||||
if (error)
|
||||
goto fail_sys;
|
||||
|
||||
error = init_locking(sdp, &mount_gh, DO);
|
||||
if (error)
|
||||
goto fail_lm;
|
||||
|
||||
error = init_sb(sdp, silent, DO);
|
||||
if (error)
|
||||
goto fail_locking;
|
||||
|
||||
error = init_inodes(sdp, DO);
|
||||
if (error)
|
||||
goto fail_sb;
|
||||
|
||||
error = init_per_node(sdp, DO);
|
||||
if (error)
|
||||
goto fail_inodes;
|
||||
|
||||
error = gfs2_statfs_init(sdp);
|
||||
if (error) {
|
||||
fs_err(sdp, "can't initialize statfs subsystem: %d\n", error);
|
||||
goto fail_per_node;
|
||||
}
|
||||
|
||||
error = init_threads(sdp, DO);
|
||||
if (error)
|
||||
goto fail_per_node;
|
||||
|
||||
if (!(sb->s_flags & MS_RDONLY)) {
|
||||
error = gfs2_make_fs_rw(sdp);
|
||||
if (error) {
|
||||
fs_err(sdp, "can't make FS RW: %d\n", error);
|
||||
goto fail_threads;
|
||||
}
|
||||
}
|
||||
|
||||
gfs2_glock_dq_uninit(&mount_gh);
|
||||
|
||||
return 0;
|
||||
|
||||
fail_threads:
|
||||
init_threads(sdp, UNDO);
|
||||
fail_per_node:
|
||||
init_per_node(sdp, UNDO);
|
||||
fail_inodes:
|
||||
init_inodes(sdp, UNDO);
|
||||
fail_sb:
|
||||
init_sb(sdp, 0, UNDO);
|
||||
fail_locking:
|
||||
init_locking(sdp, &mount_gh, UNDO);
|
||||
fail_lm:
|
||||
gfs2_gl_hash_clear(sdp, WAIT);
|
||||
gfs2_lm_unmount(sdp);
|
||||
while (invalidate_inodes(sb))
|
||||
yield();
|
||||
fail_sys:
|
||||
gfs2_sys_fs_del(sdp);
|
||||
fail:
|
||||
kfree(sdp);
|
||||
sb->s_fs_info = NULL;
|
||||
return error;
|
||||
}
|
||||
|
||||
static int gfs2_get_sb(struct file_system_type *fs_type, int flags,
|
||||
const char *dev_name, void *data, struct vfsmount *mnt)
|
||||
{
|
||||
struct super_block *sb;
|
||||
struct gfs2_sbd *sdp;
|
||||
int error = get_sb_bdev(fs_type, flags, dev_name, data, fill_super, mnt);
|
||||
if (error)
|
||||
goto out;
|
||||
sb = mnt->mnt_sb;
|
||||
sdp = sb->s_fs_info;
|
||||
sdp->sd_gfs2mnt = mnt;
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
||||
static int fill_super_meta(struct super_block *sb, struct super_block *new,
|
||||
void *data, int silent)
|
||||
{
|
||||
struct gfs2_sbd *sdp = sb->s_fs_info;
|
||||
struct inode *inode;
|
||||
int error = 0;
|
||||
|
||||
new->s_fs_info = sdp;
|
||||
sdp->sd_vfs_meta = sb;
|
||||
|
||||
init_vfs(new, SDF_NOATIME);
|
||||
|
||||
/* Get the master inode */
|
||||
inode = igrab(sdp->sd_master_dir);
|
||||
|
||||
new->s_root = d_alloc_root(inode);
|
||||
if (!new->s_root) {
|
||||
fs_err(sdp, "can't get root dentry\n");
|
||||
error = -ENOMEM;
|
||||
iput(inode);
|
||||
}
|
||||
new->s_root->d_op = &gfs2_dops;
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
static int set_bdev_super(struct super_block *s, void *data)
|
||||
{
|
||||
s->s_bdev = data;
|
||||
s->s_dev = s->s_bdev->bd_dev;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int test_bdev_super(struct super_block *s, void *data)
|
||||
{
|
||||
return s->s_bdev == data;
|
||||
}
|
||||
|
||||
static struct super_block* get_gfs2_sb(const char *dev_name)
|
||||
{
|
||||
struct kstat stat;
|
||||
struct nameidata nd;
|
||||
struct file_system_type *fstype;
|
||||
struct super_block *sb = NULL, *s;
|
||||
struct list_head *l;
|
||||
int error;
|
||||
|
||||
error = path_lookup(dev_name, LOOKUP_FOLLOW, &nd);
|
||||
if (error) {
|
||||
printk(KERN_WARNING "GFS2: path_lookup on %s returned error\n",
|
||||
dev_name);
|
||||
goto out;
|
||||
}
|
||||
error = vfs_getattr(nd.mnt, nd.dentry, &stat);
|
||||
|
||||
fstype = get_fs_type("gfs2");
|
||||
list_for_each(l, &fstype->fs_supers) {
|
||||
s = list_entry(l, struct super_block, s_instances);
|
||||
if ((S_ISBLK(stat.mode) && s->s_dev == stat.rdev) ||
|
||||
(S_ISDIR(stat.mode) && s == nd.dentry->d_inode->i_sb)) {
|
||||
sb = s;
|
||||
goto free_nd;
|
||||
}
|
||||
}
|
||||
|
||||
printk(KERN_WARNING "GFS2: Unrecognized block device or "
|
||||
"mount point %s", dev_name);
|
||||
|
||||
free_nd:
|
||||
path_release(&nd);
|
||||
out:
|
||||
return sb;
|
||||
}
|
||||
|
||||
static int gfs2_get_sb_meta(struct file_system_type *fs_type, int flags,
|
||||
const char *dev_name, void *data, struct vfsmount *mnt)
|
||||
{
|
||||
int error = 0;
|
||||
struct super_block *sb = NULL, *new;
|
||||
struct gfs2_sbd *sdp;
|
||||
char *gfs2mnt = NULL;
|
||||
|
||||
sb = get_gfs2_sb(dev_name);
|
||||
if (!sb) {
|
||||
printk(KERN_WARNING "GFS2: gfs2 mount does not exist\n");
|
||||
error = -ENOENT;
|
||||
goto error;
|
||||
}
|
||||
sdp = (struct gfs2_sbd*) sb->s_fs_info;
|
||||
if (sdp->sd_vfs_meta) {
|
||||
printk(KERN_WARNING "GFS2: gfs2meta mount already exists\n");
|
||||
error = -EBUSY;
|
||||
goto error;
|
||||
}
|
||||
mutex_lock(&sb->s_bdev->bd_mount_mutex);
|
||||
new = sget(fs_type, test_bdev_super, set_bdev_super, sb->s_bdev);
|
||||
mutex_unlock(&sb->s_bdev->bd_mount_mutex);
|
||||
if (IS_ERR(new)) {
|
||||
error = PTR_ERR(new);
|
||||
goto error;
|
||||
}
|
||||
module_put(fs_type->owner);
|
||||
new->s_flags = flags;
|
||||
strlcpy(new->s_id, sb->s_id, sizeof(new->s_id));
|
||||
sb_set_blocksize(new, sb->s_blocksize);
|
||||
error = fill_super_meta(sb, new, data, flags & MS_SILENT ? 1 : 0);
|
||||
if (error) {
|
||||
up_write(&new->s_umount);
|
||||
deactivate_super(new);
|
||||
goto error;
|
||||
}
|
||||
|
||||
new->s_flags |= MS_ACTIVE;
|
||||
|
||||
/* Grab a reference to the gfs2 mount point */
|
||||
atomic_inc(&sdp->sd_gfs2mnt->mnt_count);
|
||||
return simple_set_mnt(mnt, new);
|
||||
error:
|
||||
if (gfs2mnt)
|
||||
kfree(gfs2mnt);
|
||||
return error;
|
||||
}
|
||||
|
||||
static void gfs2_kill_sb(struct super_block *sb)
|
||||
{
|
||||
kill_block_super(sb);
|
||||
}
|
||||
|
||||
static void gfs2_kill_sb_meta(struct super_block *sb)
|
||||
{
|
||||
struct gfs2_sbd *sdp = sb->s_fs_info;
|
||||
generic_shutdown_super(sb);
|
||||
sdp->sd_vfs_meta = NULL;
|
||||
atomic_dec(&sdp->sd_gfs2mnt->mnt_count);
|
||||
}
|
||||
|
||||
struct file_system_type gfs2_fs_type = {
	.name = "gfs2",
	.fs_flags = FS_REQUIRES_DEV,
	.get_sb = gfs2_get_sb,
	.kill_sb = gfs2_kill_sb,
	.owner = THIS_MODULE,
};

struct file_system_type gfs2meta_fs_type = {
	.name = "gfs2meta",
	.fs_flags = FS_REQUIRES_DEV,
	.get_sb = gfs2_get_sb_meta,
	.kill_sb = gfs2_kill_sb_meta,
	.owner = THIS_MODULE,
};

fs/gfs2/ops_fstype.h | 18 (new file)
@@ -0,0 +1,18 @@
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#ifndef __OPS_FSTYPE_DOT_H__
#define __OPS_FSTYPE_DOT_H__

#include <linux/fs.h>

extern struct file_system_type gfs2_fs_type;
extern struct file_system_type gfs2meta_fs_type;

#endif /* __OPS_FSTYPE_DOT_H__ */
fs/gfs2/ops_inode.c | 1151 (new file)
(diff suppressed because it is too large)
fs/gfs2/ops_inode.h | 20 (new file)
@@ -0,0 +1,20 @@
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#ifndef __OPS_INODE_DOT_H__
#define __OPS_INODE_DOT_H__

#include <linux/fs.h>

extern struct inode_operations gfs2_file_iops;
extern struct inode_operations gfs2_dir_iops;
extern struct inode_operations gfs2_symlink_iops;
extern struct inode_operations gfs2_dev_iops;

#endif /* __OPS_INODE_DOT_H__ */
fs/gfs2/ops_super.c | 468 (new file)
@@ -0,0 +1,468 @@
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/statfs.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "inode.h"
#include "lm.h"
#include "log.h"
#include "mount.h"
#include "ops_super.h"
#include "quota.h"
#include "recovery.h"
#include "rgrp.h"
#include "super.h"
#include "sys.h"
#include "util.h"
#include "trans.h"
#include "dir.h"
#include "eattr.h"
#include "bmap.h"

/**
|
||||
* gfs2_write_inode - Make sure the inode is stable on the disk
|
||||
* @inode: The inode
|
||||
* @sync: synchronous write flag
|
||||
*
|
||||
* Returns: errno
|
||||
*/
|
||||
|
||||
static int gfs2_write_inode(struct inode *inode, int sync)
|
||||
{
|
||||
struct gfs2_inode *ip = GFS2_I(inode);
|
||||
|
||||
/* Check this is a "normal" inode */
|
||||
if (inode->i_private) {
|
||||
if (current->flags & PF_MEMALLOC)
|
||||
return 0;
|
||||
if (sync)
|
||||
gfs2_log_flush(GFS2_SB(inode), ip->i_gl);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_put_super - Unmount the filesystem
|
||||
* @sb: The VFS superblock
|
||||
*
|
||||
*/
|
||||
|
||||
static void gfs2_put_super(struct super_block *sb)
|
||||
{
|
||||
struct gfs2_sbd *sdp = sb->s_fs_info;
|
||||
int error;
|
||||
|
||||
if (!sdp)
|
||||
return;
|
||||
|
||||
if (!strncmp(sb->s_type->name, "gfs2meta", 8))
|
||||
return; /* Nothing to do */
|
||||
|
||||
/* Unfreeze the filesystem, if we need to */
|
||||
|
||||
mutex_lock(&sdp->sd_freeze_lock);
|
||||
if (sdp->sd_freeze_count)
|
||||
gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
|
||||
mutex_unlock(&sdp->sd_freeze_lock);
|
||||
|
||||
kthread_stop(sdp->sd_quotad_process);
|
||||
kthread_stop(sdp->sd_logd_process);
|
||||
kthread_stop(sdp->sd_recoverd_process);
|
||||
while (sdp->sd_glockd_num--)
|
||||
kthread_stop(sdp->sd_glockd_process[sdp->sd_glockd_num]);
|
||||
kthread_stop(sdp->sd_scand_process);
|
||||
|
||||
if (!(sb->s_flags & MS_RDONLY)) {
|
||||
error = gfs2_make_fs_ro(sdp);
|
||||
if (error)
|
||||
gfs2_io_error(sdp);
|
||||
}
|
||||
/* At this point, we're through modifying the disk */
|
||||
|
||||
/* Release stuff */
|
||||
|
||||
iput(sdp->sd_master_dir);
|
||||
iput(sdp->sd_jindex);
|
||||
iput(sdp->sd_inum_inode);
|
||||
iput(sdp->sd_statfs_inode);
|
||||
iput(sdp->sd_rindex);
|
||||
iput(sdp->sd_quota_inode);
|
||||
|
||||
gfs2_glock_put(sdp->sd_rename_gl);
|
||||
gfs2_glock_put(sdp->sd_trans_gl);
|
||||
|
||||
if (!sdp->sd_args.ar_spectator) {
|
||||
gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
|
||||
gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
|
||||
gfs2_glock_dq_uninit(&sdp->sd_ir_gh);
|
||||
gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
|
||||
gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
|
||||
iput(sdp->sd_ir_inode);
|
||||
iput(sdp->sd_sc_inode);
|
||||
iput(sdp->sd_qc_inode);
|
||||
}
|
||||
|
||||
gfs2_glock_dq_uninit(&sdp->sd_live_gh);
|
||||
gfs2_clear_rgrpd(sdp);
|
||||
gfs2_jindex_free(sdp);
|
||||
/* Take apart glock structures and buffer lists */
|
||||
gfs2_gl_hash_clear(sdp, WAIT);
|
||||
/* Unmount the locking protocol */
|
||||
gfs2_lm_unmount(sdp);
|
||||
|
||||
/* At this point, we're through participating in the lockspace */
|
||||
gfs2_sys_fs_del(sdp);
|
||||
kfree(sdp);
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_write_super - disk commit all incore transactions
|
||||
* @sb: the filesystem
|
||||
*
|
||||
* This function is called every time sync(2) is called.
|
||||
* After this exits, all dirty buffers are synced.
|
||||
*/
|
||||
|
||||
static void gfs2_write_super(struct super_block *sb)
|
||||
{
|
||||
gfs2_log_flush(sb->s_fs_info, NULL);
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_write_super_lockfs - prevent further writes to the filesystem
|
||||
* @sb: the VFS structure for the filesystem
|
||||
*
|
||||
*/
|
||||
|
||||
static void gfs2_write_super_lockfs(struct super_block *sb)
|
||||
{
|
||||
struct gfs2_sbd *sdp = sb->s_fs_info;
|
||||
int error;
|
||||
|
||||
for (;;) {
|
||||
error = gfs2_freeze_fs(sdp);
|
||||
if (!error)
|
||||
break;
|
||||
|
||||
switch (error) {
|
||||
case -EBUSY:
|
||||
fs_err(sdp, "waiting for recovery before freeze\n");
|
||||
break;
|
||||
|
||||
default:
|
||||
fs_err(sdp, "error freezing FS: %d\n", error);
|
||||
break;
|
||||
}
|
||||
|
||||
fs_err(sdp, "retrying...\n");
|
||||
msleep(1000);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_unlockfs - reallow writes to the filesystem
|
||||
* @sb: the VFS structure for the filesystem
|
||||
*
|
||||
*/
|
||||
|
||||
static void gfs2_unlockfs(struct super_block *sb)
|
||||
{
|
||||
gfs2_unfreeze_fs(sb->s_fs_info);
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_statfs - Gather and return stats about the filesystem
|
||||
* @sb: The superblock
|
||||
* @statfsbuf: The buffer
|
||||
*
|
||||
* Returns: 0 on success or error code
|
||||
*/
|
||||
|
||||
static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
|
||||
{
|
||||
struct super_block *sb = dentry->d_inode->i_sb;
|
||||
struct gfs2_sbd *sdp = sb->s_fs_info;
|
||||
struct gfs2_statfs_change sc;
|
||||
int error;
|
||||
|
||||
if (gfs2_tune_get(sdp, gt_statfs_slow))
|
||||
error = gfs2_statfs_slow(sdp, &sc);
|
||||
else
|
||||
error = gfs2_statfs_i(sdp, &sc);
|
||||
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
buf->f_type = GFS2_MAGIC;
|
||||
buf->f_bsize = sdp->sd_sb.sb_bsize;
|
||||
buf->f_blocks = sc.sc_total;
|
||||
buf->f_bfree = sc.sc_free;
|
||||
buf->f_bavail = sc.sc_free;
|
||||
buf->f_files = sc.sc_dinodes + sc.sc_free;
|
||||
buf->f_ffree = sc.sc_free;
|
||||
buf->f_namelen = GFS2_FNAMESIZE;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_remount_fs - called when the FS is remounted
|
||||
* @sb: the filesystem
|
||||
* @flags: the remount flags
|
||||
* @data: extra data passed in (not used right now)
|
||||
*
|
||||
* Returns: errno
|
||||
*/
|
||||
|
||||
static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
|
||||
{
|
||||
struct gfs2_sbd *sdp = sb->s_fs_info;
|
||||
int error;
|
||||
|
||||
error = gfs2_mount_args(sdp, data, 1);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
if (sdp->sd_args.ar_spectator)
|
||||
*flags |= MS_RDONLY;
|
||||
else {
|
||||
if (*flags & MS_RDONLY) {
|
||||
if (!(sb->s_flags & MS_RDONLY))
|
||||
error = gfs2_make_fs_ro(sdp);
|
||||
} else if (!(*flags & MS_RDONLY) &&
|
||||
(sb->s_flags & MS_RDONLY)) {
|
||||
error = gfs2_make_fs_rw(sdp);
|
||||
}
|
||||
}
|
||||
|
||||
if (*flags & (MS_NOATIME | MS_NODIRATIME))
|
||||
set_bit(SDF_NOATIME, &sdp->sd_flags);
|
||||
else
|
||||
clear_bit(SDF_NOATIME, &sdp->sd_flags);
|
||||
|
||||
/* Don't let the VFS update atimes. GFS2 handles this itself. */
|
||||
*flags |= MS_NOATIME | MS_NODIRATIME;
|
||||
|
||||
return error;
|
||||
}
|
||||
|
/**
 * gfs2_clear_inode - Deallocate an inode when VFS is done with it
 * @inode: The VFS inode
 *
 */

static void gfs2_clear_inode(struct inode *inode)
{
	/* This tells us it's a "real" inode and not one which only
	 * serves to contain an address space (see rgrp.c, meta_io.c)
	 * and which therefore doesn't have its own glocks.
	 */
	if (inode->i_private) {
		struct gfs2_inode *ip = GFS2_I(inode);
		gfs2_glock_inode_squish(inode);
		gfs2_assert(inode->i_sb->s_fs_info, ip->i_gl->gl_state == LM_ST_UNLOCKED);
		ip->i_gl->gl_object = NULL;
		gfs2_glock_schedule_for_reclaim(ip->i_gl);
		gfs2_glock_put(ip->i_gl);
		ip->i_gl = NULL;
		if (ip->i_iopen_gh.gh_gl)
			gfs2_glock_dq_uninit(&ip->i_iopen_gh);
	}
}

/**
 * gfs2_show_options - Show mount options for /proc/mounts
 * @s: seq_file structure
 * @mnt: vfsmount
 *
 * Returns: 0 on success or error code
 */

static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
{
	struct gfs2_sbd *sdp = mnt->mnt_sb->s_fs_info;
	struct gfs2_args *args = &sdp->sd_args;

	if (args->ar_lockproto[0])
		seq_printf(s, ",lockproto=%s", args->ar_lockproto);
	if (args->ar_locktable[0])
		seq_printf(s, ",locktable=%s", args->ar_locktable);
	if (args->ar_hostdata[0])
		seq_printf(s, ",hostdata=%s", args->ar_hostdata);
	if (args->ar_spectator)
		seq_printf(s, ",spectator");
	if (args->ar_ignore_local_fs)
		seq_printf(s, ",ignore_local_fs");
	if (args->ar_localflocks)
		seq_printf(s, ",localflocks");
	if (args->ar_localcaching)
		seq_printf(s, ",localcaching");
	if (args->ar_debug)
		seq_printf(s, ",debug");
	if (args->ar_upgrade)
		seq_printf(s, ",upgrade");
	if (args->ar_num_glockd != GFS2_GLOCKD_DEFAULT)
		seq_printf(s, ",num_glockd=%u", args->ar_num_glockd);
	if (args->ar_posix_acl)
		seq_printf(s, ",acl");
	if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
		char *state;
		switch (args->ar_quota) {
		case GFS2_QUOTA_OFF:
			state = "off";
			break;
		case GFS2_QUOTA_ACCOUNT:
			state = "account";
			break;
		case GFS2_QUOTA_ON:
			state = "on";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",quota=%s", state);
	}
	if (args->ar_suiddir)
		seq_printf(s, ",suiddir");
	if (args->ar_data != GFS2_DATA_DEFAULT) {
		char *state;
		switch (args->ar_data) {
		case GFS2_DATA_WRITEBACK:
			state = "writeback";
			break;
		case GFS2_DATA_ORDERED:
			state = "ordered";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",data=%s", state);
	}

	return 0;
}

/*
 * We have to (at the moment) hold the inode's main lock to cover
 * the gap between unlocking the shared lock on the iopen lock and
 * taking the exclusive lock. I'd rather do a shared -> exclusive
 * conversion on the iopen lock, but we can change that later. This
 * is safe, just less efficient.
 */
static void gfs2_delete_inode(struct inode *inode)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int error;

	if (!inode->i_private)
		goto out;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &gh);
	if (unlikely(error)) {
		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
		goto out;
	}

	gfs2_glock_dq(&ip->i_iopen_gh);
	gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &ip->i_iopen_gh);
	error = gfs2_glock_nq(&ip->i_iopen_gh);
	if (error)
		goto out_uninit;

	if (S_ISDIR(ip->i_di.di_mode) &&
	    (ip->i_di.di_flags & GFS2_DIF_EXHASH)) {
		error = gfs2_dir_exhash_dealloc(ip);
		if (error)
			goto out_unlock;
	}

	if (ip->i_di.di_eattr) {
		error = gfs2_ea_dealloc(ip);
		if (error)
			goto out_unlock;
	}

	if (!gfs2_is_stuffed(ip)) {
		error = gfs2_file_dealloc(ip);
		if (error)
			goto out_unlock;
	}

	error = gfs2_dinode_dealloc(ip);

out_unlock:
	gfs2_glock_dq(&ip->i_iopen_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_iopen_gh);
	gfs2_glock_dq_uninit(&gh);
	if (error)
		fs_warn(sdp, "gfs2_delete_inode: %d\n", error);
out:
	truncate_inode_pages(&inode->i_data, 0);
	clear_inode(inode);
}

static struct inode *gfs2_alloc_inode(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip;

	ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL);
	if (!ip)
		return NULL;	/* tell the VFS the allocation failed */
	ip->i_flags = 0;
	ip->i_gl = NULL;
	ip->i_greedy = gfs2_tune_get(sdp, gt_greedy_default);
	ip->i_last_pfault = jiffies;
	return &ip->i_inode;
}

static void gfs2_destroy_inode(struct inode *inode)
{
	kmem_cache_free(gfs2_inode_cachep, inode);
}

struct super_operations gfs2_super_ops = {
	.alloc_inode = gfs2_alloc_inode,
	.destroy_inode = gfs2_destroy_inode,
	.write_inode = gfs2_write_inode,
	.delete_inode = gfs2_delete_inode,
	.put_super = gfs2_put_super,
	.write_super = gfs2_write_super,
	.write_super_lockfs = gfs2_write_super_lockfs,
	.unlockfs = gfs2_unlockfs,
	.statfs = gfs2_statfs,
	.remount_fs = gfs2_remount_fs,
	.clear_inode = gfs2_clear_inode,
	.show_options = gfs2_show_options,
};

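For context, a super_operations table like this is normally hooked up to the VFS through a struct file_system_type registered at module init; in this patch that wiring lives in fs/gfs2/ops_fstype.c. The sketch below uses the standard 2.6-era VFS API (get_sb_bdev, kill_block_super, register_filesystem); the example_* identifiers are assumptions for illustration and are not the names used by the patch.

#include <linux/fs.h>
#include <linux/module.h>

/* Illustrative sketch only -- not the registration code from this patch. */
static int example_fill_super(struct super_block *sb, void *data, int silent)
{
	sb->s_op = &gfs2_super_ops;	/* VFS dispatches through this table */
	/* ... read the on-disk superblock, set up sb->s_fs_info, root inode ... */
	return 0;
}

static int example_get_sb(struct file_system_type *fs_type, int flags,
			  const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_bdev(fs_type, flags, dev_name, data,
			   example_fill_super, mnt);
}

static struct file_system_type example_gfs2_fs_type = {
	.owner    = THIS_MODULE,
	.name     = "gfs2",
	.get_sb   = example_get_sb,
	.kill_sb  = kill_block_super,
	.fs_flags = FS_REQUIRES_DEV,
};

/* register_filesystem(&example_gfs2_fs_type) would then be called from
 * the module init function, and unregister_filesystem() from module exit. */
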
Some files were not shown because too many files have changed in this diff.