7c9ca62113
Here is an update of Bob's original rbtree patch which, in addition, also resolves the rather strange ref counting that was being done relating to the bitmap blocks.

Originally we had a dual system for journaling resource groups: the metadata blocks were journaled and the rgrp itself was also added to a list. The reason for adding the rgrp to the list in the journal was so that the "repolish clones" code could be run to update the free space, and potentially send any discard requests, when the log was flushed. This was done by comparing the "cloned" bitmap with what had been written back on disk during the transaction commit.

Due to this, there was a requirement to hang on to the rgrps' bitmap buffers until the journal had been flushed. For that reason, there was a rather complicated setup in the ->go_lock / ->go_unlock functions for rgrps, involving both a mutex and a spinlock (the ->sd_rindex_spin) to maintain a reference count on the buffers.

However, the journal maintains a reference count on the buffers anyway, since they are being journaled as metadata buffers. So by moving the code which deals with the post-journal accounting for bitmap blocks into the metadata journaling code, we can entirely dispense with the rather strange buffer ref counting scheme and also with the requirement to journal the rgrps.

The net result of all this is that the ->sd_rindex_spin is left to do exactly one job: to look after the rbtree of rgrps.

This patch is designed to be a stepping stone towards using RCU for the rbtree of resource groups; however, the reduction in the number of uses of the ->sd_rindex_spin is likely to benefit multi-threaded workloads anyway.

The patch retains ->go_lock and ->go_unlock for rgrps, but these may also be removed in future in favour of calling the functions directly where required in the code. That would allow locking of resource groups without needing to actually read them in - something that could be useful in speeding up statfs.

In the meantime, it is valid to dereference ->bi_bh only when the rgrp is locked. This is basically the same rule as before, modulo the references not being valid until the following journal flush.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Cc: Benjamin Marzinski <bmarzins@redhat.com>
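As a worked example of the locking rule in the last paragraph above, the sketch below walks a resource group's bitmap buffers only between ->go_lock and ->go_unlock. It is illustrative only, not part of the patch: the helper name, the route to the rgrp via gh_gl->gl_object, and the rd_length/rd_bits field names are assumptions about the in-core structures rather than anything declared in this header; only gfs2_rgrp_go_lock()/gfs2_rgrp_go_unlock() and ->bi_bh come from the source itself.

/*
 * Sketch only: assumes the usual fs/gfs2 includes ("incore.h", "rgrp.h")
 * and that the rgrp can be reached through its glock's ->gl_object, with
 * rd_length/rd_bits/bi_bh as the in-core field names.
 */
static int dump_rgrp_bitmaps(struct gfs2_holder *rgd_gh)
{
	struct gfs2_rgrpd *rgd;
	unsigned int x;
	int error;

	error = gfs2_rgrp_go_lock(rgd_gh);	/* reads the bitmap blocks in */
	if (error)
		return error;

	rgd = rgd_gh->gh_gl->gl_object;		/* assumed route to the rgrp */

	for (x = 0; x < rgd->rd_length; x++) {
		const struct gfs2_bitmap *bi = rgd->rd_bits + x;

		/* ->bi_bh may only be dereferenced while the rgrp is locked */
		if (bi->bi_bh)
			pr_debug("bitmap block %llu\n",
				 (unsigned long long)bi->bi_bh->b_blocknr);
	}

	gfs2_rgrp_go_unlock(rgd_gh);	/* buffers not guaranteed valid after this */
	return 0;
}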
76 lines · 2.7 KiB · C
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#ifndef __RGRP_DOT_H__
#define __RGRP_DOT_H__

#include <linux/slab.h>

struct gfs2_rgrpd;
struct gfs2_sbd;
struct gfs2_holder;

extern void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd);

struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk);
struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp);
struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd);

extern void gfs2_clear_rgrpd(struct gfs2_sbd *sdp);
extern int gfs2_rindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ri_gh);

extern int gfs2_rgrp_go_lock(struct gfs2_holder *gh);
extern void gfs2_rgrp_go_unlock(struct gfs2_holder *gh);

extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);
static inline void gfs2_alloc_put(struct gfs2_inode *ip)
{
	BUG_ON(ip->i_alloc == NULL);
	kfree(ip->i_alloc);
	ip->i_alloc = NULL;
}

extern int gfs2_inplace_reserve_i(struct gfs2_inode *ip, int hold_rindex,
				  char *file, unsigned int line);
#define gfs2_inplace_reserve(ip) \
	gfs2_inplace_reserve_i((ip), 1, __FILE__, __LINE__)
#define gfs2_inplace_reserve_ri(ip) \
	gfs2_inplace_reserve_i((ip), 0, __FILE__, __LINE__)

extern void gfs2_inplace_release(struct gfs2_inode *ip);

extern int gfs2_ri_update(struct gfs2_inode *ip);
extern int gfs2_alloc_block(struct gfs2_inode *ip, u64 *bn, unsigned int *n);
extern int gfs2_alloc_di(struct gfs2_inode *ip, u64 *bn, u64 *generation);

extern void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta);
extern void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen);
extern void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip);
extern void gfs2_unlink_di(struct inode *inode);
extern int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr,
			       unsigned int type);

struct gfs2_rgrp_list {
	unsigned int rl_rgrps;
	unsigned int rl_space;
	struct gfs2_rgrpd **rl_rgd;
	struct gfs2_holder *rl_ghs;
};

extern void gfs2_rlist_add(struct gfs2_sbd *sdp, struct gfs2_rgrp_list *rlist,
			   u64 block);
extern void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state);
extern void gfs2_rlist_free(struct gfs2_rgrp_list *rlist);
extern u64 gfs2_ri_total(struct gfs2_sbd *sdp);
extern int gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl);
extern void gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
				    struct buffer_head *bh,
				    const struct gfs2_bitmap *bi);

#endif /* __RGRP_DOT_H__ */