Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
Pull btrfs update from Chris Mason:
 "The biggest feature in the pull is the new (and still experimental)
  raid56 code that David Woodhouse started long ago.  I'm still working
  on the parity logging setup that will avoid inconsistent parity after
  a crash, so this is only for testing right now.  But, I'd really like
  to get it out to a broader audience to hammer out any performance
  issues or other problems.  scrub does not yet correct errors on
  raid5/6 either.

  Josef has another pass at fsync performance.  The big change here is
  to combine waiting for metadata with waiting for data, which is a big
  latency win.  It is also step one toward using atomics from the
  hardware during a commit.

  Mark Fasheh has a new way to use btrfs send/receive to send only the
  metadata changes.  SUSE is using this to make snapper more efficient
  at finding changes between snapshots.

  Snapshot-aware defrag is also included.

  Otherwise we have a large number of fixes and cleanups.  Eric Sandeen
  wins the award for removing the most lines, and I'm hoping we steal
  this idea from XFS over and over again."

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (118 commits)
  btrfs: fixup/remove module.h usage as required
  Btrfs: delete inline extents when we find them during logging
  btrfs: try harder to allocate raid56 stripe cache
  Btrfs: cleanup to make the function btrfs_delalloc_reserve_metadata more logic
  Btrfs: don't call btrfs_qgroup_free if just btrfs_qgroup_reserve fails
  Btrfs: remove reduplicate check about root in the function btrfs_clean_quota_tree
  Btrfs: return ENOMEM rather than use BUG_ON when btrfs_alloc_path fails
  Btrfs: fix missing deleted items in btrfs_clean_quota_tree
  btrfs: use only inline_pages from extent buffer
  Btrfs: fix wrong reserved space when deleting a snapshot/subvolume
  Btrfs: fix wrong reserved space in qgroup during snap/subv creation
  Btrfs: remove unnecessary dget_parent/dput when creating the pending snapshot
  btrfs: remove a printk from scan_one_device
  Btrfs: fix NULL pointer after aborting a transaction
  Btrfs: fix memory leak of log roots
  Btrfs: copy everything if we've created an inline extent
  btrfs: cleanup for open-coded alignment
  Btrfs: do not change inode flags in rename
  Btrfs: use reserved space for creating a snapshot
  clear chunk_alloc flag on retryable failure
  ...
This commit is contained in:
commit b695188dd3
48 changed files with 5457 additions and 1033 deletions
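For readers unfamiliar with what the new raid56 code computes: RAID5 keeps one XOR parity block (P) per stripe, and RAID6 adds a second syndrome (Q) built from Galois-field arithmetic; the RAID6_PQ and XOR_BLOCKS libraries selected in the Kconfig hunk below provide optimized kernel implementations of exactly this math. The following standalone C program is an illustrative sketch of the arithmetic only, not the kernel code:

/*
 * Illustrative userspace sketch: P/Q syndrome generation for a RAID6
 * stripe with ndata data blocks. The kernel's lib/raid6 does the same
 * math with SIMD acceleration.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* multiply by x (i.e. by 2) in GF(2^8) with the RAID6 polynomial 0x11d */
static uint8_t gf2_mul2(uint8_t v)
{
	return (uint8_t)((v << 1) ^ ((v & 0x80) ? 0x1d : 0));
}

static void gen_syndrome(int ndata, size_t len,
			 uint8_t **data, uint8_t *p, uint8_t *q)
{
	for (size_t i = 0; i < len; i++) {
		uint8_t wp = 0, wq = 0;
		/* walk data disks from highest to lowest index */
		for (int d = ndata - 1; d >= 0; d--) {
			wq = gf2_mul2(wq) ^ data[d][i]; /* Q: powers of 2 weights */
			wp ^= data[d][i];               /* P: plain XOR parity */
		}
		p[i] = wp;
		q[i] = wq;
	}
}

int main(void)
{
	uint8_t d0[4] = {1, 2, 3, 4}, d1[4] = {5, 6, 7, 8};
	uint8_t *data[2] = {d0, d1}, p[4], q[4];

	gen_syndrome(2, sizeof(p), data, p, q);
	printf("P[0]=%02x Q[0]=%02x\n", p[0], q[0]);
	return 0;
}

Losing one block of a stripe is repaired from P alone; losing two requires solving the P/Q pair, which is why RAID6 tolerates two missing devices.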
fs/btrfs/Kconfig
@@ -5,6 +5,9 @@ config BTRFS_FS
 	select ZLIB_DEFLATE
 	select LZO_COMPRESS
 	select LZO_DECOMPRESS
+	select RAID6_PQ
+	select XOR_BLOCKS
+
 	help
 	  Btrfs is a new filesystem with extents, writable snapshotting,
 	  support for multiple devices and many more features.
fs/btrfs/Makefile
@@ -8,7 +8,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
 	   extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
 	   export.o tree-log.o free-space-cache.o zlib.o lzo.o \
 	   compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
-	   reada.o backref.o ulist.o qgroup.o send.o dev-replace.o
+	   reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o

 btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
 btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
fs/btrfs/backref.c
@@ -352,11 +352,8 @@ static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
 		err = __resolve_indirect_ref(fs_info, search_commit_root,
 					     time_seq, ref, parents,
 					     extent_item_pos);
-		if (err) {
-			if (ret == 0)
-				ret = err;
+		if (err)
 			continue;
-		}

 		/* we put the first parent into the ref at hand */
 		ULIST_ITER_INIT(&uiter);
fs/btrfs/backref.h
@@ -19,7 +19,7 @@
 #ifndef __BTRFS_BACKREF__
 #define __BTRFS_BACKREF__

-#include "ioctl.h"
+#include <linux/btrfs.h>
 #include "ulist.h"
 #include "extent_io.h"

fs/btrfs/btrfs_inode.h
@@ -40,6 +40,8 @@
 #define BTRFS_INODE_HAS_ASYNC_EXTENT		6
 #define BTRFS_INODE_NEEDS_FULL_SYNC		7
 #define BTRFS_INODE_COPY_EVERYTHING		8
+#define BTRFS_INODE_IN_DELALLOC_LIST		9
+#define BTRFS_INODE_READDIO_NEED_LOCK		10

 /* in memory btrfs inode */
 struct btrfs_inode {
@@ -216,4 +218,22 @@ static inline int btrfs_inode_in_log(struct inode *inode, u64 generation)
 	return 0;
 }

+/*
+ * Disable DIO read nolock optimization, so new dio readers will be forced
+ * to grab i_mutex. It is used to avoid the endless truncate due to
+ * nonlocked dio read.
+ */
+static inline void btrfs_inode_block_unlocked_dio(struct inode *inode)
+{
+	set_bit(BTRFS_INODE_READDIO_NEED_LOCK, &BTRFS_I(inode)->runtime_flags);
+	smp_mb();
+}
+
+static inline void btrfs_inode_resume_unlocked_dio(struct inode *inode)
+{
+	smp_mb__before_clear_bit();
+	clear_bit(BTRFS_INODE_READDIO_NEED_LOCK,
+		  &BTRFS_I(inode)->runtime_flags);
+}
+
 #endif
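The two helpers added above are the writer side of a bit-plus-barrier handshake: block_unlocked_dio() sets the flag and then issues a full barrier so the flag is visible before the truncate proceeds, and resume_unlocked_dio() orders the barrier before the clear. A reader in the direct-IO path would pair with them roughly as below — a hedged sketch only, since the real consumer lives in btrfs's DIO read code and the function name here is invented:

/* illustrative only: how a DIO reader would consult the flag */
static void example_dio_read_path(struct inode *inode)
{
	if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
		     &BTRFS_I(inode)->runtime_flags)) {
		/* nolock optimization disabled: take i_mutex as usual */
		mutex_lock(&inode->i_mutex);
		/* ... locked direct IO read ... */
		mutex_unlock(&inode->i_mutex);
	} else {
		/* ... lockless direct IO read ... */
	}
}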
fs/btrfs/check-integrity.c
@@ -813,8 +813,7 @@ static int btrfsic_process_superblock_dev_mirror(
 		(bh->b_data + (dev_bytenr & 4095));

 	if (btrfs_super_bytenr(super_tmp) != dev_bytenr ||
-	    strncmp((char *)(&(super_tmp->magic)), BTRFS_MAGIC,
-		    sizeof(super_tmp->magic)) ||
+	    super_tmp->magic != cpu_to_le64(BTRFS_MAGIC) ||
 	    memcmp(device->uuid, super_tmp->dev_item.uuid, BTRFS_UUID_SIZE) ||
 	    btrfs_super_nodesize(super_tmp) != state->metablock_size ||
 	    btrfs_super_leafsize(super_tmp) != state->metablock_size ||
fs/btrfs/compression.c
@@ -372,7 +372,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 		page = compressed_pages[pg_index];
 		page->mapping = inode->i_mapping;
 		if (bio->bi_size)
-			ret = io_tree->ops->merge_bio_hook(page, 0,
+			ret = io_tree->ops->merge_bio_hook(WRITE, page, 0,
							   PAGE_CACHE_SIZE,
							   bio, 0);
 		else
@@ -655,7 +655,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 		page->index = em_start >> PAGE_CACHE_SHIFT;

 		if (comp_bio->bi_size)
-			ret = tree->ops->merge_bio_hook(page, 0,
+			ret = tree->ops->merge_bio_hook(READ, page, 0,
							PAGE_CACHE_SIZE,
							comp_bio, 0);
 		else
fs/btrfs/ctree.c
@@ -1138,6 +1138,7 @@ __tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
 		switch (tm->op) {
 		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
 			BUG_ON(tm->slot < n);
+			/* Fallthrough */
 		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
 		case MOD_LOG_KEY_REMOVE:
 			btrfs_set_node_key(eb, &tm->key, tm->slot);
@@ -1222,7 +1223,7 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,

 	__tree_mod_log_rewind(eb_rewin, time_seq, tm);
 	WARN_ON(btrfs_header_nritems(eb_rewin) >
-		BTRFS_NODEPTRS_PER_BLOCK(fs_info->fs_root));
+		BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));

 	return eb_rewin;
 }
@@ -1441,7 +1442,7 @@ int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
  */
 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
 		       struct btrfs_root *root, struct extent_buffer *parent,
-		       int start_slot, int cache_only, u64 *last_ret,
+		       int start_slot, u64 *last_ret,
 		       struct btrfs_key *progress)
 {
 	struct extent_buffer *cur;
@@ -1461,8 +1462,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
 	struct btrfs_disk_key disk_key;

 	parent_level = btrfs_header_level(parent);
-	if (cache_only && parent_level != 1)
-		return 0;

 	WARN_ON(trans->transaction != root->fs_info->running_transaction);
 	WARN_ON(trans->transid != root->fs_info->generation);
@@ -1508,10 +1507,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
 		else
 			uptodate = 0;
 		if (!cur || !uptodate) {
-			if (cache_only) {
-				free_extent_buffer(cur);
-				continue;
-			}
 			if (!cur) {
 				cur = read_tree_block(root, blocknr,
 						      blocksize, gen);
@@ -4825,8 +4820,8 @@ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)

 /*
  * A helper function to walk down the tree starting at min_key, and looking
- * for nodes or leaves that are either in cache or have a minimum
- * transaction id. This is used by the btree defrag code, and tree logging
+ * for nodes or leaves that are have a minimum transaction id.
+ * This is used by the btree defrag code, and tree logging
  *
  * This does not cow, but it does stuff the starting key it finds back
  * into min_key, so you can call btrfs_search_slot with cow=1 on the
@@ -4847,7 +4842,7 @@ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
  */
 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_key *max_key,
-			 struct btrfs_path *path, int cache_only,
+			 struct btrfs_path *path,
			 u64 min_trans)
 {
 	struct extent_buffer *cur;
@@ -4887,15 +4882,12 @@ again:
 		if (sret && slot > 0)
 			slot--;
 		/*
-		 * check this node pointer against the cache_only and
-		 * min_trans parameters. If it isn't in cache or is too
-		 * old, skip to the next one.
+		 * check this node pointer against the min_trans parameters.
+		 * If it is too old, old, skip to the next one.
 		 */
 		while (slot < nritems) {
 			u64 blockptr;
 			u64 gen;
-			struct extent_buffer *tmp;
-			struct btrfs_disk_key disk_key;

 			blockptr = btrfs_node_blockptr(cur, slot);
 			gen = btrfs_node_ptr_generation(cur, slot);
@@ -4903,27 +4895,7 @@ again:
 				slot++;
 				continue;
 			}
-			if (!cache_only)
-				break;
-
-			if (max_key) {
-				btrfs_node_key(cur, &disk_key, slot);
-				if (comp_keys(&disk_key, max_key) >= 0) {
-					ret = 1;
-					goto out;
-				}
-			}
-
-			tmp = btrfs_find_tree_block(root, blockptr,
-					    btrfs_level_size(root, level - 1));
-
-			if (tmp && btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
-				free_extent_buffer(tmp);
-				break;
-			}
-			if (tmp)
-				free_extent_buffer(tmp);
-			slot++;
+			break;
 		}
 find_next_key:
 		/*
@@ -4934,7 +4906,7 @@ find_next_key:
 		path->slots[level] = slot;
 		btrfs_set_path_blocking(path);
 		sret = btrfs_find_next_key(root, path, min_key, level,
-					  cache_only, min_trans);
+					  min_trans);
 		if (sret == 0) {
 			btrfs_release_path(path);
 			goto again;
@@ -5399,8 +5371,7 @@ out:
 /*
  * this is similar to btrfs_next_leaf, but does not try to preserve
  * and fixup the path.  It looks for and returns the next key in the
- * tree based on the current path and the cache_only and min_trans
- * parameters.
+ * tree based on the current path and the min_trans parameters.
  *
  * 0 is returned if another key is found, < 0 if there are any errors
  * and 1 is returned if there are no higher keys in the tree
@@ -5409,8 +5380,7 @@ out:
  * calling this function.
  */
 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
-			struct btrfs_key *key, int level,
-			int cache_only, u64 min_trans)
+			struct btrfs_key *key, int level, u64 min_trans)
 {
 	int slot;
 	struct extent_buffer *c;
@@ -5461,22 +5431,8 @@ next:
 	if (level == 0)
 		btrfs_item_key_to_cpu(c, key, slot);
 	else {
 		u64 blockptr = btrfs_node_blockptr(c, slot);
 		u64 gen = btrfs_node_ptr_generation(c, slot);

-		if (cache_only) {
-			struct extent_buffer *cur;
-			cur = btrfs_find_tree_block(root, blockptr,
-					    btrfs_level_size(root, level - 1));
-			if (!cur ||
-			    btrfs_buffer_uptodate(cur, gen, 1) <= 0) {
-				slot++;
-				if (cur)
-					free_extent_buffer(cur);
-				goto next;
-			}
-			free_extent_buffer(cur);
-		}
 		if (gen < min_trans) {
 			slot++;
 			goto next;
fs/btrfs/ctree.h
@@ -31,10 +31,10 @@
 #include <trace/events/btrfs.h>
 #include <asm/kmap_types.h>
 #include <linux/pagemap.h>
+#include <linux/btrfs.h>
 #include "extent_io.h"
 #include "extent_map.h"
 #include "async-thread.h"
-#include "ioctl.h"

 struct btrfs_trans_handle;
 struct btrfs_transaction;
@@ -46,7 +46,7 @@ extern struct kmem_cache *btrfs_path_cachep;
 extern struct kmem_cache *btrfs_free_space_cachep;
 struct btrfs_ordered_sum;

-#define BTRFS_MAGIC "_BHRfS_M"
+#define BTRFS_MAGIC 0x4D5F53665248425FULL /* ascii _BHRfS_M, no null */

 #define BTRFS_MAX_MIRRORS 3

@@ -191,6 +191,8 @@ static int btrfs_csum_sizes[] = { 4, 0 };
 /* ioprio of readahead is set to idle */
 #define BTRFS_IOPRIO_READA (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0))

+#define BTRFS_DIRTY_METADATA_THRESH	(32 * 1024 * 1024)
+
 /*
  * The key defines the order in the tree, and so it also defines (optimal)
  * block layout.
@@ -336,7 +338,10 @@ static inline unsigned long btrfs_chunk_item_size(int num_stripes)
 /*
  * File system states
  */
+#define BTRFS_FS_STATE_ERROR		0
+#define BTRFS_FS_STATE_REMOUNTING	1

+/* Super block flags */
 /* Errors detected */
 #define BTRFS_SUPER_FLAG_ERROR		(1ULL << 2)

@@ -502,6 +507,7 @@ struct btrfs_super_block {
 #define BTRFS_FEATURE_INCOMPAT_BIG_METADATA	(1ULL << 5)

 #define BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF	(1ULL << 6)
+#define BTRFS_FEATURE_INCOMPAT_RAID56		(1ULL << 7)

 #define BTRFS_FEATURE_COMPAT_SUPP		0ULL
 #define BTRFS_FEATURE_COMPAT_RO_SUPP		0ULL
@@ -511,6 +517,7 @@ struct btrfs_super_block {
	 BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS |		\
	 BTRFS_FEATURE_INCOMPAT_BIG_METADATA |		\
	 BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO |		\
+	 BTRFS_FEATURE_INCOMPAT_RAID56 |		\
	 BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)

 /*
@@ -952,8 +959,20 @@ struct btrfs_dev_replace_item {
 #define BTRFS_BLOCK_GROUP_RAID1		(1ULL << 4)
 #define BTRFS_BLOCK_GROUP_DUP		(1ULL << 5)
 #define BTRFS_BLOCK_GROUP_RAID10	(1ULL << 6)
+#define BTRFS_BLOCK_GROUP_RAID5		(1 << 7)
+#define BTRFS_BLOCK_GROUP_RAID6		(1 << 8)
 #define BTRFS_BLOCK_GROUP_RESERVED	BTRFS_AVAIL_ALLOC_BIT_SINGLE
-#define BTRFS_NR_RAID_TYPES		5
+
+enum btrfs_raid_types {
+	BTRFS_RAID_RAID10,
+	BTRFS_RAID_RAID1,
+	BTRFS_RAID_DUP,
+	BTRFS_RAID_RAID0,
+	BTRFS_RAID_SINGLE,
+	BTRFS_RAID_RAID5,
+	BTRFS_RAID_RAID6,
+	BTRFS_NR_RAID_TYPES
+};

 #define BTRFS_BLOCK_GROUP_TYPE_MASK	(BTRFS_BLOCK_GROUP_DATA |    \
					 BTRFS_BLOCK_GROUP_SYSTEM |  \
@@ -961,6 +980,8 @@ struct btrfs_dev_replace_item {

 #define BTRFS_BLOCK_GROUP_PROFILE_MASK	(BTRFS_BLOCK_GROUP_RAID0 |   \
					 BTRFS_BLOCK_GROUP_RAID1 |   \
+					 BTRFS_BLOCK_GROUP_RAID5 |   \
+					 BTRFS_BLOCK_GROUP_RAID6 |   \
					 BTRFS_BLOCK_GROUP_DUP |     \
					 BTRFS_BLOCK_GROUP_RAID10)
 /*
@@ -1185,6 +1206,10 @@ struct btrfs_block_group_cache {
	u64 flags;
	u64 sectorsize;
	u64 cache_generation;
+
+	/* for raid56, this is a full stripe, without parity */
+	unsigned long full_stripe_len;
+
	unsigned int ro:1;
	unsigned int dirty:1;
	unsigned int iref:1;
@@ -1225,6 +1250,28 @@ struct seq_list {
	u64 seq;
 };

+enum btrfs_orphan_cleanup_state {
+	ORPHAN_CLEANUP_STARTED	= 1,
+	ORPHAN_CLEANUP_DONE	= 2,
+};
+
+/* used by the raid56 code to lock stripes for read/modify/write */
+struct btrfs_stripe_hash {
+	struct list_head hash_list;
+	wait_queue_head_t wait;
+	spinlock_t lock;
+};
+
+/* used by the raid56 code to lock stripes for read/modify/write */
+struct btrfs_stripe_hash_table {
+	struct list_head stripe_cache;
+	spinlock_t cache_lock;
+	int cache_size;
+	struct btrfs_stripe_hash table[];
+};
+
+#define BTRFS_STRIPE_HASH_TABLE_BITS 11
+
 /* fs_info */
 struct reloc_control;
 struct btrfs_device;
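The hash table above ends in a C flexible array member, so the header and all 1 << BTRFS_STRIPE_HASH_TABLE_BITS buckets come from a single allocation. The raid56.c helper btrfs_alloc_stripe_hash_table() called later in this series does something along these lines; the sketch below is a hedged reconstruction, not the actual function (the real one also falls back to vmalloc for the large allocation):

/* hedged sketch of allocating/initializing a stripe hash table */
static struct btrfs_stripe_hash_table *example_alloc_stripe_hash(void)
{
	const int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
	struct btrfs_stripe_hash_table *table;
	int i;

	/* one allocation covers the header plus the flexible array */
	table = kzalloc(sizeof(*table) +
			num_entries * sizeof(struct btrfs_stripe_hash),
			GFP_NOFS);
	if (!table)
		return NULL;

	spin_lock_init(&table->cache_lock);
	INIT_LIST_HEAD(&table->stripe_cache);

	for (i = 0; i < num_entries; i++) {
		struct btrfs_stripe_hash *h = table->table + i;

		INIT_LIST_HEAD(&h->hash_list);
		init_waitqueue_head(&h->wait);
		spin_lock_init(&h->lock);
	}
	return table;
}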
@@ -1250,6 +1297,7 @@ struct btrfs_fs_info {

	/* block group cache stuff */
	spinlock_t block_group_cache_lock;
+	u64 first_logical_byte;
	struct rb_root block_group_cache_tree;

	/* keep track of unallocated space */
@@ -1288,7 +1336,23 @@ struct btrfs_fs_info {
	u64 last_trans_log_full_commit;
	unsigned long mount_opt;
	unsigned long compress_type:4;
+	/*
+	 * It is a suggestive number, the read side is safe even it gets a
+	 * wrong number because we will write out the data into a regular
+	 * extent. The write side(mount/remount) is under ->s_umount lock,
+	 * so it is also safe.
+	 */
	u64 max_inline;
+	/*
+	 * Protected by ->chunk_mutex and sb->s_umount.
+	 *
+	 * The reason that we use two lock to protect it is because only
+	 * remount and mount operations can change it and these two operations
+	 * are under sb->s_umount, but the read side (chunk allocation) can not
+	 * acquire sb->s_umount or the deadlock would happen. So we use two
+	 * locks to protect it. On the write side, we must acquire two locks,
+	 * and on the read side, we just need acquire one of them.
+	 */
	u64 alloc_start;
	struct btrfs_transaction *running_transaction;
	wait_queue_head_t transaction_throttle;
@@ -1307,6 +1371,13 @@ struct btrfs_fs_info {
	struct mutex cleaner_mutex;
	struct mutex chunk_mutex;
	struct mutex volume_mutex;
+
+	/* this is used during read/modify/write to make sure
+	 * no two ios are trying to mod the same stripe at the same
+	 * time
+	 */
+	struct btrfs_stripe_hash_table *stripe_hash_table;
+
	/*
	 * this protects the ordered operations list only while we are
	 * processing all of the entries on it.  This way we make
@@ -1365,6 +1436,7 @@ struct btrfs_fs_info {
	 */
	struct list_head ordered_extents;

+	spinlock_t delalloc_lock;
	/*
	 * all of the inodes that have delalloc bytes.  It is possible for
	 * this list to be empty even when there is still dirty data=ordered
@@ -1372,13 +1444,6 @@ struct btrfs_fs_info {
	 */
	struct list_head delalloc_inodes;

-	/*
-	 * special rename and truncate targets that must be on disk before
-	 * we're allowed to commit.  This is basically the ext3 style
-	 * data=ordered list.
-	 */
-	struct list_head ordered_operations;
-
	/*
	 * there is a pool of worker threads for checksumming during writes
	 * and a pool for checksumming after reads.  This is because readers
@@ -1395,6 +1460,8 @@ struct btrfs_fs_info {
	struct btrfs_workers flush_workers;
	struct btrfs_workers endio_workers;
	struct btrfs_workers endio_meta_workers;
+	struct btrfs_workers endio_raid56_workers;
+	struct btrfs_workers rmw_workers;
	struct btrfs_workers endio_meta_write_workers;
	struct btrfs_workers endio_write_workers;
	struct btrfs_workers endio_freespace_worker;
@@ -1423,10 +1490,12 @@ struct btrfs_fs_info {

	u64 total_pinned;

-	/* protected by the delalloc lock, used to keep from writing
-	 * metadata until there is a nice batch
-	 */
-	u64 dirty_metadata_bytes;
+	/* used to keep from writing metadata until there is a nice batch */
+	struct percpu_counter dirty_metadata_bytes;
+	struct percpu_counter delalloc_bytes;
+	s32 dirty_metadata_batch;
+	s32 delalloc_batch;
+
	struct list_head dirty_cowonly_roots;

	struct btrfs_fs_devices *fs_devices;
@@ -1442,9 +1511,6 @@ struct btrfs_fs_info {

	struct reloc_control *reloc_ctl;

-	spinlock_t delalloc_lock;
-	u64 delalloc_bytes;
-
	/* data_alloc_cluster is only used in ssd mode */
	struct btrfs_free_cluster data_alloc_cluster;

@@ -1456,6 +1522,8 @@ struct btrfs_fs_info {
	struct rb_root defrag_inodes;
	atomic_t defrag_running;

+	/* Used to protect avail_{data, metadata, system}_alloc_bits */
+	seqlock_t profiles_lock;
	/*
	 * these three are in extended format (availability of single
	 * chunks is denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other
@@ -1520,7 +1588,7 @@ struct btrfs_fs_info {
	u64 qgroup_seq;

	/* filesystem state */
-	u64 fs_state;
+	unsigned long fs_state;

	struct btrfs_delayed_root *delayed_root;

@@ -1623,6 +1691,9 @@ struct btrfs_root {

	struct list_head root_list;

+	spinlock_t log_extents_lock[2];
+	struct list_head logged_list[2];
+
	spinlock_t orphan_lock;
	atomic_t orphan_inodes;
	struct btrfs_block_rsv *orphan_block_rsv;
@@ -1832,6 +1903,7 @@ struct btrfs_ioctl_defrag_range_args {

 #define btrfs_clear_opt(o, opt)		((o) &= ~BTRFS_MOUNT_##opt)
 #define btrfs_set_opt(o, opt)		((o) |= BTRFS_MOUNT_##opt)
+#define btrfs_raw_test_opt(o, opt)	((o) & BTRFS_MOUNT_##opt)
 #define btrfs_test_opt(root, opt)	((root)->fs_info->mount_opt & \
					 BTRFS_MOUNT_##opt)
 /*
@@ -2936,8 +3008,7 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     u64 num_bytes, u64 *refs, u64 *flags);
 int btrfs_pin_extent(struct btrfs_root *root,
		     u64 bytenr, u64 num, int reserved);
-int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
-				    struct btrfs_root *root,
+int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
				    u64 bytenr, u64 num_bytes);
 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
@@ -3035,8 +3106,13 @@ void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
				  struct inode *inode);
 void btrfs_orphan_release_metadata(struct inode *inode);
-int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
-				struct btrfs_pending_snapshot *pending);
+int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
+				     struct btrfs_block_rsv *rsv,
+				     int nitems,
+				     u64 *qgroup_reserved);
+void btrfs_subvolume_release_metadata(struct btrfs_root *root,
+				      struct btrfs_block_rsv *rsv,
+				      u64 qgroup_reserved);
 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes);
 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes);
 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes);
@@ -3092,10 +3168,10 @@ struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int lowest_level,
-			int cache_only, u64 min_trans);
+			u64 min_trans);
 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_key *max_key,
-			 struct btrfs_path *path, int cache_only,
+			 struct btrfs_path *path,
			 u64 min_trans);
 enum btrfs_compare_tree_result {
	BTRFS_COMPARE_TREE_NEW,
@@ -3148,7 +3224,7 @@ int btrfs_search_slot_for_read(struct btrfs_root *root,
			       int find_higher, int return_any);
 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
-		       int start_slot, int cache_only, u64 *last_ret,
+		       int start_slot, u64 *last_ret,
		       struct btrfs_key *progress);
 void btrfs_release_path(struct btrfs_path *p);
 struct btrfs_path *btrfs_alloc_path(void);
@@ -3459,9 +3535,9 @@ int btrfs_writepages(struct address_space *mapping,
		     struct writeback_control *wbc);
 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
			     struct btrfs_root *new_root, u64 new_dirid);
-int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
-			 size_t size, struct bio *bio, unsigned long bio_flags);
-
+int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
+			 size_t size, struct bio *bio,
+			 unsigned long bio_flags);
 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
 int btrfs_readpage(struct file *file, struct page *page);
 void btrfs_evict_inode(struct inode *inode);
@@ -3543,7 +3619,7 @@ int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,

 /* tree-defrag.c */
 int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
-			struct btrfs_root *root, int cache_only);
+			struct btrfs_root *root);

 /* sysfs.c */
 int btrfs_init_sysfs(void);
@@ -3620,11 +3696,14 @@ __printf(5, 6)
 void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
		   unsigned int line, int errno, const char *fmt, ...);

+/*
+ * If BTRFS_MOUNT_PANIC_ON_FATAL_ERROR is in mount_opt, __btrfs_panic
+ * will panic().  Otherwise we BUG() here.
+ */
 #define btrfs_panic(fs_info, errno, fmt, args...)			\
 do {									\
-	struct btrfs_fs_info *_i = (fs_info);				\
-	__btrfs_panic(_i, __func__, __LINE__, errno, fmt, ##args);	\
-	BUG_ON(!(_i->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR));	\
+	__btrfs_panic(fs_info, __func__, __LINE__, errno, fmt, ##args);\
+	BUG();								\
 } while (0)

 /* acl.c */
@@ -3745,4 +3824,11 @@ static inline int is_fstree(u64 rootid)
		return 1;
	return 0;
 }
+
+static inline int btrfs_defrag_cancelled(struct btrfs_fs_info *fs_info)
+{
+	return signal_pending(current);
+}
+
+
 #endif
fs/btrfs/delayed-inode.c
@@ -875,7 +875,6 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_delayed_item *delayed_item)
 {
	struct extent_buffer *leaf;
-	struct btrfs_item *item;
	char *ptr;
	int ret;

@@ -886,7 +885,6 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,

	leaf = path->nodes[0];

-	item = btrfs_item_nr(leaf, path->slots[0]);
	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
@@ -1065,32 +1063,25 @@ static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
	}
 }

-static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
-				      struct btrfs_root *root,
-				      struct btrfs_path *path,
-				      struct btrfs_delayed_node *node)
+static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
+					struct btrfs_root *root,
+					struct btrfs_path *path,
+					struct btrfs_delayed_node *node)
 {
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int ret;

-	mutex_lock(&node->mutex);
-	if (!node->inode_dirty) {
-		mutex_unlock(&node->mutex);
-		return 0;
-	}
-
	key.objectid = node->inode_id;
	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
	key.offset = 0;
+
	ret = btrfs_lookup_inode(trans, root, path, &key, 1);
	if (ret > 0) {
		btrfs_release_path(path);
-		mutex_unlock(&node->mutex);
		return -ENOENT;
	} else if (ret < 0) {
-		mutex_unlock(&node->mutex);
		return ret;
	}

@@ -1105,11 +1096,47 @@ static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,

	btrfs_delayed_inode_release_metadata(root, node);
	btrfs_release_delayed_inode(node);
-	mutex_unlock(&node->mutex);

	return 0;
 }

+static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
+					     struct btrfs_root *root,
+					     struct btrfs_path *path,
+					     struct btrfs_delayed_node *node)
+{
+	int ret;
+
+	mutex_lock(&node->mutex);
+	if (!node->inode_dirty) {
+		mutex_unlock(&node->mutex);
+		return 0;
+	}
+
+	ret = __btrfs_update_delayed_inode(trans, root, path, node);
+	mutex_unlock(&node->mutex);
+	return ret;
+}
+
+static inline int
+__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
+				   struct btrfs_path *path,
+				   struct btrfs_delayed_node *node)
+{
+	int ret;
+
+	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
+	if (ret)
+		return ret;
+
+	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
+	if (ret)
+		return ret;
+
+	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
+	return ret;
+}
+
 /*
  * Called when committing the transaction.
  * Returns 0 on success.
@@ -1119,7 +1146,6 @@ static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
 static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root, int nr)
 {
-	struct btrfs_root *curr_root = root;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
@@ -1142,15 +1168,8 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
-		curr_root = curr_node->root;
-		ret = btrfs_insert_delayed_items(trans, path, curr_root,
-						 curr_node);
-		if (!ret)
-			ret = btrfs_delete_delayed_items(trans, path,
-						curr_root, curr_node);
-		if (!ret)
-			ret = btrfs_update_delayed_inode(trans, curr_root,
-						path, curr_node);
+		ret = __btrfs_commit_inode_delayed_items(trans, path,
+							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
@@ -1183,36 +1202,12 @@ int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
	return __btrfs_run_delayed_items(trans, root, nr);
 }

-static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
-					      struct btrfs_delayed_node *node)
-{
-	struct btrfs_path *path;
-	struct btrfs_block_rsv *block_rsv;
-	int ret;
-
-	path = btrfs_alloc_path();
-	if (!path)
-		return -ENOMEM;
-	path->leave_spinning = 1;
-
-	block_rsv = trans->block_rsv;
-	trans->block_rsv = &node->root->fs_info->delayed_block_rsv;
-
-	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
-	if (!ret)
-		ret = btrfs_delete_delayed_items(trans, path, node->root, node);
-	if (!ret)
-		ret = btrfs_update_delayed_inode(trans, node->root, path, node);
-	btrfs_free_path(path);
-
-	trans->block_rsv = block_rsv;
-	return ret;
-}
-
 int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct inode *inode)
 {
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
+	struct btrfs_path *path;
+	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
@@ -1226,8 +1221,74 @@ int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
	}
	mutex_unlock(&delayed_node->mutex);

-	ret = __btrfs_commit_inode_delayed_items(trans, delayed_node);
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+	path->leave_spinning = 1;
+
+	block_rsv = trans->block_rsv;
+	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
+
+	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
+
	btrfs_release_delayed_node(delayed_node);
+	btrfs_free_path(path);
+	trans->block_rsv = block_rsv;
+
	return ret;
 }

+int btrfs_commit_inode_delayed_inode(struct inode *inode)
+{
+	struct btrfs_trans_handle *trans;
+	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
+	struct btrfs_path *path;
+	struct btrfs_block_rsv *block_rsv;
+	int ret;
+
+	if (!delayed_node)
+		return 0;
+
+	mutex_lock(&delayed_node->mutex);
+	if (!delayed_node->inode_dirty) {
+		mutex_unlock(&delayed_node->mutex);
+		btrfs_release_delayed_node(delayed_node);
+		return 0;
+	}
+	mutex_unlock(&delayed_node->mutex);
+
+	trans = btrfs_join_transaction(delayed_node->root);
+	if (IS_ERR(trans)) {
+		ret = PTR_ERR(trans);
+		goto out;
+	}
+
+	path = btrfs_alloc_path();
+	if (!path) {
+		ret = -ENOMEM;
+		goto trans_out;
+	}
+	path->leave_spinning = 1;
+
+	block_rsv = trans->block_rsv;
+	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
+
+	mutex_lock(&delayed_node->mutex);
+	if (delayed_node->inode_dirty)
+		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
+						   path, delayed_node);
+	else
+		ret = 0;
+	mutex_unlock(&delayed_node->mutex);
+
+	btrfs_free_path(path);
+	trans->block_rsv = block_rsv;
+trans_out:
+	btrfs_end_transaction(trans, delayed_node->root);
+	btrfs_btree_balance_dirty(delayed_node->root);
+out:
+	btrfs_release_delayed_node(delayed_node);
+
+	return ret;
+}
+
@@ -1258,7 +1319,6 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int need_requeue = 0;
-	int ret;

	async_node = container_of(work, struct btrfs_async_delayed_node, work);

@@ -1277,14 +1337,7 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

-	ret = btrfs_insert_delayed_items(trans, path, root, delayed_node);
-	if (!ret)
-		ret = btrfs_delete_delayed_items(trans, path, root,
-						 delayed_node);
-
-	if (!ret)
-		btrfs_update_delayed_inode(trans, root, path, delayed_node);
-
+	__btrfs_commit_inode_delayed_items(trans, path, delayed_node);
	/*
	 * Maybe new delayed items have been inserted, so we need requeue
	 * the work. Besides that, we must dequeue the empty delayed nodes
fs/btrfs/delayed-inode.h
@@ -117,6 +117,7 @@ int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
 /* Used for evicting the inode. */
 void btrfs_remove_delayed_node(struct inode *inode);
 void btrfs_kill_delayed_inode_items(struct inode *inode);
+int btrfs_commit_inode_delayed_inode(struct inode *inode);


 int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
fs/btrfs/delayed-ref.c
@@ -23,6 +23,10 @@
 #include "delayed-ref.h"
 #include "transaction.h"

+struct kmem_cache *btrfs_delayed_ref_head_cachep;
+struct kmem_cache *btrfs_delayed_tree_ref_cachep;
+struct kmem_cache *btrfs_delayed_data_ref_cachep;
+struct kmem_cache *btrfs_delayed_extent_op_cachep;
 /*
  * delayed back reference update tracking.  For subvolume trees
  * we queue up extent allocations and backref maintenance for
@@ -422,6 +426,14 @@ again:
	return 1;
 }

+void btrfs_release_ref_cluster(struct list_head *cluster)
+{
+	struct list_head *pos, *q;
+
+	list_for_each_safe(pos, q, cluster)
+		list_del_init(pos);
+}
+
 /*
  * helper function to update an extent delayed ref in the
  * rbtree.  existing and update must both have the same
@@ -511,7 +523,7 @@ update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
				ref->extent_op->flags_to_set;
			existing_ref->extent_op->update_flags = 1;
		}
-		kfree(ref->extent_op);
+		btrfs_free_delayed_extent_op(ref->extent_op);
	}
 }
 /*
@@ -592,7 +604,7 @@ static noinline void add_delayed_ref_head(struct btrfs_fs_info *fs_info,
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
-		kfree(head_ref);
+		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
	} else {
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
@@ -653,7 +665,7 @@ static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
-		kfree(full_ref);
+		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
@@ -714,7 +726,7 @@ static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
-		kfree(full_ref);
+		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
@@ -738,13 +750,13 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && extent_op->is_data);
-	ref = kmalloc(sizeof(*ref), GFP_NOFS);
+	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

-	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
+	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
-		kfree(ref);
+		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
		return -ENOMEM;
	}

@@ -786,13 +798,13 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && !extent_op->is_data);
-	ref = kmalloc(sizeof(*ref), GFP_NOFS);
+	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

-	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
+	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
-		kfree(ref);
+		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

@@ -826,7 +838,7 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

-	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
+	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

@@ -860,3 +872,51 @@ btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
		return btrfs_delayed_node_to_head(ref);
	return NULL;
 }
+
+void btrfs_delayed_ref_exit(void)
+{
+	if (btrfs_delayed_ref_head_cachep)
+		kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
+	if (btrfs_delayed_tree_ref_cachep)
+		kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
+	if (btrfs_delayed_data_ref_cachep)
+		kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
+	if (btrfs_delayed_extent_op_cachep)
+		kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
+}
+
+int btrfs_delayed_ref_init(void)
+{
+	btrfs_delayed_ref_head_cachep = kmem_cache_create(
+				"btrfs_delayed_ref_head",
+				sizeof(struct btrfs_delayed_ref_head), 0,
+				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+	if (!btrfs_delayed_ref_head_cachep)
+		goto fail;
+
+	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
+				"btrfs_delayed_tree_ref",
+				sizeof(struct btrfs_delayed_tree_ref), 0,
+				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+	if (!btrfs_delayed_tree_ref_cachep)
+		goto fail;
+
+	btrfs_delayed_data_ref_cachep = kmem_cache_create(
+				"btrfs_delayed_data_ref",
+				sizeof(struct btrfs_delayed_data_ref), 0,
+				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+	if (!btrfs_delayed_data_ref_cachep)
+		goto fail;
+
+	btrfs_delayed_extent_op_cachep = kmem_cache_create(
+				"btrfs_delayed_extent_op",
+				sizeof(struct btrfs_delayed_extent_op), 0,
+				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+	if (!btrfs_delayed_extent_op_cachep)
+		goto fail;
+
+	return 0;
+fail:
+	btrfs_delayed_ref_exit();
+	return -ENOMEM;
+}
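btrfs_delayed_ref_init() and btrfs_delayed_ref_exit() only create and destroy the slab caches; they still have to be called from the module's init and exit paths in super.c. A hedged sketch of that wiring, assuming the usual error-unwind style used there rather than copying the actual patch:

/* sketch of module-init wiring for the new caches (illustrative) */
static int __init example_init_btrfs_fs(void)
{
	int err;

	err = btrfs_delayed_ref_init();
	if (err)
		return err;

	err = register_filesystem(&btrfs_fs_type);
	if (err) {
		btrfs_delayed_ref_exit();	/* unwind on failure */
		return err;
	}
	return 0;
}

Dedicated caches over plain kmalloc() mean right-sized objects, less internal fragmentation, and per-type accounting in /proc/slabinfo for these very hot allocations.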
fs/btrfs/delayed-ref.h
@@ -131,6 +131,15 @@ struct btrfs_delayed_ref_root {
	/* total number of head nodes ready for processing */
	unsigned long num_heads_ready;

+	/*
+	 * bumped when someone is making progress on the delayed
+	 * refs, so that other procs know they are just adding to
+	 * contention intead of helping
+	 */
+	atomic_t procs_running_refs;
+	atomic_t ref_seq;
+	wait_queue_head_t wait;
+
	/*
	 * set when the tree is flushing before a transaction commit,
	 * used by the throttling code to decide if new updates need
@@ -141,12 +150,47 @@ struct btrfs_delayed_ref_root {
	u64 run_delayed_start;
 };

+extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
+extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
+extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
+extern struct kmem_cache *btrfs_delayed_extent_op_cachep;
+
+int btrfs_delayed_ref_init(void);
+void btrfs_delayed_ref_exit(void);
+
+static inline struct btrfs_delayed_extent_op *
+btrfs_alloc_delayed_extent_op(void)
+{
+	return kmem_cache_alloc(btrfs_delayed_extent_op_cachep, GFP_NOFS);
+}
+
+static inline void
+btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
+{
+	if (op)
+		kmem_cache_free(btrfs_delayed_extent_op_cachep, op);
+}
+
 static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
 {
	WARN_ON(atomic_read(&ref->refs) == 0);
	if (atomic_dec_and_test(&ref->refs)) {
		WARN_ON(ref->in_tree);
-		kfree(ref);
+		switch (ref->type) {
+		case BTRFS_TREE_BLOCK_REF_KEY:
+		case BTRFS_SHARED_BLOCK_REF_KEY:
+			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
+			break;
+		case BTRFS_EXTENT_DATA_REF_KEY:
+		case BTRFS_SHARED_DATA_REF_KEY:
+			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
+			break;
+		case 0:
+			kmem_cache_free(btrfs_delayed_ref_head_cachep, ref);
+			break;
+		default:
+			BUG();
+		}
	}
 }

@@ -176,8 +220,14 @@ struct btrfs_delayed_ref_head *
 btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
 int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head);
+static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
+{
+	mutex_unlock(&head->mutex);
+}
+
 int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
			   struct list_head *cluster, u64 search_start);
+void btrfs_release_ref_cluster(struct list_head *cluster);

 int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
fs/btrfs/dev-replace.c
@@ -465,7 +465,11 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
	 * flush all outstanding I/O and inode extent mappings before the
	 * copy operation is declared as being finished
	 */
-	btrfs_start_delalloc_inodes(root, 0);
+	ret = btrfs_start_delalloc_inodes(root, 0);
+	if (ret) {
+		mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
+		return ret;
+	}
	btrfs_wait_ordered_extents(root, 0);

	trans = btrfs_start_transaction(root, 0);
fs/btrfs/disk-io.c
@@ -46,6 +46,7 @@
 #include "check-integrity.h"
 #include "rcu-string.h"
 #include "dev-replace.h"
+#include "raid56.h"

 #ifdef CONFIG_X86
 #include <asm/cpufeature.h>
@@ -56,7 +57,8 @@ static void end_workqueue_fn(struct btrfs_work *work);
 static void free_fs_root(struct btrfs_root *root);
 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
				    int read_only);
-static void btrfs_destroy_ordered_operations(struct btrfs_root *root);
+static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
+					     struct btrfs_root *root);
 static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_root *root);
@@ -420,7 +422,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
 static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
 {
	struct extent_io_tree *tree;
-	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+	u64 start = page_offset(page);
	u64 found_start;
	struct extent_buffer *eb;

@@ -639,8 +641,15 @@ err:
		btree_readahead_hook(root, eb, eb->start, ret);
	}

-	if (ret)
+	if (ret) {
+		/*
+		 * our io error hook is going to dec the io pages
+		 * again, we have to make sure it has something
+		 * to decrement
+		 */
+		atomic_inc(&eb->io_pages);
		clear_extent_buffer_uptodate(eb);
+	}
	free_extent_buffer(eb);
 out:
	return ret;
@@ -654,6 +663,7 @@ static int btree_io_failed_hook(struct page *page, int failed_mirror)
	eb = (struct extent_buffer *)page->private;
	set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
	eb->read_mirror = failed_mirror;
+	atomic_dec(&eb->io_pages);
	if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(root, eb, eb->start, -EIO);
	return -EIO;	/* we fixed nothing */
@@ -670,17 +680,23 @@ static void end_workqueue_bio(struct bio *bio, int err)
	end_io_wq->work.flags = 0;

	if (bio->bi_rw & REQ_WRITE) {
-		if (end_io_wq->metadata == 1)
+		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
			btrfs_queue_worker(&fs_info->endio_meta_write_workers,
					   &end_io_wq->work);
-		else if (end_io_wq->metadata == 2)
+		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
			btrfs_queue_worker(&fs_info->endio_freespace_worker,
					   &end_io_wq->work);
+		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
+			btrfs_queue_worker(&fs_info->endio_raid56_workers,
+					   &end_io_wq->work);
		else
			btrfs_queue_worker(&fs_info->endio_write_workers,
					   &end_io_wq->work);
	} else {
-		if (end_io_wq->metadata)
+		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
+			btrfs_queue_worker(&fs_info->endio_raid56_workers,
+					   &end_io_wq->work);
+		else if (end_io_wq->metadata)
			btrfs_queue_worker(&fs_info->endio_meta_workers,
					   &end_io_wq->work);
		else
@@ -695,6 +711,7 @@ static void end_workqueue_bio(struct bio *bio, int err)
 * 0 - if data
 * 1 - if normal metadta
 * 2 - if writing to the free space cache area
+ * 3 - raid parity work
 */
 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
			int metadata)
@@ -946,18 +963,20 @@ static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
 {
	struct extent_io_tree *tree;
+	struct btrfs_fs_info *fs_info;
+	int ret;
+
	tree = &BTRFS_I(mapping->host)->io_tree;
	if (wbc->sync_mode == WB_SYNC_NONE) {
-		struct btrfs_root *root = BTRFS_I(mapping->host)->root;
-		u64 num_dirty;
-		unsigned long thresh = 32 * 1024 * 1024;

		if (wbc->for_kupdate)
			return 0;

+		fs_info = BTRFS_I(mapping->host)->root->fs_info;
		/* this is a bit racy, but that's ok */
-		num_dirty = root->fs_info->dirty_metadata_bytes;
-		if (num_dirty < thresh)
+		ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
+					     BTRFS_DIRTY_METADATA_THRESH);
+		if (ret < 0)
			return 0;
	}
	return btree_write_cache_pages(mapping, wbc);
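The hunk above is the read side of converting dirty_metadata_bytes from a spinlock-protected u64 to a percpu counter. Pulled together in one place, the lifecycle looks like this; all four calls are the standard <linux/percpu_counter.h> API of this kernel era, while the surrounding function is illustrative only:

/* illustrative consolidation of the percpu_counter pattern used here */
static int example_percpu_counter_usage(struct btrfs_fs_info *fs_info)
{
	int ret;

	/* mount: allocate the per-cpu slots */
	ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0);
	if (ret)
		return ret;

	/* hot path: batched add, usually touches only this CPU's slot */
	__percpu_counter_add(&fs_info->dirty_metadata_bytes, 4096,
			     fs_info->dirty_metadata_batch);

	/* threshold check without taking a global lock on every update */
	if (percpu_counter_compare(&fs_info->dirty_metadata_bytes,
				   BTRFS_DIRTY_METADATA_THRESH) >= 0) {
		/* ... over threshold: kick metadata writeback ... */
	}

	/* unmount/error path */
	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
	return 0;
}

The batch argument controls how large a per-CPU delta may grow before it is folded into the shared sum, which is why the check is "a bit racy, but that's ok": the counter trades exactness for scalability.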
@ -1125,24 +1144,16 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
|
|||
void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
|
||||
struct extent_buffer *buf)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = root->fs_info;
|
||||
|
||||
if (btrfs_header_generation(buf) ==
|
||||
root->fs_info->running_transaction->transid) {
|
||||
fs_info->running_transaction->transid) {
|
||||
btrfs_assert_tree_locked(buf);
|
||||
|
||||
if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
|
||||
spin_lock(&root->fs_info->delalloc_lock);
|
||||
if (root->fs_info->dirty_metadata_bytes >= buf->len)
|
||||
root->fs_info->dirty_metadata_bytes -= buf->len;
|
||||
else {
|
||||
spin_unlock(&root->fs_info->delalloc_lock);
|
||||
btrfs_panic(root->fs_info, -EOVERFLOW,
|
||||
"Can't clear %lu bytes from "
|
||||
" dirty_mdatadata_bytes (%llu)",
|
||||
buf->len,
|
||||
root->fs_info->dirty_metadata_bytes);
|
||||
}
|
||||
spin_unlock(&root->fs_info->delalloc_lock);
|
||||
|
||||
__percpu_counter_add(&fs_info->dirty_metadata_bytes,
|
||||
-buf->len,
|
||||
fs_info->dirty_metadata_batch);
|
||||
/* ugh, clear_extent_buffer_dirty needs to lock the page */
|
||||
btrfs_set_lock_blocking(buf);
|
||||
clear_extent_buffer_dirty(buf);
|
||||
|
@ -1178,9 +1189,13 @@ static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
|
|||
|
||||
INIT_LIST_HEAD(&root->dirty_list);
|
||||
INIT_LIST_HEAD(&root->root_list);
|
||||
INIT_LIST_HEAD(&root->logged_list[0]);
|
||||
INIT_LIST_HEAD(&root->logged_list[1]);
|
||||
spin_lock_init(&root->orphan_lock);
|
||||
spin_lock_init(&root->inode_lock);
|
||||
spin_lock_init(&root->accounting_lock);
|
||||
spin_lock_init(&root->log_extents_lock[0]);
|
||||
spin_lock_init(&root->log_extents_lock[1]);
|
||||
mutex_init(&root->objectid_mutex);
|
||||
mutex_init(&root->log_mutex);
|
||||
init_waitqueue_head(&root->log_writer_wait);
|
||||
|
@ -2004,10 +2019,24 @@ int open_ctree(struct super_block *sb,
|
|||
goto fail_srcu;
|
||||
}
|
||||
|
||||
ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0);
|
||||
if (ret) {
|
||||
err = ret;
|
||||
goto fail_bdi;
|
||||
}
|
||||
fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE *
|
||||
(1 + ilog2(nr_cpu_ids));
|
||||
|
||||
ret = percpu_counter_init(&fs_info->delalloc_bytes, 0);
|
||||
if (ret) {
|
||||
err = ret;
|
||||
goto fail_dirty_metadata_bytes;
|
||||
}
|
||||
|
||||
fs_info->btree_inode = new_inode(sb);
|
||||
if (!fs_info->btree_inode) {
|
||||
err = -ENOMEM;
|
||||
goto fail_bdi;
|
||||
goto fail_delalloc_bytes;
|
||||
}
|
||||
|
||||
mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
|
||||
|
@ -2017,7 +2046,6 @@ int open_ctree(struct super_block *sb,
|
|||
INIT_LIST_HEAD(&fs_info->dead_roots);
|
||||
INIT_LIST_HEAD(&fs_info->delayed_iputs);
|
||||
INIT_LIST_HEAD(&fs_info->delalloc_inodes);
|
||||
INIT_LIST_HEAD(&fs_info->ordered_operations);
|
||||
INIT_LIST_HEAD(&fs_info->caching_block_groups);
|
||||
spin_lock_init(&fs_info->delalloc_lock);
|
||||
spin_lock_init(&fs_info->trans_lock);
|
||||
|
@ -2028,6 +2056,7 @@ int open_ctree(struct super_block *sb,
|
|||
spin_lock_init(&fs_info->tree_mod_seq_lock);
|
||||
rwlock_init(&fs_info->tree_mod_log_lock);
|
||||
mutex_init(&fs_info->reloc_mutex);
|
||||
seqlock_init(&fs_info->profiles_lock);
|
||||
|
||||
init_completion(&fs_info->kobj_unregister);
|
||||
INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
|
||||
|
@ -2126,6 +2155,7 @@ int open_ctree(struct super_block *sb,
|
|||
|
||||
spin_lock_init(&fs_info->block_group_cache_lock);
|
||||
fs_info->block_group_cache_tree = RB_ROOT;
|
||||
fs_info->first_logical_byte = (u64)-1;
|
||||
|
||||
extent_io_tree_init(&fs_info->freed_extents[0],
|
||||
fs_info->btree_inode->i_mapping);
|
||||
|
@ -2165,6 +2195,12 @@ int open_ctree(struct super_block *sb,
|
|||
init_waitqueue_head(&fs_info->transaction_blocked_wait);
|
||||
init_waitqueue_head(&fs_info->async_submit_wait);
|
||||
|
||||
ret = btrfs_alloc_stripe_hash_table(fs_info);
|
||||
if (ret) {
|
||||
err = ret;
|
||||
goto fail_alloc;
|
||||
}
|
||||
|
||||
__setup_root(4096, 4096, 4096, 4096, tree_root,
|
||||
fs_info, BTRFS_ROOT_TREE_OBJECTID);
|
||||
|
||||
|
@ -2187,7 +2223,8 @@ int open_ctree(struct super_block *sb,
|
|||
goto fail_alloc;
|
||||
|
||||
/* check FS state, whether FS is broken. */
|
||||
fs_info->fs_state |= btrfs_super_flags(disk_super);
|
||||
if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
|
||||
set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
|
||||
|
||||
ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
|
||||
if (ret) {
|
||||
|
@ -2261,6 +2298,8 @@ int open_ctree(struct super_block *sb,
|
|||
leafsize = btrfs_super_leafsize(disk_super);
|
||||
sectorsize = btrfs_super_sectorsize(disk_super);
|
||||
stripesize = btrfs_super_stripesize(disk_super);
|
||||
fs_info->dirty_metadata_batch = leafsize * (1 + ilog2(nr_cpu_ids));
|
||||
fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
|
||||
|
||||
/*
|
||||
* mixed block groups end up with duplicate but slightly offset
|
||||
|
@ -2332,6 +2371,12 @@ int open_ctree(struct super_block *sb,
|
|||
btrfs_init_workers(&fs_info->endio_meta_write_workers,
|
||||
"endio-meta-write", fs_info->thread_pool_size,
|
||||
&fs_info->generic_worker);
|
||||
btrfs_init_workers(&fs_info->endio_raid56_workers,
|
||||
"endio-raid56", fs_info->thread_pool_size,
|
||||
&fs_info->generic_worker);
|
||||
btrfs_init_workers(&fs_info->rmw_workers,
|
||||
"rmw", fs_info->thread_pool_size,
|
||||
&fs_info->generic_worker);
|
||||
btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
|
||||
fs_info->thread_pool_size,
|
||||
&fs_info->generic_worker);
|
||||
|
@ -2350,6 +2395,8 @@ int open_ctree(struct super_block *sb,
|
|||
*/
|
||||
fs_info->endio_workers.idle_thresh = 4;
|
||||
fs_info->endio_meta_workers.idle_thresh = 4;
|
||||
fs_info->endio_raid56_workers.idle_thresh = 4;
|
||||
fs_info->rmw_workers.idle_thresh = 2;
|
||||
|
||||
fs_info->endio_write_workers.idle_thresh = 2;
|
||||
fs_info->endio_meta_write_workers.idle_thresh = 2;
|
||||
|
@ -2366,6 +2413,8 @@ int open_ctree(struct super_block *sb,
|
|||
ret |= btrfs_start_workers(&fs_info->fixup_workers);
|
||||
ret |= btrfs_start_workers(&fs_info->endio_workers);
|
||||
ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
|
||||
ret |= btrfs_start_workers(&fs_info->rmw_workers);
|
||||
ret |= btrfs_start_workers(&fs_info->endio_raid56_workers);
|
||||
ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
|
||||
ret |= btrfs_start_workers(&fs_info->endio_write_workers);
|
||||
ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);
|
||||
|
@ -2390,8 +2439,7 @@ int open_ctree(struct super_block *sb,
|
|||
sb->s_blocksize = sectorsize;
|
||||
sb->s_blocksize_bits = blksize_bits(sectorsize);
|
||||
|
||||
if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
|
||||
sizeof(disk_super->magic))) {
|
||||
if (disk_super->magic != cpu_to_le64(BTRFS_MAGIC)) {
|
||||
printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
|
||||
goto fail_sb_buffer;
|
||||
}
|
||||
|
@@ -2694,13 +2742,13 @@ fail_cleaner:
 	 * kthreads
 	 */
 	filemap_write_and_wait(fs_info->btree_inode->i_mapping);
-	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);

 fail_block_groups:
 	btrfs_free_block_groups(fs_info);

 fail_tree_roots:
 	free_root_pointers(fs_info, 1);
+	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);

 fail_sb_buffer:
 	btrfs_stop_workers(&fs_info->generic_worker);
@@ -2710,6 +2758,8 @@ fail_sb_buffer:
 	btrfs_stop_workers(&fs_info->workers);
 	btrfs_stop_workers(&fs_info->endio_workers);
 	btrfs_stop_workers(&fs_info->endio_meta_workers);
+	btrfs_stop_workers(&fs_info->endio_raid56_workers);
+	btrfs_stop_workers(&fs_info->rmw_workers);
 	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
 	btrfs_stop_workers(&fs_info->endio_write_workers);
 	btrfs_stop_workers(&fs_info->endio_freespace_worker);
@@ -2721,13 +2771,17 @@ fail_alloc:
 fail_iput:
 	btrfs_mapping_tree_free(&fs_info->mapping_tree);

 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
 	iput(fs_info->btree_inode);
+fail_delalloc_bytes:
+	percpu_counter_destroy(&fs_info->delalloc_bytes);
+fail_dirty_metadata_bytes:
+	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
 fail_bdi:
 	bdi_destroy(&fs_info->bdi);
 fail_srcu:
 	cleanup_srcu_struct(&fs_info->subvol_srcu);
 fail:
+	btrfs_free_stripe_hash_table(fs_info);
 	btrfs_close_devices(fs_info->fs_devices);
 	return err;

@@ -2795,8 +2849,7 @@ struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)

 		super = (struct btrfs_super_block *)bh->b_data;
 		if (btrfs_super_bytenr(super) != bytenr ||
-		    strncmp((char *)(&super->magic), BTRFS_MAGIC,
-			    sizeof(super->magic))) {
+		    super->magic != cpu_to_le64(BTRFS_MAGIC)) {
 			brelse(bh);
 			continue;
 		}
@@ -3076,11 +3129,16 @@ int btrfs_calc_num_tolerated_disk_barrier_failures(
 			    ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK)
 			     == 0)))
 				num_tolerated_disk_barrier_failures = 0;
-			else if (num_tolerated_disk_barrier_failures > 1
-				 &&
-				 (flags & (BTRFS_BLOCK_GROUP_RAID1 |
-					   BTRFS_BLOCK_GROUP_RAID10)))
-				num_tolerated_disk_barrier_failures = 1;
+			else if (num_tolerated_disk_barrier_failures > 1) {
+				if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
+				    BTRFS_BLOCK_GROUP_RAID5 |
+				    BTRFS_BLOCK_GROUP_RAID10)) {
+					num_tolerated_disk_barrier_failures = 1;
+				} else if (flags &
+					   BTRFS_BLOCK_GROUP_RAID6) {
+					num_tolerated_disk_barrier_failures = 2;
+				}
+			}
 		}
 	}
 	up_read(&sinfo->groups_sem);
@@ -3195,6 +3253,11 @@ void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
 	if (btrfs_root_refs(&root->root_item) == 0)
 		synchronize_srcu(&fs_info->subvol_srcu);

+	if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+		btrfs_free_log(NULL, root);
+		btrfs_free_log_root_tree(NULL, fs_info);
+	}
+
 	__btrfs_remove_free_space_cache(root->free_ino_pinned);
 	__btrfs_remove_free_space_cache(root->free_ino_ctl);
 	free_fs_root(root);
@@ -3339,7 +3402,7 @@ int close_ctree(struct btrfs_root *root)
 			printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
 	}

-	if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
+	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
 		btrfs_error_commit_super(root);

 	btrfs_put_block_group_cache(fs_info);
@@ -3352,9 +3415,9 @@ int close_ctree(struct btrfs_root *root)

 	btrfs_free_qgroup_config(root->fs_info);

-	if (fs_info->delalloc_bytes) {
-		printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
-		       (unsigned long long)fs_info->delalloc_bytes);
+	if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
+		printk(KERN_INFO "btrfs: at unmount delalloc count %lld\n",
+		       percpu_counter_sum(&fs_info->delalloc_bytes));
 	}

 	free_extent_buffer(fs_info->extent_root->node);
@@ -3384,6 +3447,8 @@ int close_ctree(struct btrfs_root *root)
 	btrfs_stop_workers(&fs_info->workers);
 	btrfs_stop_workers(&fs_info->endio_workers);
 	btrfs_stop_workers(&fs_info->endio_meta_workers);
+	btrfs_stop_workers(&fs_info->endio_raid56_workers);
+	btrfs_stop_workers(&fs_info->rmw_workers);
 	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
 	btrfs_stop_workers(&fs_info->endio_write_workers);
 	btrfs_stop_workers(&fs_info->endio_freespace_worker);
@@ -3401,9 +3466,13 @@ int close_ctree(struct btrfs_root *root)
 	btrfs_close_devices(fs_info->fs_devices);
 	btrfs_mapping_tree_free(&fs_info->mapping_tree);

+	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
+	percpu_counter_destroy(&fs_info->delalloc_bytes);
 	bdi_destroy(&fs_info->bdi);
 	cleanup_srcu_struct(&fs_info->subvol_srcu);

+	btrfs_free_stripe_hash_table(fs_info);
+
 	return 0;
 }

@@ -3443,11 +3512,10 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
 		       (unsigned long long)transid,
 		       (unsigned long long)root->fs_info->generation);
 	was_dirty = set_extent_buffer_dirty(buf);
-	if (!was_dirty) {
-		spin_lock(&root->fs_info->delalloc_lock);
-		root->fs_info->dirty_metadata_bytes += buf->len;
-		spin_unlock(&root->fs_info->delalloc_lock);
-	}
+	if (!was_dirty)
+		__percpu_counter_add(&root->fs_info->dirty_metadata_bytes,
+				     buf->len,
+				     root->fs_info->dirty_metadata_batch);
 }

 static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
@@ -3457,8 +3525,7 @@ static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
 	 * looks as though older kernels can get into trouble with
 	 * this code, they end up stuck in balance_dirty_pages forever
 	 */
-	u64 num_dirty;
-	unsigned long thresh = 32 * 1024 * 1024;
+	int ret;

 	if (current->flags & PF_MEMALLOC)
 		return;
@@ -3466,9 +3533,9 @@ static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
 	if (flush_delayed)
 		btrfs_balance_delayed_items(root);

-	num_dirty = root->fs_info->dirty_metadata_bytes;
-
-	if (num_dirty > thresh) {
+	ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
+				     BTRFS_DIRTY_METADATA_THRESH);
+	if (ret > 0) {
 		balance_dirty_pages_ratelimited(
 				   root->fs_info->btree_inode->i_mapping);
 	}
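The two hunks above replace a spinlock-protected u64 with a per-CPU counter, so dirty-metadata accounting no longer bounces one cache line across CPUs: updates accumulate locally and fold into the global sum in batches. A minimal sketch of the same pattern; counter, BATCH and THRESH are placeholder names, not btrfs symbols:

    #include <linux/percpu_counter.h>

    static struct percpu_counter counter;
    #define BATCH  4096
    #define THRESH (32 * 1024 * 1024)

    static void account(long delta)
    {
            /* cheap per-CPU add; spills into the global count per BATCH */
            __percpu_counter_add(&counter, delta, BATCH);
    }

    static bool over_thresh(void)
    {
            /* only sums all CPUs when the estimate is too close to call */
            return percpu_counter_compare(&counter, THRESH) > 0;
    }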
@@ -3518,7 +3585,8 @@ void btrfs_error_commit_super(struct btrfs_root *root)
 	btrfs_cleanup_transaction(root);
 }

-static void btrfs_destroy_ordered_operations(struct btrfs_root *root)
+static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
+					     struct btrfs_root *root)
 {
 	struct btrfs_inode *btrfs_inode;
 	struct list_head splice;
@@ -3528,7 +3596,7 @@ static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
 	mutex_lock(&root->fs_info->ordered_operations_mutex);
 	spin_lock(&root->fs_info->ordered_extent_lock);

-	list_splice_init(&root->fs_info->ordered_operations, &splice);
+	list_splice_init(&t->ordered_operations, &splice);
 	while (!list_empty(&splice)) {
 		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
 					 ordered_operations);
@@ -3544,35 +3612,16 @@ static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,

 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
 {
-	struct list_head splice;
 	struct btrfs_ordered_extent *ordered;
-	struct inode *inode;
-
-	INIT_LIST_HEAD(&splice);

 	spin_lock(&root->fs_info->ordered_extent_lock);
-
-	list_splice_init(&root->fs_info->ordered_extents, &splice);
-	while (!list_empty(&splice)) {
-		ordered = list_entry(splice.next, struct btrfs_ordered_extent,
-				     root_extent_list);
-
-		list_del_init(&ordered->root_extent_list);
-		atomic_inc(&ordered->refs);
-
-		/* the inode may be getting freed (in sys_unlink path). */
-		inode = igrab(ordered->inode);
-
-		spin_unlock(&root->fs_info->ordered_extent_lock);
-		if (inode)
-			iput(inode);
-
-		atomic_set(&ordered->refs, 1);
-		btrfs_put_ordered_extent(ordered);
-
-		spin_lock(&root->fs_info->ordered_extent_lock);
-	}
-
+	/*
+	 * This will just short circuit the ordered completion stuff which will
+	 * make sure the ordered extent gets properly cleaned up.
+	 */
+	list_for_each_entry(ordered, &root->fs_info->ordered_extents,
+			    root_extent_list)
+		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
 	spin_unlock(&root->fs_info->ordered_extent_lock);
 }

@@ -3594,11 +3643,11 @@ int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
 	}

 	while ((node = rb_first(&delayed_refs->root)) != NULL) {
-		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
+		struct btrfs_delayed_ref_head *head = NULL;

+		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
 		atomic_set(&ref->refs, 1);
 		if (btrfs_delayed_ref_is_head(ref)) {
-			struct btrfs_delayed_ref_head *head;

 			head = btrfs_delayed_node_to_head(ref);
 			if (!mutex_trylock(&head->mutex)) {
@@ -3614,16 +3663,18 @@ int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
 				continue;
 			}

-			kfree(head->extent_op);
+			btrfs_free_delayed_extent_op(head->extent_op);
 			delayed_refs->num_heads--;
 			if (list_empty(&head->cluster))
 				delayed_refs->num_heads_ready--;
 			list_del_init(&head->cluster);
 		}

 		ref->in_tree = 0;
 		rb_erase(&ref->rb_node, &delayed_refs->root);
 		delayed_refs->num_entries--;
-
+		if (head)
+			mutex_unlock(&head->mutex);
 		spin_unlock(&delayed_refs->lock);
 		btrfs_put_delayed_ref(ref);
@@ -3671,6 +3722,8 @@ static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
 					    delalloc_inodes);

 		list_del_init(&btrfs_inode->delalloc_inodes);
+		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
+			  &btrfs_inode->runtime_flags);

 		btrfs_invalidate_inodes(btrfs_inode->root);
 	}
@@ -3823,10 +3876,8 @@ int btrfs_cleanup_transaction(struct btrfs_root *root)

 	while (!list_empty(&list)) {
 		t = list_entry(list.next, struct btrfs_transaction, list);
-		if (!t)
-			break;

-		btrfs_destroy_ordered_operations(root);
+		btrfs_destroy_ordered_operations(t, root);

 		btrfs_destroy_ordered_extents(root);

@@ -25,6 +25,13 @@
 #define BTRFS_SUPER_MIRROR_MAX	 3
 #define BTRFS_SUPER_MIRROR_SHIFT 12

+enum {
+	BTRFS_WQ_ENDIO_DATA = 0,
+	BTRFS_WQ_ENDIO_METADATA = 1,
+	BTRFS_WQ_ENDIO_FREE_SPACE = 2,
+	BTRFS_WQ_ENDIO_RAID56 = 3,
+};
+
 static inline u64 btrfs_sb_offset(int mirror)
 {
 	u64 start = 16 * 1024;
(file diff suppressed because it is too large)
@@ -4,7 +4,6 @@
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/page-flags.h>
-#include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/blkdev.h>
 #include <linux/swap.h>
@@ -1834,7 +1833,7 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
  */
 static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
 {
-	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+	u64 start = page_offset(page);
 	u64 end = start + PAGE_CACHE_SIZE - 1;
 	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
 		SetPageUptodate(page);
@@ -1846,7 +1845,7 @@ static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
  */
 static void check_page_locked(struct extent_io_tree *tree, struct page *page)
 {
-	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+	u64 start = page_offset(page);
 	u64 end = start + PAGE_CACHE_SIZE - 1;
 	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
 		unlock_page(page);
@@ -1895,13 +1894,11 @@ static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
 	if (ret)
 		err = ret;

-	if (did_repair) {
-		ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
-					rec->start + rec->len - 1,
-					EXTENT_DAMAGED, GFP_NOFS);
-		if (ret && !err)
-			err = ret;
-	}
+	ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
+				rec->start + rec->len - 1,
+				EXTENT_DAMAGED, GFP_NOFS);
+	if (ret && !err)
+		err = ret;

 	kfree(rec);
 	return err;
@@ -1932,10 +1929,15 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
 	u64 map_length = 0;
 	u64 sector;
 	struct btrfs_bio *bbio = NULL;
+	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
 	int ret;

 	BUG_ON(!mirror_num);

+	/* we can't repair anything in raid56 yet */
+	if (btrfs_is_parity_mirror(map_tree, logical, length, mirror_num))
+		return 0;
+
 	bio = bio_alloc(GFP_NOFS, 1);
 	if (!bio)
 		return -EIO;
@@ -1960,7 +1962,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
 		return -EIO;
 	}
 	bio->bi_bdev = dev->bdev;
-	bio_add_page(bio, page, length, start-page_offset(page));
+	bio_add_page(bio, page, length, start - page_offset(page));
 	btrfsic_submit_bio(WRITE_SYNC, bio);
 	wait_for_completion(&compl);

@@ -2052,6 +2054,7 @@ static int clean_io_failure(u64 start, struct page *page)
 						failrec->failed_mirror);
 			did_repair = !ret;
 		}
+		ret = 0;
 	}

 out:
@@ -2293,8 +2296,7 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
 		struct page *page = bvec->bv_page;
 		tree = &BTRFS_I(page->mapping->host)->io_tree;

-		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
-			 bvec->bv_offset;
+		start = page_offset(page) + bvec->bv_offset;
 		end = start + bvec->bv_len - 1;

 		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
@@ -2353,8 +2355,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 			 (long int)bio->bi_bdev);
 		tree = &BTRFS_I(page->mapping->host)->io_tree;

-		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
-			bvec->bv_offset;
+		start = page_offset(page) + bvec->bv_offset;
 		end = start + bvec->bv_len - 1;

 		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
@@ -2471,7 +2472,7 @@ static int __must_check submit_one_bio(int rw, struct bio *bio,
 	struct extent_io_tree *tree = bio->bi_private;
 	u64 start;

-	start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
+	start = page_offset(page) + bvec->bv_offset;

 	bio->bi_private = NULL;

@@ -2489,13 +2490,13 @@ static int __must_check submit_one_bio(int rw, struct bio *bio,
 	return ret;
 }

-static int merge_bio(struct extent_io_tree *tree, struct page *page,
+static int merge_bio(int rw, struct extent_io_tree *tree, struct page *page,
 		     unsigned long offset, size_t size, struct bio *bio,
 		     unsigned long bio_flags)
 {
 	int ret = 0;
 	if (tree->ops && tree->ops->merge_bio_hook)
-		ret = tree->ops->merge_bio_hook(page, offset, size, bio,
+		ret = tree->ops->merge_bio_hook(rw, page, offset, size, bio,
 						bio_flags);
 	BUG_ON(ret < 0);
 	return ret;
@@ -2530,7 +2531,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
 			sector;

 		if (prev_bio_flags != bio_flags || !contig ||
-		    merge_bio(tree, page, offset, page_size, bio, bio_flags) ||
+		    merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
 		    bio_add_page(bio, page, page_size, offset) < page_size) {
 			ret = submit_one_bio(rw, bio, mirror_num,
 					     prev_bio_flags);
@@ -2595,7 +2596,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 				   unsigned long *bio_flags)
 {
 	struct inode *inode = page->mapping->host;
-	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+	u64 start = page_offset(page);
 	u64 page_end = start + PAGE_CACHE_SIZE - 1;
 	u64 end;
 	u64 cur = start;
@@ -2648,6 +2649,8 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 		}
 	}
 	while (cur <= end) {
+		unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
+
 		if (cur >= last_byte) {
 			char *userpage;
 			struct extent_state *cached = NULL;
@@ -2682,7 +2685,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,

 		iosize = min(extent_map_end(em) - cur, end - cur + 1);
 		cur_end = min(extent_map_end(em) - 1, end);
-		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
+		iosize = ALIGN(iosize, blocksize);
 		if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
 			disk_io_size = em->block_len;
 			sector = em->block_start >> 9;
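This hunk and several later ones replace the open-coded round-up `(x + b - 1) & ~(b - 1)` with the kernel's ALIGN() macro; the two forms are equivalent when the alignment is a power of two. A small sketch for comparison, illustrative only:

    #include <linux/kernel.h>	/* ALIGN() */

    static u64 round_up_open_coded(u64 x, u64 b)
    {
            /* same result as ALIGN(x, b) for power-of-two b */
            return (x + b - 1) & ~(b - 1);
    }
    /* e.g. ALIGN(4097, 4096) == 8192 and ALIGN(4096, 4096) == 4096 */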
@@ -2735,26 +2738,17 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 			continue;
 		}

-		ret = 0;
-		if (tree->ops && tree->ops->readpage_io_hook) {
-			ret = tree->ops->readpage_io_hook(page, cur,
-							  cur + iosize - 1);
-		}
-		if (!ret) {
-			unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
-			pnr -= page->index;
-			ret = submit_extent_page(READ, tree, page,
+		pnr -= page->index;
+		ret = submit_extent_page(READ, tree, page,
 					 sector, disk_io_size, pg_offset,
 					 bdev, bio, pnr,
 					 end_bio_extent_readpage, mirror_num,
 					 *bio_flags,
 					 this_bio_flag);
-			if (!ret) {
-				nr++;
-				*bio_flags = this_bio_flag;
-			}
-		}
-		if (ret) {
+		if (!ret) {
+			nr++;
+			*bio_flags = this_bio_flag;
+		} else {
 			SetPageError(page);
 			unlock_extent(tree, cur, cur + iosize - 1);
 		}
@@ -2806,7 +2800,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 	struct inode *inode = page->mapping->host;
 	struct extent_page_data *epd = data;
 	struct extent_io_tree *tree = epd->tree;
-	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+	u64 start = page_offset(page);
 	u64 delalloc_start;
 	u64 page_end = start + PAGE_CACHE_SIZE - 1;
 	u64 end;
@@ -2982,7 +2976,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 		BUG_ON(extent_map_end(em) <= cur);
 		BUG_ON(end < cur);
 		iosize = min(extent_map_end(em) - cur, end - cur + 1);
-		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
+		iosize = ALIGN(iosize, blocksize);
 		sector = (em->block_start + extent_offset) >> 9;
 		bdev = em->bdev;
 		block_start = em->block_start;
@@ -3124,12 +3118,9 @@ static int lock_extent_buffer_for_io(struct extent_buffer *eb,
 		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
 		spin_unlock(&eb->refs_lock);
 		btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
-		spin_lock(&fs_info->delalloc_lock);
-		if (fs_info->dirty_metadata_bytes >= eb->len)
-			fs_info->dirty_metadata_bytes -= eb->len;
-		else
-			WARN_ON(1);
-		spin_unlock(&fs_info->delalloc_lock);
+		__percpu_counter_add(&fs_info->dirty_metadata_bytes,
+				     -eb->len,
+				     fs_info->dirty_metadata_batch);
 		ret = 1;
 	} else {
 		spin_unlock(&eb->refs_lock);
@@ -3446,15 +3437,9 @@ retry:
 			 * swizzled back from swapper_space to tmpfs file
 			 * mapping
 			 */
-			if (tree->ops &&
-			    tree->ops->write_cache_pages_lock_hook) {
-				tree->ops->write_cache_pages_lock_hook(page,
-							       data, flush_fn);
-			} else {
-				if (!trylock_page(page)) {
-					flush_fn(data);
-					lock_page(page);
-				}
+			if (!trylock_page(page)) {
+				flush_fn(data);
+				lock_page(page);
 			}

 			if (unlikely(page->mapping != mapping)) {
@@ -3674,11 +3659,11 @@ int extent_invalidatepage(struct extent_io_tree *tree,
 			  struct page *page, unsigned long offset)
 {
 	struct extent_state *cached_state = NULL;
-	u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
+	u64 start = page_offset(page);
 	u64 end = start + PAGE_CACHE_SIZE - 1;
 	size_t blocksize = page->mapping->host->i_sb->s_blocksize;

-	start += (offset + blocksize - 1) & ~(blocksize - 1);
+	start += ALIGN(offset, blocksize);
 	if (start > end)
 		return 0;

@@ -3700,7 +3685,7 @@ int try_release_extent_state(struct extent_map_tree *map,
 			     struct extent_io_tree *tree, struct page *page,
 			     gfp_t mask)
 {
-	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+	u64 start = page_offset(page);
 	u64 end = start + PAGE_CACHE_SIZE - 1;
 	int ret = 1;

@@ -3739,7 +3724,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
 			       gfp_t mask)
 {
 	struct extent_map *em;
-	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+	u64 start = page_offset(page);
 	u64 end = start + PAGE_CACHE_SIZE - 1;

 	if ((mask & __GFP_WAIT) &&
@@ -3797,7 +3782,7 @@ static struct extent_map *get_extent_skip_holes(struct inode *inode,
 		len = last - offset;
 		if (len == 0)
 			break;
-		len = (len + sectorsize - 1) & ~(sectorsize - 1);
+		len = ALIGN(len, sectorsize);
 		em = get_extent(inode, NULL, 0, offset, len, 0);
 		if (IS_ERR_OR_NULL(em))
 			return em;
@@ -3995,8 +3980,6 @@ static void __free_extent_buffer(struct extent_buffer *eb)
 	list_del(&eb->leak_list);
 	spin_unlock_irqrestore(&leak_lock, flags);
 #endif
-	if (eb->pages && eb->pages != eb->inline_pages)
-		kfree(eb->pages);
 	kmem_cache_free(extent_buffer_cache, eb);
 }

@@ -4037,19 +4020,12 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
 	atomic_set(&eb->refs, 1);
 	atomic_set(&eb->io_pages, 0);

-	if (len > MAX_INLINE_EXTENT_BUFFER_SIZE) {
-		struct page **pages;
-		int num_pages = (len + PAGE_CACHE_SIZE - 1) >>
-			PAGE_CACHE_SHIFT;
-		pages = kzalloc(num_pages, mask);
-		if (!pages) {
-			__free_extent_buffer(eb);
-			return NULL;
-		}
-		eb->pages = pages;
-	} else {
-		eb->pages = eb->inline_pages;
-	}
+	/*
+	 * Sanity checks, currently the maximum is 64k covered by 16x 4k pages
+	 */
+	BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
+		> MAX_INLINE_EXTENT_BUFFER_SIZE);
+	BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);

 	return eb;
 }
@@ -4180,6 +4156,7 @@ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)

 static void check_buffer_tree_ref(struct extent_buffer *eb)
 {
+	int refs;
 	/* the ref bit is tricky. We have to make sure it is set
 	 * if we have the buffer dirty. Otherwise the
 	 * code to free a buffer can end up dropping a dirty
@@ -4200,6 +4177,10 @@ static void check_buffer_tree_ref(struct extent_buffer *eb)
 	 * So bump the ref count first, then set the bit. If someone
 	 * beat us to it, drop the ref we added.
 	 */
+	refs = atomic_read(&eb->refs);
+	if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
+		return;
+
 	spin_lock(&eb->refs_lock);
 	if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
 		atomic_inc(&eb->refs);
@@ -4401,9 +4382,20 @@ static int release_extent_buffer(struct extent_buffer *eb, gfp_t mask)

 void free_extent_buffer(struct extent_buffer *eb)
 {
+	int refs;
+	int old;
 	if (!eb)
 		return;

+	while (1) {
+		refs = atomic_read(&eb->refs);
+		if (refs <= 3)
+			break;
+		old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
+		if (old == refs)
+			return;
+	}
+
 	spin_lock(&eb->refs_lock);
 	if (atomic_read(&eb->refs) == 2 &&
 	    test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
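free_extent_buffer above now drops references without taking refs_lock as long as the count stays above a floor, retrying with cmpxchg when it races with another updater. A generic sketch of that lock-free fast path, with placeholder names (FLOOR is not a btrfs constant):

    #include <linux/atomic.h>

    #define FLOOR 3

    static bool fast_put(atomic_t *refs)
    {
            int cur, old;

            while (1) {
                    cur = atomic_read(refs);
                    if (cur <= FLOOR)
                            return false;   /* fall back to the locked path */
                    old = atomic_cmpxchg(refs, cur, cur - 1);
                    if (old == cur)
                            return true;    /* dropped one ref locklessly */
            }
    }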
@@ -72,10 +72,9 @@ struct extent_io_ops {
 	int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
 	int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
 	extent_submit_bio_hook_t *submit_bio_hook;
-	int (*merge_bio_hook)(struct page *page, unsigned long offset,
+	int (*merge_bio_hook)(int rw, struct page *page, unsigned long offset,
 			      size_t size, struct bio *bio,
 			      unsigned long bio_flags);
-	int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
 	int (*readpage_io_failed_hook)(struct page *page, int failed_mirror);
 	int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
 				    struct extent_state *state, int mirror);
@@ -90,8 +89,6 @@ struct extent_io_ops {
 			      struct extent_state *other);
 	void (*split_extent_hook)(struct inode *inode,
 				  struct extent_state *orig, u64 split);
-	int (*write_cache_pages_lock_hook)(struct page *page, void *data,
-					   void (*flush_fn)(void *));
 };

 struct extent_io_tree {
@@ -161,8 +158,7 @@ struct extent_buffer {
 	 */
 	wait_queue_head_t read_lock_wq;
 	wait_queue_head_t lock_wq;
-	struct page *inline_pages[INLINE_EXTENT_BUFFER_PAGES];
-	struct page **pages;
+	struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
 };

 static inline void extent_set_compress_type(unsigned long *bio_flags,
@@ -1,6 +1,5 @@
 #include <linux/err.h>
 #include <linux/slab.h>
-#include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/hardirq.h>
 #include "ctree.h"
@@ -684,6 +684,24 @@ out:
 	return ret;
 }

+static u64 btrfs_sector_sum_left(struct btrfs_ordered_sum *sums,
+				 struct btrfs_sector_sum *sector_sum,
+				 u64 total_bytes, u64 sectorsize)
+{
+	u64 tmp = sectorsize;
+	u64 next_sector = sector_sum->bytenr;
+	struct btrfs_sector_sum *next = sector_sum + 1;
+
+	while ((tmp + total_bytes) < sums->len) {
+		if (next_sector + sectorsize != next->bytenr)
+			break;
+		tmp += sectorsize;
+		next_sector = next->bytenr;
+		next++;
+	}
+	return tmp;
+}
+
 int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
 			   struct btrfs_root *root,
 			   struct btrfs_ordered_sum *sums)
@@ -789,20 +807,32 @@ again:
 		goto insert;
 	}

-	if (csum_offset >= btrfs_item_size_nr(leaf, path->slots[0]) /
+	if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) /
 	    csum_size) {
-		u32 diff = (csum_offset + 1) * csum_size;
+		int extend_nr;
+		u64 tmp;
+		u32 diff;
+		u32 free_space;

-		/*
-		 * is the item big enough already? we dropped our lock
-		 * before and need to recheck
-		 */
-		if (diff < btrfs_item_size_nr(leaf, path->slots[0]))
-			goto csum;
+		if (btrfs_leaf_free_space(root, leaf) <
+				 sizeof(struct btrfs_item) + csum_size * 2)
+			goto insert;
+
+		free_space = btrfs_leaf_free_space(root, leaf) -
+				 sizeof(struct btrfs_item) - csum_size;
+		tmp = btrfs_sector_sum_left(sums, sector_sum, total_bytes,
+					    root->sectorsize);
+		tmp >>= root->fs_info->sb->s_blocksize_bits;
+		WARN_ON(tmp < 1);
+
+		extend_nr = max_t(int, 1, (int)tmp);
+		diff = (csum_offset + extend_nr) * csum_size;
+		diff = min(diff, MAX_CSUM_ITEMS(root, csum_size) * csum_size);

 		diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
-		if (diff != csum_size)
-			goto insert;
+		diff = min(free_space, diff);
+		diff /= csum_size;
+		diff *= csum_size;

 		btrfs_extend_item(trans, root, path, diff);
 		goto csum;
@@ -812,19 +842,14 @@ insert:
 	btrfs_release_path(path);
 	csum_offset = 0;
 	if (found_next) {
-		u64 tmp = total_bytes + root->sectorsize;
-		u64 next_sector = sector_sum->bytenr;
-		struct btrfs_sector_sum *next = sector_sum + 1;
+		u64 tmp;

-		while (tmp < sums->len) {
-			if (next_sector + root->sectorsize != next->bytenr)
-				break;
-			tmp += root->sectorsize;
-			next_sector = next->bytenr;
-			next++;
-		}
-		tmp = min(tmp, next_offset - file_key.offset);
+		tmp = btrfs_sector_sum_left(sums, sector_sum, total_bytes,
+					    root->sectorsize);
 		tmp >>= root->fs_info->sb->s_blocksize_bits;
+		tmp = min(tmp, (next_offset - file_key.offset) >>
+			  root->fs_info->sb->s_blocksize_bits);
+
 		tmp = max((u64)1, tmp);
 		tmp = min(tmp, (u64)MAX_CSUM_ITEMS(root, csum_size));
 		ins_size = csum_size * tmp;
@@ -30,11 +30,11 @@
 #include <linux/statfs.h>
 #include <linux/compat.h>
 #include <linux/slab.h>
+#include <linux/btrfs.h>
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
 #include "btrfs_inode.h"
-#include "ioctl.h"
 #include "print-tree.h"
 #include "tree-log.h"
 #include "locking.h"
@@ -374,6 +374,11 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)

 	atomic_inc(&fs_info->defrag_running);
 	while(1) {
+		/* Pause the auto defragger. */
+		if (test_bit(BTRFS_FS_STATE_REMOUNTING,
+			     &fs_info->fs_state))
+			break;
+
 		if (!__need_auto_defrag(fs_info->tree_root))
 			break;

@@ -505,8 +510,7 @@ int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
 	loff_t isize = i_size_read(inode);

 	start_pos = pos & ~((u64)root->sectorsize - 1);
-	num_bytes = (write_bytes + pos - start_pos +
-		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
+	num_bytes = ALIGN(write_bytes + pos - start_pos, root->sectorsize);

 	end_of_last_block = start_pos + num_bytes - 1;
 	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
@@ -1544,7 +1548,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
 	 * although we have opened a file as writable, we have
 	 * to stop this write operation to ensure FS consistency.
 	 */
-	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
 		mutex_unlock(&inode->i_mutex);
 		err = -EROFS;
 		goto out;
@@ -1627,7 +1631,20 @@ int btrfs_release_file(struct inode *inode, struct file *filp)
 	 */
 	if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
 			       &BTRFS_I(inode)->runtime_flags)) {
-		btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
+		struct btrfs_trans_handle *trans;
+		struct btrfs_root *root = BTRFS_I(inode)->root;
+
+		/*
+		 * We need to block on a committing transaction to keep us from
+		 * throwing a ordered operation on to the list and causing
+		 * something like sync to deadlock trying to flush out this
+		 * inode.
+		 */
+		trans = btrfs_start_transaction(root, 0);
+		if (IS_ERR(trans))
+			return PTR_ERR(trans);
+		btrfs_add_ordered_operation(trans, BTRFS_I(inode)->root, inode);
+		btrfs_end_transaction(trans, root);
 		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
 			filemap_flush(inode->i_mapping);
 	}
@@ -1654,16 +1671,21 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	int ret = 0;
 	struct btrfs_trans_handle *trans;
+	bool full_sync = 0;

 	trace_btrfs_sync_file(file, datasync);

 	/*
 	 * We write the dirty pages in the range and wait until they complete
 	 * out of the ->i_mutex. If so, we can flush the dirty pages by
-	 * multi-task, and make the performance up.
+	 * multi-task, and make the performance up.  See
+	 * btrfs_wait_ordered_range for an explanation of the ASYNC check.
 	 */
 	atomic_inc(&BTRFS_I(inode)->sync_writers);
-	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
+	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
+	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
+			     &BTRFS_I(inode)->runtime_flags))
+		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
 	atomic_dec(&BTRFS_I(inode)->sync_writers);
 	if (ret)
 		return ret;
@@ -1675,7 +1697,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	 * range being left.
 	 */
 	atomic_inc(&root->log_batch);
-	btrfs_wait_ordered_range(inode, start, end - start + 1);
+	full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+			     &BTRFS_I(inode)->runtime_flags);
+	if (full_sync)
+		btrfs_wait_ordered_range(inode, start, end - start + 1);
 	atomic_inc(&root->log_batch);

 	/*
@@ -1742,13 +1767,25 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)

 	if (ret != BTRFS_NO_LOG_SYNC) {
 		if (ret > 0) {
+			/*
+			 * If we didn't already wait for ordered extents we need
+			 * to do that now.
+			 */
+			if (!full_sync)
+				btrfs_wait_ordered_range(inode, start,
+							 end - start + 1);
 			ret = btrfs_commit_transaction(trans, root);
 		} else {
 			ret = btrfs_sync_log(trans, root);
-			if (ret == 0)
+			if (ret == 0) {
 				ret = btrfs_end_transaction(trans, root);
-			else
+			} else {
+				if (!full_sync)
+					btrfs_wait_ordered_range(inode, start,
+								 end -
+								 start + 1);
 				ret = btrfs_commit_transaction(trans, root);
+			}
 		}
 	} else {
 		ret = btrfs_end_transaction(trans, root);
@@ -1356,6 +1356,8 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
 	u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
 	int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

+	max_bitmaps = max(max_bitmaps, 1);
+
 	BUG_ON(ctl->total_bitmaps > max_bitmaps);

 	/*
@@ -1463,10 +1465,14 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
 }

 static struct btrfs_free_space *
-find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
+find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
+		unsigned long align)
 {
 	struct btrfs_free_space *entry;
 	struct rb_node *node;
+	u64 ctl_off;
+	u64 tmp;
+	u64 align_off;
 	int ret;

 	if (!ctl->free_space_offset.rb_node)
@@ -1481,15 +1487,34 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
 		if (entry->bytes < *bytes)
 			continue;

+		/* make sure the space returned is big enough
+		 * to match our requested alignment
+		 */
+		if (*bytes >= align) {
+			ctl_off = entry->offset - ctl->start;
+			tmp = ctl_off + align - 1;
+			do_div(tmp, align);
+			tmp = tmp * align + ctl->start;
+			align_off = tmp - entry->offset;
+		} else {
+			align_off = 0;
+			tmp = entry->offset;
+		}
+
+		if (entry->bytes < *bytes + align_off)
+			continue;
+
 		if (entry->bitmap) {
-			ret = search_bitmap(ctl, entry, offset, bytes);
-			if (!ret)
+			ret = search_bitmap(ctl, entry, &tmp, bytes);
+			if (!ret) {
+				*offset = tmp;
 				return entry;
+			}
 			continue;
 		}

-		*offset = entry->offset;
-		*bytes = entry->bytes;
+		*offset = tmp;
+		*bytes = entry->bytes - align_off;
 		return entry;
 	}

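Because a full stripe is not necessarily a power of two, the alignment math above uses do_div instead of mask arithmetic. A sketch of rounding an offset up to an arbitrary alignment relative to a base, mirroring the hunk (names are placeholders):

    #include <linux/types.h>
    #include <asm/div64.h>

    static u64 round_up_from(u64 off, u64 base, unsigned long align)
    {
            u64 tmp = (off - base) + align - 1;

            do_div(tmp, align);             /* tmp becomes the quotient */
            return tmp * align + base;      /* first aligned offset >= off */
    }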
@@ -1636,10 +1661,14 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
 	}

 	/*
-	 * some block groups are so tiny they can't be enveloped by a bitmap, so
-	 * don't even bother to create a bitmap for this
+	 * The original block groups from mkfs can be really small, like 8
+	 * megabytes, so don't bother with a bitmap for those entries.  However
+	 * some block groups can be smaller than what a bitmap would cover but
+	 * are still large enough that they could overflow the 32k memory limit,
+	 * so allow those block groups to still be allowed to have a bitmap
+	 * entry.
 	 */
-	if (BITS_PER_BITMAP * ctl->unit > block_group->key.offset)
+	if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset)
 		return false;

 	return true;
@@ -2095,9 +2124,12 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 	struct btrfs_free_space *entry = NULL;
 	u64 bytes_search = bytes + empty_size;
 	u64 ret = 0;
+	u64 align_gap = 0;
+	u64 align_gap_len = 0;

 	spin_lock(&ctl->tree_lock);
-	entry = find_free_space(ctl, &offset, &bytes_search);
+	entry = find_free_space(ctl, &offset, &bytes_search,
+				block_group->full_stripe_len);
 	if (!entry)
 		goto out;

@@ -2107,9 +2139,15 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 		if (!entry->bytes)
 			free_bitmap(ctl, entry);
 	} else {
 		unlink_free_space(ctl, entry);
-		entry->offset += bytes;
-		entry->bytes -= bytes;
+		align_gap_len = offset - entry->offset;
+		align_gap = entry->offset;
+
+		entry->offset = offset + bytes;
+		WARN_ON(entry->bytes < bytes + align_gap_len);
+
+		entry->bytes -= bytes + align_gap_len;
 		if (!entry->bytes)
 			kmem_cache_free(btrfs_free_space_cachep, entry);
 		else
@@ -2119,6 +2157,8 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 out:
 	spin_unlock(&ctl->tree_lock);

+	if (align_gap_len)
+		__btrfs_add_free_space(ctl, align_gap, align_gap_len);
 	return ret;
 }

fs/btrfs/inode.c: 1064 lines changed (file diff suppressed because it is too large)

fs/btrfs/ioctl.c: 211 lines changed
@@ -42,12 +42,12 @@
 #include <linux/slab.h>
 #include <linux/blkdev.h>
 #include <linux/uuid.h>
+#include <linux/btrfs.h>
 #include "compat.h"
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
 #include "btrfs_inode.h"
-#include "ioctl.h"
 #include "print-tree.h"
 #include "volumes.h"
 #include "locking.h"
@@ -363,46 +363,52 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
 	return 0;
 }

-static noinline int create_subvol(struct btrfs_root *root,
+static noinline int create_subvol(struct inode *dir,
 				  struct dentry *dentry,
 				  char *name, int namelen,
 				  u64 *async_transid,
-				  struct btrfs_qgroup_inherit **inherit)
+				  struct btrfs_qgroup_inherit *inherit)
 {
 	struct btrfs_trans_handle *trans;
 	struct btrfs_key key;
 	struct btrfs_root_item root_item;
 	struct btrfs_inode_item *inode_item;
 	struct extent_buffer *leaf;
+	struct btrfs_root *root = BTRFS_I(dir)->root;
 	struct btrfs_root *new_root;
-	struct dentry *parent = dentry->d_parent;
-	struct inode *dir;
+	struct btrfs_block_rsv block_rsv;
 	struct timespec cur_time = CURRENT_TIME;
 	int ret;
 	int err;
 	u64 objectid;
 	u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
 	u64 index = 0;
+	u64 qgroup_reserved;
 	uuid_le new_uuid;

 	ret = btrfs_find_free_objectid(root->fs_info->tree_root, &objectid);
 	if (ret)
 		return ret;

-	dir = parent->d_inode;
-
+	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
 	/*
-	 * 1 - inode item
-	 * 2 - refs
-	 * 1 - root item
-	 * 2 - dir items
+	 * The same as the snapshot creation, please see the comment
+	 * of create_snapshot().
 	 */
-	trans = btrfs_start_transaction(root, 6);
-	if (IS_ERR(trans))
-		return PTR_ERR(trans);
+	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv,
+					       7, &qgroup_reserved);
+	if (ret)
+		return ret;

-	ret = btrfs_qgroup_inherit(trans, root->fs_info, 0, objectid,
-				   inherit ? *inherit : NULL);
+	trans = btrfs_start_transaction(root, 0);
+	if (IS_ERR(trans)) {
+		ret = PTR_ERR(trans);
+		goto out;
+	}
+	trans->block_rsv = &block_rsv;
+	trans->bytes_reserved = block_rsv.size;
+
+	ret = btrfs_qgroup_inherit(trans, root->fs_info, 0, objectid, inherit);
 	if (ret)
 		goto fail;

@@ -516,6 +522,8 @@ static noinline int create_subvol(struct inode *dir,
 	BUG_ON(ret);

 fail:
+	trans->block_rsv = NULL;
+	trans->bytes_reserved = 0;
 	if (async_transid) {
 		*async_transid = trans->transid;
 		err = btrfs_commit_transaction_async(trans, root, 1);
@@ -527,13 +535,15 @@ fail:

 	if (!ret)
 		d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry));
-
+out:
+	btrfs_subvolume_release_metadata(root, &block_rsv, qgroup_reserved);
 	return ret;
 }

-static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
-			   char *name, int namelen, u64 *async_transid,
-			   bool readonly, struct btrfs_qgroup_inherit **inherit)
+static int create_snapshot(struct btrfs_root *root, struct inode *dir,
+			   struct dentry *dentry, char *name, int namelen,
+			   u64 *async_transid, bool readonly,
+			   struct btrfs_qgroup_inherit *inherit)
 {
 	struct inode *inode;
 	struct btrfs_pending_snapshot *pending_snapshot;
@@ -549,23 +559,31 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,

 	btrfs_init_block_rsv(&pending_snapshot->block_rsv,
 			     BTRFS_BLOCK_RSV_TEMP);
+	/*
+	 * 1 - parent dir inode
+	 * 2 - dir entries
+	 * 1 - root item
+	 * 2 - root ref/backref
+	 * 1 - root of snapshot
+	 */
+	ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root,
+					&pending_snapshot->block_rsv, 7,
+					&pending_snapshot->qgroup_reserved);
+	if (ret)
+		goto out;

 	pending_snapshot->dentry = dentry;
 	pending_snapshot->root = root;
 	pending_snapshot->readonly = readonly;
-	if (inherit) {
-		pending_snapshot->inherit = *inherit;
-		*inherit = NULL;	/* take responsibility to free it */
-	}
+	pending_snapshot->dir = dir;
+	pending_snapshot->inherit = inherit;

-	trans = btrfs_start_transaction(root->fs_info->extent_root, 6);
+	trans = btrfs_start_transaction(root, 0);
 	if (IS_ERR(trans)) {
 		ret = PTR_ERR(trans);
 		goto fail;
 	}

-	ret = btrfs_snap_reserve_metadata(trans, pending_snapshot);
-	BUG_ON(ret);
-
 	spin_lock(&root->fs_info->trans_lock);
 	list_add(&pending_snapshot->list,
 		 &trans->transaction->pending_snapshots);
@@ -602,6 +620,10 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
 	d_instantiate(dentry, inode);
 	ret = 0;
 fail:
+	btrfs_subvolume_release_metadata(BTRFS_I(dir)->root,
+					 &pending_snapshot->block_rsv,
+					 pending_snapshot->qgroup_reserved);
+out:
 	kfree(pending_snapshot);
 	return ret;
 }
@@ -695,7 +717,7 @@ static noinline int btrfs_mksubvol(struct path *parent,
 				   char *name, int namelen,
 				   struct btrfs_root *snap_src,
 				   u64 *async_transid, bool readonly,
-				   struct btrfs_qgroup_inherit **inherit)
+				   struct btrfs_qgroup_inherit *inherit)
 {
 	struct inode *dir = parent->dentry->d_inode;
 	struct dentry *dentry;
@@ -732,11 +754,11 @@ static noinline int btrfs_mksubvol(struct path *parent,
 		goto out_up_read;

 	if (snap_src) {
-		error = create_snapshot(snap_src, dentry, name, namelen,
+		error = create_snapshot(snap_src, dir, dentry, name, namelen,
 					async_transid, readonly, inherit);
 	} else {
-		error = create_subvol(BTRFS_I(dir)->root, dentry,
-				      name, namelen, async_transid, inherit);
+		error = create_subvol(dir, dentry, name, namelen,
+				      async_transid, inherit);
 	}
 	if (!error)
 		fsnotify_mkdir(dir, dentry);
@@ -818,7 +840,7 @@ static int find_new_extents(struct btrfs_root *root,

 	while(1) {
 		ret = btrfs_search_forward(root, &min_key, &max_key,
-					   path, 0, newer_than);
+					   path, newer_than);
 		if (ret != 0)
 			goto none;
 		if (min_key.objectid != ino)
@@ -1206,6 +1228,12 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
 		if (!(inode->i_sb->s_flags & MS_ACTIVE))
 			break;

+		if (btrfs_defrag_cancelled(root->fs_info)) {
+			printk(KERN_DEBUG "btrfs: defrag_file cancelled\n");
+			ret = -EAGAIN;
+			break;
+		}
+
 		if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
 					 extent_thresh, &last_len, &skip,
 					 &defrag_end, range->flags &
@@ -1329,9 +1357,6 @@ static noinline int btrfs_ioctl_resize(struct file *file,
 	int ret = 0;
 	int mod = 0;

-	if (root->fs_info->sb->s_flags & MS_RDONLY)
-		return -EROFS;
-
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;

@@ -1363,6 +1388,10 @@ static noinline int btrfs_ioctl_resize(struct file *file,
 		*devstr = '\0';
 		devstr = vol_args->name;
 		devid = simple_strtoull(devstr, &end, 10);
+		if (!devid) {
+			ret = -EINVAL;
+			goto out_free;
+		}
 		printk(KERN_INFO "btrfs: resizing devid %llu\n",
 		       (unsigned long long)devid);
 	}
@@ -1371,7 +1400,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
 	if (!device) {
 		printk(KERN_INFO "btrfs: resizer unable to find device %llu\n",
 		       (unsigned long long)devid);
-		ret = -EINVAL;
+		ret = -ENODEV;
 		goto out_free;
 	}

@@ -1379,7 +1408,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
 		printk(KERN_INFO "btrfs: resizer unable to apply on "
 		       "readonly device %llu\n",
 		       (unsigned long long)devid);
-		ret = -EINVAL;
+		ret = -EPERM;
 		goto out_free;
 	}

@@ -1401,7 +1430,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
 	}

 	if (device->is_tgtdev_for_dev_replace) {
-		ret = -EINVAL;
+		ret = -EPERM;
 		goto out_free;
 	}

@@ -1457,7 +1486,7 @@ out:
 static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
 				char *name, unsigned long fd, int subvol,
 				u64 *transid, bool readonly,
-				struct btrfs_qgroup_inherit **inherit)
+				struct btrfs_qgroup_inherit *inherit)
 {
 	int namelen;
 	int ret = 0;
@@ -1566,7 +1595,7 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,

 	ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
 					      vol_args->fd, subvol, ptr,
-					      readonly, &inherit);
+					      readonly, inherit);

 	if (ret == 0 && ptr &&
 	    copy_to_user(arg +
@@ -1863,7 +1892,7 @@ static noinline int search_ioctl(struct inode *inode,
 	path->keep_locks = 1;

 	while(1) {
-		ret = btrfs_search_forward(root, &key, &max_key, path, 0,
+		ret = btrfs_search_forward(root, &key, &max_key, path,
 					   sk->min_transid);
 		if (ret != 0) {
 			if (ret > 0)
@@ -2035,6 +2064,8 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
 	struct btrfs_root *dest = NULL;
 	struct btrfs_ioctl_vol_args *vol_args;
 	struct btrfs_trans_handle *trans;
+	struct btrfs_block_rsv block_rsv;
+	u64 qgroup_reserved;
 	int namelen;
 	int ret;
 	int err = 0;
@@ -2124,12 +2155,23 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
 	if (err)
 		goto out_up_write;

+	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
+	/*
+	 * One for dir inode, two for dir entries, two for root
+	 * ref/backref.
+	 */
+	err = btrfs_subvolume_reserve_metadata(root, &block_rsv,
+					       5, &qgroup_reserved);
+	if (err)
+		goto out_up_write;
+
 	trans = btrfs_start_transaction(root, 0);
 	if (IS_ERR(trans)) {
 		err = PTR_ERR(trans);
-		goto out_up_write;
+		goto out_release;
 	}
-	trans->block_rsv = &root->fs_info->global_block_rsv;
+	trans->block_rsv = &block_rsv;
+	trans->bytes_reserved = block_rsv.size;

 	ret = btrfs_unlink_subvol(trans, root, dir,
 				dest->root_key.objectid,
@@ -2159,10 +2201,14 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
 		}
 	}
 out_end_trans:
+	trans->block_rsv = NULL;
+	trans->bytes_reserved = 0;
 	ret = btrfs_end_transaction(trans, root);
 	if (ret && !err)
 		err = ret;
 	inode->i_flags |= S_DEAD;
+out_release:
+	btrfs_subvolume_release_metadata(root, &block_rsv, qgroup_reserved);
 out_up_write:
 	up_write(&root->fs_info->subvol_sem);
 out_unlock:
@@ -2171,6 +2217,12 @@ out_unlock:
 		shrink_dcache_sb(root->fs_info->sb);
 		btrfs_invalidate_inodes(dest);
 		d_delete(dentry);
+
+		/* the last ref */
+		if (dest->cache_inode) {
+			iput(dest->cache_inode);
+			dest->cache_inode = NULL;
+		}
 	}
 out_dput:
 	dput(dentry);
@@ -2211,10 +2263,10 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
 			ret = -EPERM;
 			goto out;
 		}
-		ret = btrfs_defrag_root(root, 0);
+		ret = btrfs_defrag_root(root);
 		if (ret)
 			goto out;
-		ret = btrfs_defrag_root(root->fs_info->extent_root, 0);
+		ret = btrfs_defrag_root(root->fs_info->extent_root);
 		break;
 	case S_IFREG:
 		if (!(file->f_mode & FMODE_WRITE)) {
@@ -3111,7 +3163,7 @@ static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
 	u64 transid;
 	int ret;

-	trans = btrfs_attach_transaction(root);
+	trans = btrfs_attach_transaction_barrier(root);
 	if (IS_ERR(trans)) {
 		if (PTR_ERR(trans) != -ENOENT)
 			return PTR_ERR(trans);
@@ -3289,7 +3341,7 @@ static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
 	struct inode_fs_paths *ipath = NULL;
 	struct btrfs_path *path;

-	if (!capable(CAP_SYS_ADMIN))
+	if (!capable(CAP_DAC_READ_SEARCH))
 		return -EPERM;

 	path = btrfs_alloc_path();
@@ -3914,6 +3966,65 @@ out:
 	return ret;
 }

+static int btrfs_ioctl_get_fslabel(struct file *file, void __user *arg)
+{
+	struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+	const char *label = root->fs_info->super_copy->label;
+	size_t len = strnlen(label, BTRFS_LABEL_SIZE);
+	int ret;
+
+	if (len == BTRFS_LABEL_SIZE) {
+		pr_warn("btrfs: label is too long, return the first %zu bytes\n",
+			--len);
+	}
+
+	mutex_lock(&root->fs_info->volume_mutex);
+	ret = copy_to_user(arg, label, len);
+	mutex_unlock(&root->fs_info->volume_mutex);
+
+	return ret ? -EFAULT : 0;
+}
+
+static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
+{
+	struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+	struct btrfs_super_block *super_block = root->fs_info->super_copy;
+	struct btrfs_trans_handle *trans;
+	char label[BTRFS_LABEL_SIZE];
+	int ret;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	if (copy_from_user(label, arg, sizeof(label)))
+		return -EFAULT;
+
+	if (strnlen(label, BTRFS_LABEL_SIZE) == BTRFS_LABEL_SIZE) {
+		pr_err("btrfs: unable to set label with more than %d bytes\n",
+		       BTRFS_LABEL_SIZE - 1);
+		return -EINVAL;
+	}
+
+	ret = mnt_want_write_file(file);
+	if (ret)
+		return ret;
+
+	mutex_lock(&root->fs_info->volume_mutex);
+	trans = btrfs_start_transaction(root, 0);
+	if (IS_ERR(trans)) {
+		ret = PTR_ERR(trans);
+		goto out_unlock;
+	}
+
+	strcpy(super_block->label, label);
+	ret = btrfs_end_transaction(trans, root);
+
+out_unlock:
+	mutex_unlock(&root->fs_info->volume_mutex);
+	mnt_drop_write_file(file);
+	return ret;
+}
+
 long btrfs_ioctl(struct file *file, unsigned int
 		cmd, unsigned long arg)
 {
@@ -4014,6 +4125,10 @@ long btrfs_ioctl(struct file *file, unsigned int
 		return btrfs_ioctl_qgroup_limit(file, argp);
 	case BTRFS_IOC_DEV_REPLACE:
 		return btrfs_ioctl_dev_replace(root, argp);
+	case BTRFS_IOC_GET_FSLABEL:
+		return btrfs_ioctl_get_fslabel(file, argp);
+	case BTRFS_IOC_SET_FSLABEL:
+		return btrfs_ioctl_set_fslabel(file, argp);
 	}

 	return -ENOTTY;
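With the two cases wired up above, the label becomes reachable from userspace through the regular btrfs ioctl path. A hedged sketch of reading it from a program, assuming a kernel and uapi header that ship these definitions:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/btrfs.h>    /* BTRFS_IOC_GET_FSLABEL, BTRFS_LABEL_SIZE */

    int main(void)
    {
            char label[BTRFS_LABEL_SIZE] = { 0 };
            int fd = open("/mnt", O_RDONLY);    /* any file on the fs */

            if (fd < 0 || ioctl(fd, BTRFS_IOC_GET_FSLABEL, label) < 0) {
                    perror("BTRFS_IOC_GET_FSLABEL");
                    return 1;
            }
            printf("label: %.*s\n", BTRFS_LABEL_SIZE, label);
            return 0;
    }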
@@ -113,11 +113,10 @@ again:
 		read_unlock(&eb->lock);
 		return;
 	}
-	read_unlock(&eb->lock);
-	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
-	read_lock(&eb->lock);
 	if (atomic_read(&eb->blocking_writers)) {
 		read_unlock(&eb->lock);
+		wait_event(eb->write_lock_wq,
+			   atomic_read(&eb->blocking_writers) == 0);
 		goto again;
 	}
 	atomic_inc(&eb->read_locks);
@@ -196,6 +196,9 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
 	entry->file_offset = file_offset;
 	entry->start = start;
 	entry->len = len;
+	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) &&
+	    !(type == BTRFS_ORDERED_NOCOW))
+		entry->csum_bytes_left = disk_len;
 	entry->disk_len = disk_len;
 	entry->bytes_left = len;
 	entry->inode = igrab(inode);
@@ -213,6 +216,7 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
 	INIT_LIST_HEAD(&entry->root_extent_list);
 	INIT_LIST_HEAD(&entry->work_list);
 	init_completion(&entry->completion);
+	INIT_LIST_HEAD(&entry->log_list);

 	trace_btrfs_ordered_extent_add(inode, entry);

@@ -270,6 +274,10 @@ void btrfs_add_ordered_sum(struct inode *inode,
 	tree = &BTRFS_I(inode)->ordered_tree;
 	spin_lock_irq(&tree->lock);
 	list_add_tail(&sum->list, &entry->list);
+	WARN_ON(entry->csum_bytes_left < sum->len);
+	entry->csum_bytes_left -= sum->len;
+	if (entry->csum_bytes_left == 0)
+		wake_up(&entry->wait);
 	spin_unlock_irq(&tree->lock);
 }

@@ -405,6 +413,66 @@ out:
 	return ret == 0;
 }

+/* Needs to either be called under a log transaction or the log_mutex */
+void btrfs_get_logged_extents(struct btrfs_root *log, struct inode *inode)
+{
+	struct btrfs_ordered_inode_tree *tree;
+	struct btrfs_ordered_extent *ordered;
+	struct rb_node *n;
+	int index = log->log_transid % 2;
+
+	tree = &BTRFS_I(inode)->ordered_tree;
+	spin_lock_irq(&tree->lock);
+	for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
+		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
+		spin_lock(&log->log_extents_lock[index]);
+		if (list_empty(&ordered->log_list)) {
+			list_add_tail(&ordered->log_list, &log->logged_list[index]);
+			atomic_inc(&ordered->refs);
+		}
+		spin_unlock(&log->log_extents_lock[index]);
+	}
+	spin_unlock_irq(&tree->lock);
+}
+
+void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
+{
+	struct btrfs_ordered_extent *ordered;
+	int index = transid % 2;
+
+	spin_lock_irq(&log->log_extents_lock[index]);
+	while (!list_empty(&log->logged_list[index])) {
+		ordered = list_first_entry(&log->logged_list[index],
+					   struct btrfs_ordered_extent,
+					   log_list);
+		list_del_init(&ordered->log_list);
+		spin_unlock_irq(&log->log_extents_lock[index]);
+		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
+						   &ordered->flags));
+		btrfs_put_ordered_extent(ordered);
+		spin_lock_irq(&log->log_extents_lock[index]);
+	}
+	spin_unlock_irq(&log->log_extents_lock[index]);
+}
+
+void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
+{
+	struct btrfs_ordered_extent *ordered;
+	int index = transid % 2;
+
+	spin_lock_irq(&log->log_extents_lock[index]);
+	while (!list_empty(&log->logged_list[index])) {
+		ordered = list_first_entry(&log->logged_list[index],
+					   struct btrfs_ordered_extent,
+					   log_list);
+		list_del_init(&ordered->log_list);
+		spin_unlock_irq(&log->log_extents_lock[index]);
+		btrfs_put_ordered_extent(ordered);
+		spin_lock_irq(&log->log_extents_lock[index]);
+	}
+	spin_unlock_irq(&log->log_extents_lock[index]);
+}
+
 /*
  * used to drop a reference on an ordered extent. This will free
  * the extent if the last reference is dropped
@@ -544,10 +612,12 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
 * extra check to make sure the ordered operation list really is empty
 * before we return
 */
int btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
                 struct btrfs_root *root, int wait)
{
    struct btrfs_inode *btrfs_inode;
    struct inode *inode;
    struct btrfs_transaction *cur_trans = trans->transaction;
    struct list_head splice;
    struct list_head works;
    struct btrfs_delalloc_work *work, *next;

@@ -558,14 +628,10 @@ int btrfs_run_ordered_operations(struct btrfs_root *root, int wait)

    mutex_lock(&root->fs_info->ordered_operations_mutex);
    spin_lock(&root->fs_info->ordered_extent_lock);
again:
    list_splice_init(&root->fs_info->ordered_operations, &splice);
    list_splice_init(&cur_trans->ordered_operations, &splice);
    while (!list_empty(&splice)) {

        btrfs_inode = list_entry(splice.next, struct btrfs_inode,
                     ordered_operations);

        inode = &btrfs_inode->vfs_inode;

        list_del_init(&btrfs_inode->ordered_operations);

@@ -574,24 +640,22 @@ again:
         * the inode may be getting freed (in sys_unlink path).
         */
        inode = igrab(inode);

        if (!wait && inode) {
            list_add_tail(&BTRFS_I(inode)->ordered_operations,
                      &root->fs_info->ordered_operations);
        }

        if (!inode)
            continue;

        if (!wait)
            list_add_tail(&BTRFS_I(inode)->ordered_operations,
                      &cur_trans->ordered_operations);
        spin_unlock(&root->fs_info->ordered_extent_lock);

        work = btrfs_alloc_delalloc_work(inode, wait, 1);
        if (!work) {
            spin_lock(&root->fs_info->ordered_extent_lock);
            if (list_empty(&BTRFS_I(inode)->ordered_operations))
                list_add_tail(&btrfs_inode->ordered_operations,
                          &splice);
            spin_lock(&root->fs_info->ordered_extent_lock);
            list_splice_tail(&splice,
                     &root->fs_info->ordered_operations);
                     &cur_trans->ordered_operations);
            spin_unlock(&root->fs_info->ordered_extent_lock);
            ret = -ENOMEM;
            goto out;

@@ -603,9 +667,6 @@ again:
        cond_resched();
        spin_lock(&root->fs_info->ordered_extent_lock);
    }
    if (wait && !list_empty(&root->fs_info->ordered_operations))
        goto again;

    spin_unlock(&root->fs_info->ordered_extent_lock);
out:
    list_for_each_entry_safe(work, next, &works, list) {

@@ -974,6 +1035,7 @@ out:
void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
                 struct btrfs_root *root, struct inode *inode)
{
    struct btrfs_transaction *cur_trans = trans->transaction;
    u64 last_mod;

    last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);

@@ -988,7 +1050,7 @@ void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
    spin_lock(&root->fs_info->ordered_extent_lock);
    if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
        list_add_tail(&BTRFS_I(inode)->ordered_operations,
                  &root->fs_info->ordered_operations);
                  &cur_trans->ordered_operations);
    }
    spin_unlock(&root->fs_info->ordered_extent_lock);
}

@@ -79,6 +79,8 @@ struct btrfs_ordered_sum {
#define BTRFS_ORDERED_UPDATED_ISIZE 7 /* indicates whether this ordered extent
                       * has done its due diligence in updating
                       * the isize. */
#define BTRFS_ORDERED_LOGGED_CSUM 8 /* We've logged the csums on this
                       ordered extent */

struct btrfs_ordered_extent {
    /* logical offset in the file */

@@ -96,6 +98,9 @@ struct btrfs_ordered_extent {
    /* number of bytes that still need writing */
    u64 bytes_left;

    /* number of bytes that still need csumming */
    u64 csum_bytes_left;

    /*
     * the end of the ordered extent which is behind it but
     * didn't update disk_i_size. Please see the comment of

@@ -118,6 +123,9 @@ struct btrfs_ordered_extent {
    /* list of checksums for insertion when the extent io is done */
    struct list_head list;

    /* If we need to wait on this to be done */
    struct list_head log_list;

    /* used to wait for the BTRFS_ORDERED_COMPLETE bit */
    wait_queue_head_t wait;

@@ -189,11 +197,15 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
                struct btrfs_ordered_extent *ordered);
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum);
int btrfs_run_ordered_operations(struct btrfs_root *root, int wait);
int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
                 struct btrfs_root *root, int wait);
void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
                 struct btrfs_root *root,
                 struct inode *inode);
void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput);
void btrfs_get_logged_extents(struct btrfs_root *log, struct inode *inode);
void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid);
void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid);
int __init ordered_data_init(void);
void ordered_data_exit(void);
#endif

@@ -294,6 +294,7 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
                   btrfs_dev_extent_chunk_offset(l, dev_extent),
                   (unsigned long long)
                   btrfs_dev_extent_length(l, dev_extent));
            break;
        case BTRFS_DEV_STATS_KEY:
            printk(KERN_INFO "\t\tdevice stats\n");
            break;

@@ -23,13 +23,13 @@
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "ioctl.h"
#include "backref.h"

/* TODO XXX FIXME

@@ -620,7 +620,9 @@ static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
    key.offset = qgroupid;

    path = btrfs_alloc_path();
    BUG_ON(!path);
    if (!path)
        return -ENOMEM;

    ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
    if (ret > 0)
        ret = -ENOENT;

@@ -661,7 +663,9 @@ static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
    key.offset = qgroup->qgroupid;

    path = btrfs_alloc_path();
    BUG_ON(!path);
    if (!path)
        return -ENOMEM;

    ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
    if (ret > 0)
        ret = -ENOENT;

@@ -702,7 +706,9 @@ static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
    key.offset = 0;

    path = btrfs_alloc_path();
    BUG_ON(!path);
    if (!path)
        return -ENOMEM;

    ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
    if (ret > 0)
        ret = -ENOENT;

@@ -732,33 +738,38 @@ static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
{
    struct btrfs_path *path;
    struct btrfs_key key;
    struct extent_buffer *leaf = NULL;
    int ret;

    if (!root)
        return -EINVAL;
    int nr = 0;

    path = btrfs_alloc_path();
    if (!path)
        return -ENOMEM;

    path->leave_spinning = 1;

    key.objectid = 0;
    key.offset = 0;
    key.type = 0;

    while (1) {
        key.objectid = 0;
        key.offset = 0;
        key.type = 0;

        path->leave_spinning = 1;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0) {
            if (path->slots[0] == 0)
                break;
            path->slots[0]--;
        } else if (ret < 0) {
        if (ret < 0)
            goto out;
        leaf = path->nodes[0];
        nr = btrfs_header_nritems(leaf);
        if (!nr)
            break;
        }

        ret = btrfs_del_item(trans, root, path);
        /*
         * delete the leaf one by one
         * since the whole tree is going
         * to be deleted.
         */
        path->slots[0] = 0;
        ret = btrfs_del_items(trans, root, path, 0, nr);
        if (ret)
            goto out;

        btrfs_release_path(path);
    }
    ret = 0;

@@ -847,6 +858,10 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
    int ret = 0;

    spin_lock(&fs_info->qgroup_lock);
    if (!fs_info->quota_root) {
        spin_unlock(&fs_info->qgroup_lock);
        return 0;
    }
    fs_info->quota_enabled = 0;
    fs_info->pending_quota_state = 0;
    quota_root = fs_info->quota_root;

fs/btrfs/raid56.c (new file, 2099 lines; diff suppressed because it is too large)

fs/btrfs/raid56.h (new file, 51 lines):
@@ -0,0 +1,51 @@
/*
 * Copyright (C) 2012 Fusion-io  All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#ifndef __BTRFS_RAID56__
#define __BTRFS_RAID56__
static inline int nr_parity_stripes(struct map_lookup *map)
{
    if (map->type & BTRFS_BLOCK_GROUP_RAID5)
        return 1;
    else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
        return 2;
    else
        return 0;
}

static inline int nr_data_stripes(struct map_lookup *map)
{
    return map->num_stripes - nr_parity_stripes(map);
}
#define RAID5_P_STRIPE ((u64)-2)
#define RAID6_Q_STRIPE ((u64)-1)

#define is_parity_stripe(x) (((x) == RAID5_P_STRIPE) || \
                 ((x) == RAID6_Q_STRIPE))

int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
              struct btrfs_bio *bbio, u64 *raid_map,
              u64 stripe_len, int mirror_num);
int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
            struct btrfs_bio *bbio, u64 *raid_map,
            u64 stripe_len);

int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info);
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info);
#endif

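As a quick illustration of the two helpers above (a standalone sketch: map_lookup is reduced to the two fields the math needs, and the group-type bits are made-up stand-ins rather than the real on-disk flag values):

#include <assert.h>
#include <stdio.h>

/* Stand-in flag bits and a reduced map_lookup, for illustration only. */
#define BLOCK_GROUP_RAID5 (1 << 0)
#define BLOCK_GROUP_RAID6 (1 << 1)

struct map_lookup {
    unsigned long type;
    int num_stripes;
};

static int nr_parity_stripes(const struct map_lookup *map)
{
    if (map->type & BLOCK_GROUP_RAID5)
        return 1;
    if (map->type & BLOCK_GROUP_RAID6)
        return 2;
    return 0;
}

static int nr_data_stripes(const struct map_lookup *map)
{
    return map->num_stripes - nr_parity_stripes(map);
}

int main(void)
{
    /* A 6-device RAID6 chunk: 4 data stripes plus P and Q. */
    struct map_lookup r6 = { .type = BLOCK_GROUP_RAID6, .num_stripes = 6 };
    /* A 3-device RAID5 chunk: 2 data stripes plus P. */
    struct map_lookup r5 = { .type = BLOCK_GROUP_RAID5, .num_stripes = 3 };

    assert(nr_data_stripes(&r6) == 4);
    assert(nr_data_stripes(&r5) == 2);
    printf("raid6: %d data stripes, raid5: %d data stripes\n",
           nr_data_stripes(&r6), nr_data_stripes(&r5));
    return 0;
}

This is also why the scrub change below skips stripes with num >= nr_data_stripes(map): those are the parity stripes, which scrub cannot verify yet.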
@@ -3017,7 +3017,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
            }
        }

        page_start = (u64)page->index << PAGE_CACHE_SHIFT;
        page_start = page_offset(page);
        page_end = page_start + PAGE_CACHE_SIZE - 1;

        lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);

@@ -28,6 +28,7 @@
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "raid56.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all

@@ -2254,6 +2255,13 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
    struct btrfs_device *extent_dev;
    int extent_mirror_num;

    if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
             BTRFS_BLOCK_GROUP_RAID6)) {
        if (num >= nr_data_stripes(map)) {
            return 0;
        }
    }

    nstripes = length;
    offset = 0;
    do_div(nstripes, map->stripe_len);

@@ -2708,7 +2716,7 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
    int ret;
    struct btrfs_root *root = sctx->dev_root;

    if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
    if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
        return -EIO;

    gen = root->fs_info->last_trans_committed;

@@ -85,6 +85,7 @@ struct send_ctx {
    u32 send_max_size;
    u64 total_send_size;
    u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
    u64 flags;  /* 'flags' member of btrfs_ioctl_send_args is u64 */

    struct vfsmount *mnt;

@@ -3709,6 +3710,39 @@ out:
    return ret;
}

/*
 * Send an update extent command to user space.
 */
static int send_update_extent(struct send_ctx *sctx,
                  u64 offset, u32 len)
{
    int ret = 0;
    struct fs_path *p;

    p = fs_path_alloc(sctx);
    if (!p)
        return -ENOMEM;

    ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
    if (ret < 0)
        goto out;

    ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
    if (ret < 0)
        goto out;

    TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
    TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
    TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);

    ret = send_cmd(sctx);

tlv_put_failure:
out:
    fs_path_free(sctx, p);
    return ret;
}

static int send_write_or_clone(struct send_ctx *sctx,
                   struct btrfs_path *path,
                   struct btrfs_key *key,

@@ -3744,7 +3778,11 @@ static int send_write_or_clone(struct send_ctx *sctx,
        goto out;
    }

    if (!clone_root) {
    if (clone_root) {
        ret = send_clone(sctx, offset, len, clone_root);
    } else if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA) {
        ret = send_update_extent(sctx, offset, len);
    } else {
        while (pos < len) {
            l = len - pos;
            if (l > BTRFS_SEND_READ_SIZE)

@@ -3757,10 +3795,7 @@ static int send_write_or_clone(struct send_ctx *sctx,
            pos += ret;
        }
        ret = 0;
    } else {
        ret = send_clone(sctx, offset, len, clone_root);
    }

out:
    return ret;
}

@@ -4536,7 +4571,6 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
    struct btrfs_fs_info *fs_info;
    struct btrfs_ioctl_send_args *arg = NULL;
    struct btrfs_key key;
    struct file *filp = NULL;
    struct send_ctx *sctx = NULL;
    u32 i;
    u64 *clone_sources_tmp = NULL;

@@ -4561,6 +4595,11 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
        goto out;
    }

    if (arg->flags & ~BTRFS_SEND_FLAG_NO_FILE_DATA) {
        ret = -EINVAL;
        goto out;
    }

    sctx = kzalloc(sizeof(struct send_ctx), GFP_NOFS);
    if (!sctx) {
        ret = -ENOMEM;

@@ -4572,6 +4611,8 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
    INIT_RADIX_TREE(&sctx->name_cache, GFP_NOFS);
    INIT_LIST_HEAD(&sctx->name_cache_list);

    sctx->flags = arg->flags;

    sctx->send_filp = fget(arg->send_fd);
    if (IS_ERR(sctx->send_filp)) {
        ret = PTR_ERR(sctx->send_filp);

@@ -4673,8 +4714,6 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
    goto out;

out:
    if (filp)
        fput(filp);
    kfree(arg);
    vfree(clone_sources_tmp);

@@ -86,6 +86,7 @@ enum btrfs_send_cmd {
    BTRFS_SEND_C_UTIMES,

    BTRFS_SEND_C_END,
    BTRFS_SEND_C_UPDATE_EXTENT,
    __BTRFS_SEND_C_MAX,
};
#define BTRFS_SEND_C_MAX (__BTRFS_SEND_C_MAX - 1)

@@ -41,13 +41,13 @@
#include <linux/slab.h>
#include <linux/cleancache.h>
#include <linux/ratelimit.h>
#include <linux/btrfs.h>
#include "compat.h"
#include "delayed-inode.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "xattr.h"
#include "volumes.h"

@@ -63,8 +63,7 @@
static const struct super_operations btrfs_super_ops;
static struct file_system_type btrfs_fs_type;

static const char *btrfs_decode_error(struct btrfs_fs_info *fs_info, int errno,
                      char nbuf[16])
static const char *btrfs_decode_error(int errno, char nbuf[16])
{
    char *errstr = NULL;

@@ -98,7 +97,7 @@ static void __save_error_info(struct btrfs_fs_info *fs_info)
     * today we only save the error info into ram.  Long term we'll
     * also send it down to the disk
     */
    fs_info->fs_state = BTRFS_SUPER_FLAG_ERROR;
    set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
}

static void save_error_info(struct btrfs_fs_info *fs_info)

@@ -114,7 +113,7 @@ static void btrfs_handle_error(struct btrfs_fs_info *fs_info)
    if (sb->s_flags & MS_RDONLY)
        return;

    if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
    if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
        sb->s_flags |= MS_RDONLY;
        printk(KERN_INFO "btrfs is forced readonly\n");
        /*

@@ -142,8 +141,6 @@ void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
    struct super_block *sb = fs_info->sb;
    char nbuf[16];
    const char *errstr;
    va_list args;
    va_start(args, fmt);

    /*
     * Special case: if the error is EROFS, and we're already

@@ -152,15 +149,18 @@ void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
    if (errno == -EROFS && (sb->s_flags & MS_RDONLY))
        return;

    errstr = btrfs_decode_error(fs_info, errno, nbuf);
    errstr = btrfs_decode_error(errno, nbuf);
    if (fmt) {
        struct va_format vaf = {
            .fmt = fmt,
            .va = &args,
        };
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;

        printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: %s (%pV)\n",
            sb->s_id, function, line, errstr, &vaf);
        va_end(args);
    } else {
        printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: %s\n",
            sb->s_id, function, line, errstr);

@@ -171,7 +171,6 @@ void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
        save_error_info(fs_info);
        btrfs_handle_error(fs_info);
    }
    va_end(args);
}

static const char * const logtypes[] = {

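The reshuffled va_start/va_end pairing above matters because a va_list may only be traversed once between va_start and va_end, and starting it before the early-return checks leaves it dangling on those paths. A userspace sketch of the same rule (plain vsnprintf in place of the kernel's %pV; all names here are invented for illustration):

#include <stdarg.h>
#include <stdio.h>

/* Start, consume, and end the va_list entirely inside the branch that
 * uses it, mirroring the fix above: no va_start before early returns. */
static void report(int err, const char *fmt, ...)
{
    if (err == 0)   /* early return: no va_start has happened yet */
        return;

    if (fmt) {
        char buf[256];
        va_list args;

        va_start(args, fmt);
        vsnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);
        fprintf(stderr, "error %d: %s\n", err, buf);
    } else {
        fprintf(stderr, "error %d\n", err);
    }
}

int main(void)
{
    report(0, "ignored %d", 1);             /* early-return path */
    report(5, "bad block %llu", 12345ULL);  /* formatted path */
    report(7, NULL);                        /* no-format path */
    return 0;
}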
@@ -261,7 +260,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
    char nbuf[16];
    const char *errstr;

    errstr = btrfs_decode_error(root->fs_info, errno, nbuf);
    errstr = btrfs_decode_error(errno, nbuf);
    btrfs_printk(root->fs_info,
             "%s:%d: Aborting unused transaction(%s).\n",
             function, line, errstr);

@@ -289,8 +288,8 @@ void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
    va_start(args, fmt);
    vaf.va = &args;

    errstr = btrfs_decode_error(fs_info, errno, nbuf);
    if (fs_info->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR)
    errstr = btrfs_decode_error(errno, nbuf);
    if (fs_info && (fs_info->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR))
        panic(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (%s)\n",
            s_id, function, line, &vaf, errstr);

@@ -438,6 +437,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
        case Opt_compress_force:
        case Opt_compress_force_type:
            compress_force = true;
            /* Fallthrough */
        case Opt_compress:
        case Opt_compress_type:
            if (token == Opt_compress ||

@@ -519,7 +519,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
        case Opt_alloc_start:
            num = match_strdup(&args[0]);
            if (num) {
                mutex_lock(&info->chunk_mutex);
                info->alloc_start = memparse(num, NULL);
                mutex_unlock(&info->chunk_mutex);
                kfree(num);
                printk(KERN_INFO
                    "btrfs: allocations start at %llu\n",

@@ -876,7 +878,7 @@ int btrfs_sync_fs(struct super_block *sb, int wait)

    btrfs_wait_ordered_extents(root, 0);

    trans = btrfs_attach_transaction(root);
    trans = btrfs_attach_transaction_barrier(root);
    if (IS_ERR(trans)) {
        /* no transaction, don't bother */
        if (PTR_ERR(trans) == -ENOENT)

@@ -1200,6 +1202,38 @@ static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
                  new_pool_size);
}

static inline void btrfs_remount_prepare(struct btrfs_fs_info *fs_info,
                     unsigned long old_opts, int flags)
{
    set_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);

    if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
        (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) ||
         (flags & MS_RDONLY))) {
        /* wait for any defraggers to finish */
        wait_event(fs_info->transaction_wait,
               (atomic_read(&fs_info->defrag_running) == 0));
        if (flags & MS_RDONLY)
            sync_filesystem(fs_info->sb);
    }
}

static inline void btrfs_remount_cleanup(struct btrfs_fs_info *fs_info,
                     unsigned long old_opts)
{
    /*
     * We need to clean up all defraggable inodes if autodefrag has been
     * turned off or the fs has gone R/O.
     */
    if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
        (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) ||
         (fs_info->sb->s_flags & MS_RDONLY))) {
        btrfs_cleanup_defrag_inodes(fs_info);
    }

    clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
}

static int btrfs_remount(struct super_block *sb, int *flags, char *data)
{
    struct btrfs_fs_info *fs_info = btrfs_sb(sb);

@@ -1213,6 +1247,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
    unsigned int old_metadata_ratio = fs_info->metadata_ratio;
    int ret;

    btrfs_remount_prepare(fs_info, old_opts, *flags);

    ret = btrfs_parse_options(root, data);
    if (ret) {
        ret = -EINVAL;

@@ -1223,7 +1259,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
        fs_info->thread_pool_size, old_thread_pool_size);

    if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
        return 0;
        goto out;

    if (*flags & MS_RDONLY) {
        /*

@@ -1278,7 +1314,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
        }
        sb->s_flags &= ~MS_RDONLY;
    }

out:
    btrfs_remount_cleanup(fs_info, old_opts);
    return 0;

restore:

@@ -1289,10 +1326,13 @@ restore:
    fs_info->mount_opt = old_opts;
    fs_info->compress_type = old_compress_type;
    fs_info->max_inline = old_max_inline;
    mutex_lock(&fs_info->chunk_mutex);
    fs_info->alloc_start = old_alloc_start;
    mutex_unlock(&fs_info->chunk_mutex);
    btrfs_resize_thread_pool(fs_info,
        old_thread_pool_size, fs_info->thread_pool_size);
    fs_info->metadata_ratio = old_metadata_ratio;
    btrfs_remount_cleanup(fs_info, old_opts);
    return ret;
}

@@ -1559,7 +1599,7 @@ static int btrfs_freeze(struct super_block *sb)
    struct btrfs_trans_handle *trans;
    struct btrfs_root *root = btrfs_sb(sb)->tree_root;

    trans = btrfs_attach_transaction(root);
    trans = btrfs_attach_transaction_barrier(root);
    if (IS_ERR(trans)) {
        /* no transaction, don't bother */
        if (PTR_ERR(trans) == -ENOENT)

@@ -1684,10 +1724,14 @@ static int __init init_btrfs_fs(void)
    if (err)
        goto free_delayed_inode;

    err = btrfs_interface_init();
    err = btrfs_delayed_ref_init();
    if (err)
        goto free_auto_defrag;

    err = btrfs_interface_init();
    if (err)
        goto free_delayed_ref;

    err = register_filesystem(&btrfs_fs_type);
    if (err)
        goto unregister_ioctl;

@@ -1699,6 +1743,8 @@ static int __init init_btrfs_fs(void)

unregister_ioctl:
    btrfs_interface_exit();
free_delayed_ref:
    btrfs_delayed_ref_exit();
free_auto_defrag:
    btrfs_auto_defrag_exit();
free_delayed_inode:

@@ -1720,6 +1766,7 @@ free_compress:
static void __exit exit_btrfs_fs(void)
{
    btrfs_destroy_cachep();
    btrfs_delayed_ref_exit();
    btrfs_auto_defrag_exit();
    btrfs_delayed_inode_exit();
    ordered_data_exit();

@@ -21,7 +21,6 @@
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/kobject.h>

#include "ctree.h"

@@ -40,7 +40,6 @@ void put_transaction(struct btrfs_transaction *transaction)
    if (atomic_dec_and_test(&transaction->use_count)) {
        BUG_ON(!list_empty(&transaction->list));
        WARN_ON(transaction->delayed_refs.root.rb_node);
        memset(transaction, 0, sizeof(*transaction));
        kmem_cache_free(btrfs_transaction_cachep, transaction);
    }
}

@@ -51,6 +50,14 @@ static noinline void switch_commit_root(struct btrfs_root *root)
    root->commit_root = btrfs_root_node(root);
}

static inline int can_join_transaction(struct btrfs_transaction *trans,
                       int type)
{
    return !(trans->in_commit &&
         type != TRANS_JOIN &&
         type != TRANS_JOIN_NOLOCK);
}

/*
 * either allocate a new transaction or hop into the existing one
 */

@@ -62,7 +69,7 @@ static noinline int join_transaction(struct btrfs_root *root, int type)
    spin_lock(&fs_info->trans_lock);
loop:
    /* The file system has been taken offline. No new transactions. */
    if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
    if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
        spin_unlock(&fs_info->trans_lock);
        return -EROFS;
    }

@@ -86,6 +93,10 @@ loop:
        spin_unlock(&fs_info->trans_lock);
        return cur_trans->aborted;
    }
    if (!can_join_transaction(cur_trans, type)) {
        spin_unlock(&fs_info->trans_lock);
        return -EBUSY;
    }
    atomic_inc(&cur_trans->use_count);
    atomic_inc(&cur_trans->num_writers);
    cur_trans->num_joined++;

@@ -113,7 +124,7 @@ loop:
     */
    kmem_cache_free(btrfs_transaction_cachep, cur_trans);
    goto loop;
    } else if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
    } else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
        spin_unlock(&fs_info->trans_lock);
        kmem_cache_free(btrfs_transaction_cachep, cur_trans);
        return -EROFS;

@@ -155,8 +166,12 @@ loop:

    spin_lock_init(&cur_trans->commit_lock);
    spin_lock_init(&cur_trans->delayed_refs.lock);
    atomic_set(&cur_trans->delayed_refs.procs_running_refs, 0);
    atomic_set(&cur_trans->delayed_refs.ref_seq, 0);
    init_waitqueue_head(&cur_trans->delayed_refs.wait);

    INIT_LIST_HEAD(&cur_trans->pending_snapshots);
    INIT_LIST_HEAD(&cur_trans->ordered_operations);
    list_add_tail(&cur_trans->list, &fs_info->trans_list);
    extent_io_tree_init(&cur_trans->dirty_pages,
                fs_info->btree_inode->i_mapping);

@@ -301,7 +316,7 @@ start_transaction(struct btrfs_root *root, u64 num_items, int type,
    int ret;
    u64 qgroup_reserved = 0;

    if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
    if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
        return ERR_PTR(-EROFS);

    if (current->journal_info) {

@@ -359,8 +374,11 @@ again:

    do {
        ret = join_transaction(root, type);
        if (ret == -EBUSY)
        if (ret == -EBUSY) {
            wait_current_trans(root);
            if (unlikely(type == TRANS_ATTACH))
                ret = -ENOENT;
        }
    } while (ret == -EBUSY);

    if (ret < 0) {

@@ -382,9 +400,10 @@ again:
    h->block_rsv = NULL;
    h->orig_rsv = NULL;
    h->aborted = 0;
    h->qgroup_reserved = qgroup_reserved;
    h->qgroup_reserved = 0;
    h->delayed_ref_elem.seq = 0;
    h->type = type;
    h->allocating_chunk = false;
    INIT_LIST_HEAD(&h->qgroup_ref_list);
    INIT_LIST_HEAD(&h->new_bgs);

@@ -400,6 +419,7 @@ again:
        h->block_rsv = &root->fs_info->trans_block_rsv;
        h->bytes_reserved = num_bytes;
    }
    h->qgroup_reserved = qgroup_reserved;

got_it:
    btrfs_record_root_in_trans(h, root);

@@ -451,11 +471,43 @@ struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root
    return start_transaction(root, 0, TRANS_USERSPACE, 0);
}

/*
 * btrfs_attach_transaction() - catch the running transaction
 *
 * It is used when we want to commit the current transaction, but
 * don't want to start a new one.
 *
 * Note: If this function return -ENOENT, it just means there is no
 * running transaction. But it is possible that the inactive transaction
 * is still in the memory, not fully on disk. If you hope there is no
 * inactive transaction in the fs when -ENOENT is returned, you should
 * invoke
 *     btrfs_attach_transaction_barrier()
 */
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
    return start_transaction(root, 0, TRANS_ATTACH, 0);
}

/*
 * btrfs_attach_transaction_barrier() - catch the running transaction
 *
 * It is similar to the above function, the difference is this one
 * will wait for all the inactive transactions until they fully
 * complete.
 */
struct btrfs_trans_handle *
btrfs_attach_transaction_barrier(struct btrfs_root *root)
{
    struct btrfs_trans_handle *trans;

    trans = start_transaction(root, 0, TRANS_ATTACH, 0);
    if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
        btrfs_wait_for_commit(root, 0);

    return trans;
}

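Both callers converted in the super.c hunks earlier in this diff (btrfs_sync_fs() and btrfs_freeze()) use the new helper the same way. A minimal sketch of that attach-then-commit pattern, with an invented wrapper name and error handling trimmed (illustrative, not a new API):

/* Sketch of the pattern from btrfs_sync_fs()/btrfs_freeze() above. */
static int commit_if_running(struct btrfs_root *root)
{
    struct btrfs_trans_handle *trans;

    trans = btrfs_attach_transaction_barrier(root);
    if (IS_ERR(trans)) {
        /*
         * -ENOENT: no running transaction, and any inactive one has
         * been waited out by the barrier variant, so nothing to do.
         */
        if (PTR_ERR(trans) == -ENOENT)
            return 0;
        return PTR_ERR(trans);
    }
    return btrfs_commit_transaction(trans, root);
}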
/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
                     struct btrfs_transaction *commit)

@@ -587,7 +639,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
    if (!list_empty(&trans->new_bgs))
        btrfs_create_pending_block_groups(trans, root);

    while (count < 2) {
    while (count < 1) {
        unsigned long cur = trans->delayed_ref_updates;
        trans->delayed_ref_updates = 0;
        if (cur &&

@@ -599,6 +651,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
        }
        count++;
    }

    btrfs_trans_release_metadata(trans, root);
    trans->block_rsv = NULL;

@@ -644,12 +697,10 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
        btrfs_run_delayed_iputs(root);

    if (trans->aborted ||
        root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
        test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
        err = -EIO;
    }
    assert_qgroups_uptodate(trans);

    memset(trans, 0, sizeof(*trans));
    kmem_cache_free(btrfs_trans_handle_cachep, trans);
    return err;
}

@@ -696,7 +747,9 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
    struct extent_state *cached_state = NULL;
    u64 start = 0;
    u64 end;
    struct blk_plug plug;

    blk_start_plug(&plug);
    while (!find_first_extent_bit(dirty_pages, start, &start, &end,
                      mark, &cached_state)) {
        convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,

@@ -710,6 +763,7 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
    }
    if (err)
        werr = err;
    blk_finish_plug(&plug);
    return werr;
}

@@ -960,10 +1014,10 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
}

/*
 * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
 * otherwise every leaf in the btree is read and defragged.
 * defrag a given btree.
 * Every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
int btrfs_defrag_root(struct btrfs_root *root)
{
    struct btrfs_fs_info *info = root->fs_info;
    struct btrfs_trans_handle *trans;

@@ -977,7 +1031,7 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
        if (IS_ERR(trans))
            return PTR_ERR(trans);

        ret = btrfs_defrag_leaves(trans, root, cacheonly);
        ret = btrfs_defrag_leaves(trans, root);

        btrfs_end_transaction(trans, root);
        btrfs_btree_balance_dirty(info->tree_root);

@@ -985,6 +1039,12 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)

        if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
            break;

        if (btrfs_defrag_cancelled(root->fs_info)) {
            printk(KERN_DEBUG "btrfs: defrag_root cancelled\n");
            ret = -EAGAIN;
            break;
        }
    }
    root->defrag_running = 0;
    return ret;

@@ -1007,7 +1067,6 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
    struct inode *parent_inode;
    struct btrfs_path *path;
    struct btrfs_dir_item *dir_item;
    struct dentry *parent;
    struct dentry *dentry;
    struct extent_buffer *tmp;
    struct extent_buffer *old;

@@ -1022,7 +1081,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
    path = btrfs_alloc_path();
    if (!path) {
        ret = pending->error = -ENOMEM;
        goto path_alloc_fail;
        return ret;
    }

    new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);

@@ -1062,10 +1121,10 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,

    rsv = trans->block_rsv;
    trans->block_rsv = &pending->block_rsv;
    trans->bytes_reserved = trans->block_rsv->reserved;

    dentry = pending->dentry;
    parent = dget_parent(dentry);
    parent_inode = parent->d_inode;
    parent_inode = pending->dir;
    parent_root = BTRFS_I(parent_inode)->root;
    record_root_in_trans(trans, parent_root);

@@ -1213,14 +1272,12 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
    if (ret)
        btrfs_abort_transaction(trans, root, ret);
fail:
    dput(parent);
    trans->block_rsv = rsv;
    trans->bytes_reserved = 0;
no_free_objectid:
    kfree(new_root_item);
root_item_alloc_fail:
    btrfs_free_path(path);
path_alloc_fail:
    btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
    return ret;
}

@@ -1306,13 +1363,13 @@ static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
struct btrfs_async_commit {
    struct btrfs_trans_handle *newtrans;
    struct btrfs_root *root;
    struct delayed_work work;
    struct work_struct work;
};

static void do_async_commit(struct work_struct *work)
{
    struct btrfs_async_commit *ac =
        container_of(work, struct btrfs_async_commit, work.work);
        container_of(work, struct btrfs_async_commit, work);

    /*
     * We've got freeze protection passed with the transaction.

@@ -1340,7 +1397,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
    if (!ac)
        return -ENOMEM;

    INIT_DELAYED_WORK(&ac->work, do_async_commit);
    INIT_WORK(&ac->work, do_async_commit);
    ac->root = root;
    ac->newtrans = btrfs_join_transaction(root);
    if (IS_ERR(ac->newtrans)) {

@@ -1364,7 +1421,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
        &root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
        1, _THIS_IP_);

    schedule_delayed_work(&ac->work, 0);
    schedule_work(&ac->work);

    /* wait for transaction to start and unblock */
    if (wait_for_unblock)

@@ -1384,6 +1441,7 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
                struct btrfs_root *root, int err)
{
    struct btrfs_transaction *cur_trans = trans->transaction;
    DEFINE_WAIT(wait);

    WARN_ON(trans->use_count > 1);

@@ -1392,8 +1450,13 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
    spin_lock(&root->fs_info->trans_lock);
    list_del_init(&cur_trans->list);
    if (cur_trans == root->fs_info->running_transaction) {
        root->fs_info->trans_no_join = 1;
        spin_unlock(&root->fs_info->trans_lock);
        wait_event(cur_trans->writer_wait,
               atomic_read(&cur_trans->num_writers) == 1);

        spin_lock(&root->fs_info->trans_lock);
        root->fs_info->running_transaction = NULL;
        root->fs_info->trans_no_join = 0;
    }
    spin_unlock(&root->fs_info->trans_lock);

@@ -1427,7 +1490,9 @@ static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
    }

    if (flush_on_commit || snap_pending) {
        btrfs_start_delalloc_inodes(root, 1);
        ret = btrfs_start_delalloc_inodes(root, 1);
        if (ret)
            return ret;
        btrfs_wait_ordered_extents(root, 1);
    }

@@ -1449,9 +1514,9 @@ static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
     * it here and know for sure that nothing new will be added
     * to the list
     */
    btrfs_run_ordered_operations(root, 1);
    ret = btrfs_run_ordered_operations(trans, root, 1);

    return 0;
    return ret;
}

/*

@@ -1472,27 +1537,35 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
    int should_grow = 0;
    unsigned long now = get_seconds();

    ret = btrfs_run_ordered_operations(root, 0);
    ret = btrfs_run_ordered_operations(trans, root, 0);
    if (ret) {
        btrfs_abort_transaction(trans, root, ret);
        goto cleanup_transaction;
        btrfs_end_transaction(trans, root);
        return ret;
    }

    /* Stop the commit early if ->aborted is set */
    if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
        ret = cur_trans->aborted;
        goto cleanup_transaction;
        btrfs_end_transaction(trans, root);
        return ret;
    }

    /* make a pass through all the delayed refs we have so far
     * any running procs may add more while we are here
     */
    ret = btrfs_run_delayed_refs(trans, root, 0);
    if (ret)
        goto cleanup_transaction;
    if (ret) {
        btrfs_end_transaction(trans, root);
        return ret;
    }

    btrfs_trans_release_metadata(trans, root);
    trans->block_rsv = NULL;
    if (trans->qgroup_reserved) {
        btrfs_qgroup_free(root, trans->qgroup_reserved);
        trans->qgroup_reserved = 0;
    }

    cur_trans = trans->transaction;

@@ -1506,8 +1579,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
    btrfs_create_pending_block_groups(trans, root);

    ret = btrfs_run_delayed_refs(trans, root, 0);
    if (ret)
        goto cleanup_transaction;
    if (ret) {
        btrfs_end_transaction(trans, root);
        return ret;
    }

    spin_lock(&cur_trans->commit_lock);
    if (cur_trans->in_commit) {

@@ -1771,6 +1846,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
cleanup_transaction:
    btrfs_trans_release_metadata(trans, root);
    trans->block_rsv = NULL;
    if (trans->qgroup_reserved) {
        btrfs_qgroup_free(root, trans->qgroup_reserved);
        trans->qgroup_reserved = 0;
    }
    btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
//  WARN_ON(1);
    if (current->journal_info == trans)

@@ -43,6 +43,7 @@ struct btrfs_transaction {
    wait_queue_head_t writer_wait;
    wait_queue_head_t commit_wait;
    struct list_head pending_snapshots;
    struct list_head ordered_operations;
    struct btrfs_delayed_ref_root delayed_refs;
    int aborted;
};

@@ -68,6 +69,7 @@ struct btrfs_trans_handle {
    struct btrfs_block_rsv *orig_rsv;
    short aborted;
    short adding_csums;
    bool allocating_chunk;
    enum btrfs_trans_type type;
    /*
     * this root is only needed to validate that the root passed to

@@ -82,11 +84,13 @@ struct btrfs_trans_handle {

struct btrfs_pending_snapshot {
    struct dentry *dentry;
    struct inode *dir;
    struct btrfs_root *root;
    struct btrfs_root *snap;
    struct btrfs_qgroup_inherit *inherit;
    /* block reservation for the operation */
    struct btrfs_block_rsv block_rsv;
    u64 qgroup_reserved;
    /* extra metadata reservation for relocation */
    int error;
    bool readonly;

@@ -110,13 +114,15 @@ struct btrfs_trans_handle *btrfs_start_transaction_lflush(
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
                    struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root);
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid);
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
                     struct btrfs_root *root);

int btrfs_add_dead_root(struct btrfs_root *root);
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly);
int btrfs_defrag_root(struct btrfs_root *root);
int btrfs_clean_old_snapshots(struct btrfs_root *root);
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                 struct btrfs_root *root);

@@ -23,13 +23,14 @@
#include "transaction.h"
#include "locking.h"

/* defrag all the leaves in a given btree.  If cache_only == 1, don't read
 * things from disk, otherwise read all the leaves and try to get key order to
/*
 * Defrag all the leaves in a given btree.
 * Read all the leaves and try to get key order to
 * better reflect disk order
 */

int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
            struct btrfs_root *root, int cache_only)
            struct btrfs_root *root)
{
    struct btrfs_path *path = NULL;
    struct btrfs_key key;

@@ -41,9 +42,6 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
    u64 last_ret = 0;
    u64 min_trans = 0;

    if (cache_only)
        goto out;

    if (root->fs_info->extent_root == root) {
        /*
         * there's recursion here right now in the tree locking,

@@ -86,11 +84,8 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
    }

    path->keep_locks = 1;
    if (cache_only)
        min_trans = root->defrag_trans_start;

    ret = btrfs_search_forward(root, &key, NULL, path,
                   cache_only, min_trans);
    ret = btrfs_search_forward(root, &key, NULL, path, min_trans);
    if (ret < 0)
        goto out;
    if (ret > 0) {

@@ -109,11 +104,11 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
        goto out;
    }
    path->slots[1] = btrfs_header_nritems(path->nodes[1]);
    next_key_ret = btrfs_find_next_key(root, path, &key, 1, cache_only,
    next_key_ret = btrfs_find_next_key(root, path, &key, 1,
                       min_trans);
    ret = btrfs_realloc_node(trans, root,
                 path->nodes[1], 0,
                 cache_only, &last_ret,
                 &last_ret,
                 &root->defrag_progress);
    if (ret) {
        WARN_ON(ret == -EAGAIN);

@@ -278,8 +278,7 @@ static int process_one_buffer(struct btrfs_root *log,
                  struct walk_control *wc, u64 gen)
{
    if (wc->pin)
        btrfs_pin_extent_for_log_replay(wc->trans,
                        log->fs_info->extent_root,
        btrfs_pin_extent_for_log_replay(log->fs_info->extent_root,
                        eb->start, eb->len);

    if (btrfs_buffer_uptodate(eb, gen, 0)) {

@@ -485,7 +484,6 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
                      struct btrfs_key *key)
{
    int found_type;
    u64 mask = root->sectorsize - 1;
    u64 extent_end;
    u64 start = key->offset;
    u64 saved_nbytes;

@@ -502,7 +500,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
        extent_end = start + btrfs_file_extent_num_bytes(eb, item);
    else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
        size = btrfs_file_extent_inline_len(eb, item);
        extent_end = (start + size + mask) & ~mask;
        extent_end = ALIGN(start + size, root->sectorsize);
    } else {
        ret = 0;
        goto out;

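The open-coded round-up being replaced and the ALIGN() macro compute the same value whenever the alignment is a power of two, which a btrfs sectorsize always is. A quick standalone check of that equivalence (illustrative only; the macro body matches the kernel's power-of-two definition):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Power-of-two round-up, as the kernel defines it. */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
    uint64_t sectorsize = 4096;     /* a typical btrfs sectorsize */
    uint64_t mask = sectorsize - 1;

    /* The old expression and ALIGN() agree on every input. */
    for (uint64_t v = 0; v < 3 * sectorsize; v++)
        assert(((v + mask) & ~mask) == ALIGN(v, sectorsize));

    printf("ALIGN(5000, 4096) = %llu\n",
           (unsigned long long)ALIGN(5000, sectorsize));   /* 8192 */
    return 0;
}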
@@ -2281,6 +2279,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
    unsigned long log_transid = 0;

    mutex_lock(&root->log_mutex);
    log_transid = root->log_transid;
    index1 = root->log_transid % 2;
    if (atomic_read(&root->log_commit[index1])) {
        wait_log_commit(trans, root, root->log_transid);

@@ -2308,11 +2307,11 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
    /* bail out if we need to do a full commit */
    if (root->fs_info->last_trans_log_full_commit == trans->transid) {
        ret = -EAGAIN;
        btrfs_free_logged_extents(log, log_transid);
        mutex_unlock(&root->log_mutex);
        goto out;
    }

    log_transid = root->log_transid;
    if (log_transid % 2 == 0)
        mark = EXTENT_DIRTY;
    else

@@ -2324,6 +2323,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
    ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
    if (ret) {
        btrfs_abort_transaction(trans, root, ret);
        btrfs_free_logged_extents(log, log_transid);
        mutex_unlock(&root->log_mutex);
        goto out;
    }

@@ -2363,6 +2363,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
        }
        root->fs_info->last_trans_log_full_commit = trans->transid;
        btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
        btrfs_free_logged_extents(log, log_transid);
        mutex_unlock(&log_root_tree->log_mutex);
        ret = -EAGAIN;
        goto out;

@@ -2373,6 +2374,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
        btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
        wait_log_commit(trans, log_root_tree,
                log_root_tree->log_transid);
        btrfs_free_logged_extents(log, log_transid);
        mutex_unlock(&log_root_tree->log_mutex);
        ret = 0;
        goto out;

@@ -2392,6 +2394,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
     */
    if (root->fs_info->last_trans_log_full_commit == trans->transid) {
        btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
        btrfs_free_logged_extents(log, log_transid);
        mutex_unlock(&log_root_tree->log_mutex);
        ret = -EAGAIN;
        goto out_wake_log_root;

@@ -2402,10 +2405,12 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
            EXTENT_DIRTY | EXTENT_NEW);
    if (ret) {
        btrfs_abort_transaction(trans, root, ret);
        btrfs_free_logged_extents(log, log_transid);
        mutex_unlock(&log_root_tree->log_mutex);
        goto out_wake_log_root;
    }
    btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
    btrfs_wait_logged_extents(log, log_transid);

    btrfs_set_super_log_root(root->fs_info->super_for_commit,
                 log_root_tree->node->start);

@@ -2461,8 +2466,10 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
        .process_func = process_one_buffer
    };

    ret = walk_log_tree(trans, log, &wc);
    BUG_ON(ret);
    if (trans) {
        ret = walk_log_tree(trans, log, &wc);
        BUG_ON(ret);
    }

    while (1) {
        ret = find_first_extent_bit(&log->dirty_log_pages,

@@ -2475,6 +2482,14 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
              EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
    }

    /*
     * We may have short-circuited the log tree with the full commit logic
     * and left ordered extents on our list, so clear these out to keep us
     * from leaking inodes and memory.
     */
    btrfs_free_logged_extents(log, 0);
    btrfs_free_logged_extents(log, 1);

    free_extent_buffer(log->node);
    kfree(log);
}

@@ -2724,7 +2739,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
    path->keep_locks = 1;

    ret = btrfs_search_forward(root, &min_key, &max_key,
                   path, 0, trans->transid);
                   path, trans->transid);

    /*
     * we didn't find anything from this transaction, see if there

@@ -3271,16 +3286,21 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
    struct btrfs_root *log = root->log_root;
    struct btrfs_file_extent_item *fi;
    struct extent_buffer *leaf;
    struct btrfs_ordered_extent *ordered;
    struct list_head ordered_sums;
    struct btrfs_map_token token;
    struct btrfs_key key;
    u64 csum_offset = em->mod_start - em->start;
    u64 csum_len = em->mod_len;
    u64 mod_start = em->mod_start;
    u64 mod_len = em->mod_len;
    u64 csum_offset;
    u64 csum_len;
    u64 extent_offset = em->start - em->orig_start;
    u64 block_len;
    int ret;
    int index = log->log_transid % 2;
    bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

insert:
    INIT_LIST_HEAD(&ordered_sums);
    btrfs_init_map_token(&token);
    key.objectid = btrfs_ino(inode);

@@ -3296,6 +3316,23 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
    leaf = path->nodes[0];
    fi = btrfs_item_ptr(leaf, path->slots[0],
                struct btrfs_file_extent_item);

    /*
     * If we are overwriting an inline extent with a real one then we need
     * to just delete the inline extent as it may not be large enough to
     * have the entire file_extent_item.
     */
    if (ret && btrfs_token_file_extent_type(leaf, fi, &token) ==
        BTRFS_FILE_EXTENT_INLINE) {
        ret = btrfs_del_item(trans, log, path);
        btrfs_release_path(path);
        if (ret) {
            path->really_keep_locks = 0;
            return ret;
        }
        goto insert;
    }

    btrfs_set_token_file_extent_generation(leaf, fi, em->generation,
                           &token);
    if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {

@@ -3362,6 +3399,92 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
        csum_len = block_len;
    }

    /*
     * First check and see if our csums are on our outstanding ordered
     * extents.
     */
again:
    spin_lock_irq(&log->log_extents_lock[index]);
    list_for_each_entry(ordered, &log->logged_list[index], log_list) {
        struct btrfs_ordered_sum *sum;

        if (!mod_len)
            break;

        if (ordered->inode != inode)
            continue;

        if (ordered->file_offset + ordered->len <= mod_start ||
            mod_start + mod_len <= ordered->file_offset)
            continue;

        /*
         * We are going to copy all the csums on this ordered extent, so
         * go ahead and adjust mod_start and mod_len in case this
         * ordered extent has already been logged.
         */
        if (ordered->file_offset > mod_start) {
            if (ordered->file_offset + ordered->len >=
                mod_start + mod_len)
                mod_len = ordered->file_offset - mod_start;
            /*
             * If we have this case
             *
             * |--------- logged extent ---------|
             *       |----- ordered extent ----|
             *
             * Just don't mess with mod_start and mod_len, we'll
             * just end up logging more csums than we need and it
             * will be ok.
             */
        } else {
            if (ordered->file_offset + ordered->len <
                mod_start + mod_len) {
                mod_len = (mod_start + mod_len) -
                    (ordered->file_offset + ordered->len);
                mod_start = ordered->file_offset +
                    ordered->len;
            } else {
                mod_len = 0;
            }
        }

        /*
         * To keep us from looping for the above case of an ordered
         * extent that falls inside of the logged extent.
         */
        if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM,
                     &ordered->flags))
            continue;
        atomic_inc(&ordered->refs);
        spin_unlock_irq(&log->log_extents_lock[index]);
        /*
         * we've dropped the lock, we must either break or
         * start over after this.
         */

        wait_event(ordered->wait, ordered->csum_bytes_left == 0);

        list_for_each_entry(sum, &ordered->list, list) {
            ret = btrfs_csum_file_blocks(trans, log, sum);
            if (ret) {
                btrfs_put_ordered_extent(ordered);
                goto unlocked;
            }
        }
        btrfs_put_ordered_extent(ordered);
        goto again;

    }
    spin_unlock_irq(&log->log_extents_lock[index]);
unlocked:

    if (!mod_len || ret)
        return ret;

    csum_offset = mod_start - em->start;
    csum_len = mod_len;

    /* block start is already adjusted for the file extent offset. */
    ret = btrfs_lookup_csums_range(log->fs_info->csum_root,
                       em->block_start + csum_offset,

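The mod_start/mod_len adjustment above is plain interval arithmetic: the not-yet-logged range is clipped against each overlapping ordered extent, except when the ordered extent sits strictly inside it, in which case a few extra csums get logged harmlessly. A standalone sketch of the same three cases (names shortened, values invented, purely illustrative):

#include <stdint.h>
#include <stdio.h>

/* Clip [*start, *start + *len) against an overlapping ordered range
 * [o_start, o_start + o_len), following the cases in log_one_extent(). */
static void clip_range(uint64_t *start, uint64_t *len,
               uint64_t o_start, uint64_t o_len)
{
    if (o_start + o_len <= *start || *start + *len <= o_start)
        return;         /* no overlap: nothing to do */

    if (o_start > *start) {
        if (o_start + o_len >= *start + *len)
            *len = o_start - *start;    /* trim the tail */
        /* else: ordered range strictly inside; leave as-is */
    } else if (o_start + o_len < *start + *len) {
        *len = (*start + *len) - (o_start + o_len); /* trim the head */
        *start = o_start + o_len;
    } else {
        *len = 0;       /* fully covered, nothing left to log */
    }
}

int main(void)
{
    uint64_t s = 100, l = 100;

    clip_range(&s, &l, 150, 100);   /* overlaps the tail */
    printf("tail:  start=%llu len=%llu\n",
           (unsigned long long)s, (unsigned long long)l);  /* 100, 50 */

    s = 100; l = 100;
    clip_range(&s, &l, 50, 100);    /* overlaps the head */
    printf("head:  start=%llu len=%llu\n",
           (unsigned long long)s, (unsigned long long)l);  /* 150, 50 */

    s = 100; l = 100;
    clip_range(&s, &l, 50, 200);    /* fully covers the range */
    printf("cover: start=%llu len=%llu\n",
           (unsigned long long)s, (unsigned long long)l);  /* 100, 0 */
    return 0;
}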
@@ -3393,6 +3516,7 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
    struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
    u64 test_gen;
    int ret = 0;
    int num = 0;

    INIT_LIST_HEAD(&extents);

@@ -3401,16 +3525,31 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,

    list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
        list_del_init(&em->list);

        /*
         * Just an arbitrary number, this can be really CPU intensive
         * once we start getting a lot of extents, and really once we
         * have a bunch of extents we just want to commit since it will
         * be faster.
         */
        if (++num > 32768) {
            list_del_init(&tree->modified_extents);
            ret = -EFBIG;
            goto process;
        }

        if (em->generation <= test_gen)
            continue;
        /* Need a ref to keep it from getting evicted from cache */
        atomic_inc(&em->refs);
        set_bit(EXTENT_FLAG_LOGGING, &em->flags);
        list_add_tail(&em->list, &extents);
        num++;
    }

    list_sort(NULL, &extents, extent_cmp);

process:
    while (!list_empty(&extents)) {
        em = list_entry(extents.next, struct extent_map, list);

@@ -3513,6 +3652,8 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,

    mutex_lock(&BTRFS_I(inode)->log_mutex);

    btrfs_get_logged_extents(log, inode);

    /*
     * a brute force approach to making sure we get the most uptodate
     * copies of everything.

@@ -3558,7 +3699,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
    while (1) {
        ins_nr = 0;
        ret = btrfs_search_forward(root, &min_key, &max_key,
                                   path, 0, trans->transid);
                                   path, trans->transid);
        if (ret != 0)
            break;
again:

@@ -3656,6 +3797,8 @@ log_extents:
    BTRFS_I(inode)->logged_trans = trans->transid;
    BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans;
out_unlock:
    if (err)
        btrfs_free_logged_extents(log, log->log_transid);
    mutex_unlock(&BTRFS_I(inode)->log_mutex);

    btrfs_free_path(path);

@@ -3822,7 +3965,6 @@ int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
end_trans:
    dput(old_parent);
    if (ret < 0) {
        WARN_ON(ret != -ENOSPC);
        root->fs_info->last_trans_log_full_commit = trans->transid;
        ret = 1;
    }
fs/btrfs/ulist.c

@@ -5,7 +5,7 @@
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/export.h>
#include "ulist.h"

/*
fs/btrfs/volumes.c

@@ -25,6 +25,8 @@
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <asm/div64.h>
#include "compat.h"
#include "ctree.h"
#include "extent_map.h"

@@ -32,6 +34,7 @@
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"

@@ -647,6 +650,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
        new_device->writeable = 0;
        new_device->in_fs_metadata = 0;
        new_device->can_discard = 0;
        spin_lock_init(&new_device->io_lock);
        list_replace_rcu(&device->dev_list, &new_device->dev_list);

        call_rcu(&device->rcu, free_device);
@@ -792,26 +796,75 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
    return ret;
}

/*
 * Look for a btrfs signature on a device. This may be called out of the mount path
 * and we are not allowed to call set_blocksize during the scan. The superblock
 * is read via pagecache
 */
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
                          struct btrfs_fs_devices **fs_devices_ret)
{
    struct btrfs_super_block *disk_super;
    struct block_device *bdev;
    struct buffer_head *bh;
    int ret;
    struct page *page;
    void *p;
    int ret = -EINVAL;
    u64 devid;
    u64 transid;
    u64 total_devices;
    u64 bytenr;
    pgoff_t index;

    /*
     * we would like to check all the supers, but that would make
     * a btrfs mount succeed after a mkfs from a different FS.
     * So, we need to add a special mount option to scan for
     * later supers, using BTRFS_SUPER_MIRROR_MAX instead
     */
    bytenr = btrfs_sb_offset(0);
    flags |= FMODE_EXCL;
    mutex_lock(&uuid_mutex);
    ret = btrfs_get_bdev_and_sb(path, flags, holder, 0, &bdev, &bh);
    if (ret)

    bdev = blkdev_get_by_path(path, flags, holder);

    if (IS_ERR(bdev)) {
        ret = PTR_ERR(bdev);
        goto error;
    disk_super = (struct btrfs_super_block *)bh->b_data;
    }

    /* make sure our super fits in the device */
    if (bytenr + PAGE_CACHE_SIZE >= i_size_read(bdev->bd_inode))
        goto error_bdev_put;

    /* make sure our super fits in the page */
    if (sizeof(*disk_super) > PAGE_CACHE_SIZE)
        goto error_bdev_put;

    /* make sure our super doesn't straddle pages on disk */
    index = bytenr >> PAGE_CACHE_SHIFT;
    if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_CACHE_SHIFT != index)
        goto error_bdev_put;

    /* pull in the page with our super */
    page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
                               index, GFP_NOFS);

    if (IS_ERR_OR_NULL(page))
        goto error_bdev_put;

    p = kmap(page);

    /* align our pointer to the offset of the super block */
    disk_super = p + (bytenr & ~PAGE_CACHE_MASK);

    if (btrfs_super_bytenr(disk_super) != bytenr ||
        disk_super->magic != cpu_to_le64(BTRFS_MAGIC))
        goto error_unmap;

    devid = btrfs_stack_device_id(&disk_super->dev_item);
    transid = btrfs_super_generation(disk_super);
    total_devices = btrfs_super_num_devices(disk_super);

    if (disk_super->label[0]) {
        if (disk_super->label[BTRFS_LABEL_SIZE - 1])
            disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
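The scan path above avoids set_blocksize by reading the super through the page cache, so it has to locate the superblock inside a page by hand. A userspace sketch of that arithmetic (assumes 4 KiB pages and the 64 KiB primary super offset):

#include <stdio.h>

int main(void)
{
    unsigned long long bytenr = 64 * 1024;   /* btrfs_sb_offset(0) */
    unsigned long page_size = 4096;          /* PAGE_CACHE_SIZE */

    /* index: bytenr >> PAGE_CACHE_SHIFT; offset: bytenr & ~PAGE_CACHE_MASK */
    unsigned long long index = bytenr / page_size;
    unsigned long long offset = bytenr % page_size;

    printf("super lives in page %llu at offset %llu\n", index, offset);
    return 0;
}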
@@ -819,12 +872,19 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
    } else {
        printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
    }

    printk(KERN_CONT "devid %llu transid %llu %s\n",
           (unsigned long long)devid, (unsigned long long)transid, path);

    ret = device_list_add(path, disk_super, devid, fs_devices_ret);
    if (!ret && fs_devices_ret)
        (*fs_devices_ret)->total_devices = total_devices;
    brelse(bh);

error_unmap:
    kunmap(page);
    page_cache_release(page);

error_bdev_put:
    blkdev_put(bdev, flags);
error:
    mutex_unlock(&uuid_mutex);

@@ -1372,14 +1432,19 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
    u64 devid;
    u64 num_devices;
    u8 *dev_uuid;
    unsigned seq;
    int ret = 0;
    bool clear_super = false;

    mutex_lock(&uuid_mutex);

    all_avail = root->fs_info->avail_data_alloc_bits |
        root->fs_info->avail_system_alloc_bits |
        root->fs_info->avail_metadata_alloc_bits;
    do {
        seq = read_seqbegin(&root->fs_info->profiles_lock);

        all_avail = root->fs_info->avail_data_alloc_bits |
                    root->fs_info->avail_system_alloc_bits |
                    root->fs_info->avail_metadata_alloc_bits;
    } while (read_seqretry(&root->fs_info->profiles_lock, seq));

    num_devices = root->fs_info->fs_devices->num_devices;
    btrfs_dev_replace_lock(&root->fs_info->dev_replace);

@@ -1403,6 +1468,21 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
        goto out;
    }

    if ((all_avail & BTRFS_BLOCK_GROUP_RAID5) &&
        root->fs_info->fs_devices->rw_devices <= 2) {
        printk(KERN_ERR "btrfs: unable to go below two "
               "devices on raid5\n");
        ret = -EINVAL;
        goto out;
    }
    if ((all_avail & BTRFS_BLOCK_GROUP_RAID6) &&
        root->fs_info->fs_devices->rw_devices <= 3) {
        printk(KERN_ERR "btrfs: unable to go below three "
               "devices on raid6\n");
        ret = -EINVAL;
        goto out;
    }

    if (strcmp(device_path, "missing") == 0) {
        struct list_head *devices;
        struct btrfs_device *tmp;

@@ -2616,7 +2696,7 @@ static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
    chunk_used = btrfs_block_group_used(&cache->item);

    if (bargs->usage == 0)
        user_thresh = 0;
        user_thresh = 1;
    else if (bargs->usage > 100)
        user_thresh = cache->key.offset;
    else
@@ -2664,11 +2744,15 @@ static int chunk_drange_filter(struct extent_buffer *leaf,
        return 0;

    if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
        BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
        factor = 2;
    else
        factor = 1;
    factor = num_stripes / factor;
        BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
        factor = num_stripes / 2;
    } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
        factor = num_stripes - 1;
    } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
        factor = num_stripes - 2;
    } else {
        factor = num_stripes;
    }

    for (i = 0; i < num_stripes; i++) {
        stripe = btrfs_stripe_nr(chunk, i);
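The rewritten filter above reduces the raw stripe count to stripes that hold distinct data: mirrored profiles divide by two, raid5/6 subtract their parity stripes. A sketch of the same arithmetic:

#include <stdio.h>

int main(void)
{
    int num_stripes = 6;

    printf("raid1/raid10/dup: %d\n", num_stripes / 2); /* two copies */
    printf("raid5:            %d\n", num_stripes - 1); /* one parity stripe */
    printf("raid6:            %d\n", num_stripes - 2); /* P and Q stripes */
    printf("single/raid0:     %d\n", num_stripes);     /* no redundancy */
    return 0;
}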
@@ -2985,6 +3069,7 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
    int mixed = 0;
    int ret;
    u64 num_devices;
    unsigned seq;

    if (btrfs_fs_closing(fs_info) ||
        atomic_read(&fs_info->balance_pause_req) ||

@@ -3027,7 +3112,9 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
        allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
    else
        allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
                    BTRFS_BLOCK_GROUP_RAID10);
                    BTRFS_BLOCK_GROUP_RAID10 |
                    BTRFS_BLOCK_GROUP_RAID5 |
                    BTRFS_BLOCK_GROUP_RAID6);

    if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
        (!alloc_profile_is_valid(bctl->data.target, 1) ||
@@ -3067,23 +3154,29 @@ int btrfs_balance(struct btrfs_balance_control *bctl,

    /* allow to reduce meta or sys integrity only if force set */
    allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
              BTRFS_BLOCK_GROUP_RAID10;
    if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
        (fs_info->avail_system_alloc_bits & allowed) &&
        !(bctl->sys.target & allowed)) ||
        ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
        (fs_info->avail_metadata_alloc_bits & allowed) &&
        !(bctl->meta.target & allowed))) {
        if (bctl->flags & BTRFS_BALANCE_FORCE) {
            printk(KERN_INFO "btrfs: force reducing metadata "
                   "integrity\n");
        } else {
            printk(KERN_ERR "btrfs: balance will reduce metadata "
                   "integrity, use force if you want this\n");
            ret = -EINVAL;
            goto out;
              BTRFS_BLOCK_GROUP_RAID10 |
              BTRFS_BLOCK_GROUP_RAID5 |
              BTRFS_BLOCK_GROUP_RAID6;
    do {
        seq = read_seqbegin(&fs_info->profiles_lock);

        if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
             (fs_info->avail_system_alloc_bits & allowed) &&
             !(bctl->sys.target & allowed)) ||
            ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
             (fs_info->avail_metadata_alloc_bits & allowed) &&
             !(bctl->meta.target & allowed))) {
            if (bctl->flags & BTRFS_BALANCE_FORCE) {
                printk(KERN_INFO "btrfs: force reducing metadata "
                       "integrity\n");
            } else {
                printk(KERN_ERR "btrfs: balance will reduce metadata "
                       "integrity, use force if you want this\n");
                ret = -EINVAL;
                goto out;
            }
        }
    } while (read_seqretry(&fs_info->profiles_lock, seq));

    if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
        int num_tolerated_disk_barrier_failures;
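The do/while wrapped around the check above is the stock seqlock reader idiom: snapshot the sequence count, read the profile bits unlocked, retry if a writer raced in. A userspace sketch of the pattern (a toy sequence counter standing in for read_seqbegin/read_seqretry, which additionally handle memory ordering):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned seq;                    /* even = stable */
static unsigned long long profile_bits = 0x10;  /* toy shared data */

static unsigned read_begin(void) { return atomic_load(&seq); }

static int read_retry(unsigned start)
{
    /* retry if a writer was active (odd) or finished meanwhile */
    return (start & 1) || atomic_load(&seq) != start;
}

int main(void)
{
    unsigned s;
    unsigned long long snapshot;

    do {
        s = read_begin();
        snapshot = profile_bits;    /* the unlocked read */
    } while (read_retry(s));

    printf("profile bits: 0x%llx\n", snapshot);
    return 0;
}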
@@ -3127,21 +3220,16 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
    mutex_lock(&fs_info->balance_mutex);
    atomic_dec(&fs_info->balance_running);

    if (bargs) {
        memset(bargs, 0, sizeof(*bargs));
        update_ioctl_balance_args(fs_info, 0, bargs);
    }

    if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
        balance_need_close(fs_info)) {
        __cancel_balance(fs_info);
    }

    if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
        fs_info->num_tolerated_disk_barrier_failures =
            btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
    }

    if (bargs) {
        memset(bargs, 0, sizeof(*bargs));
        update_ioctl_balance_args(fs_info, 0, bargs);
    }

    wake_up(&fs_info->balance_wait_q);

    return ret;
@@ -3504,13 +3592,86 @@ static int btrfs_cmp_device_info(const void *a, const void *b)
}

struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
    { 2, 1, 0, 4, 2, 2 /* raid10 */ },
    { 1, 1, 2, 2, 2, 2 /* raid1 */ },
    { 1, 2, 1, 1, 1, 2 /* dup */ },
    { 1, 1, 0, 2, 1, 1 /* raid0 */ },
    { 1, 1, 1, 1, 1, 1 /* single */ },
    [BTRFS_RAID_RAID10] = {
        .sub_stripes    = 2,
        .dev_stripes    = 1,
        .devs_max       = 0,    /* 0 == as many as possible */
        .devs_min       = 4,
        .devs_increment = 2,
        .ncopies        = 2,
    },
    [BTRFS_RAID_RAID1] = {
        .sub_stripes    = 1,
        .dev_stripes    = 1,
        .devs_max       = 2,
        .devs_min       = 2,
        .devs_increment = 2,
        .ncopies        = 2,
    },
    [BTRFS_RAID_DUP] = {
        .sub_stripes    = 1,
        .dev_stripes    = 2,
        .devs_max       = 1,
        .devs_min       = 1,
        .devs_increment = 1,
        .ncopies        = 2,
    },
    [BTRFS_RAID_RAID0] = {
        .sub_stripes    = 1,
        .dev_stripes    = 1,
        .devs_max       = 0,
        .devs_min       = 2,
        .devs_increment = 1,
        .ncopies        = 1,
    },
    [BTRFS_RAID_SINGLE] = {
        .sub_stripes    = 1,
        .dev_stripes    = 1,
        .devs_max       = 1,
        .devs_min       = 1,
        .devs_increment = 1,
        .ncopies        = 1,
    },
    [BTRFS_RAID_RAID5] = {
        .sub_stripes    = 1,
        .dev_stripes    = 1,
        .devs_max       = 0,
        .devs_min       = 2,
        .devs_increment = 1,
        .ncopies        = 2,
    },
    [BTRFS_RAID_RAID6] = {
        .sub_stripes    = 1,
        .dev_stripes    = 1,
        .devs_max       = 0,
        .devs_min       = 3,
        .devs_increment = 1,
        .ncopies        = 3,
    },
};

static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
{
    /* TODO allow them to set a preferred stripe size */
    return 64 * 1024;
}

static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
{
    u64 features;

    if (!(type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)))
        return;

    features = btrfs_super_incompat_flags(info->super_copy);
    if (features & BTRFS_FEATURE_INCOMPAT_RAID56)
        return;

    features |= BTRFS_FEATURE_INCOMPAT_RAID56;
    btrfs_set_super_incompat_flags(info->super_copy, features);
    printk(KERN_INFO "btrfs: setting RAID5/6 feature flag\n");
}

static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
                               struct btrfs_root *extent_root,
                               struct map_lookup **map_ret,
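The positional initializers are replaced above by a designated-initializer table indexed by profile, so each constraint can be read by name. A cut-down mirror of the layout to show how it is consumed (local enum and struct for illustration, not the kernel headers):

#include <stdio.h>

enum { RAID10, RAID1, DUP, RAID0, SINGLE, RAID5, RAID6, NR_RAID_TYPES };

struct raid_attr {
    int sub_stripes, dev_stripes, devs_max, devs_min, devs_increment, ncopies;
};

/* same values as the patch; devs_max 0 means "as many as possible" */
static const struct raid_attr raid_array[NR_RAID_TYPES] = {
    [RAID10] = { 2, 1, 0, 4, 2, 2 },
    [RAID1]  = { 1, 1, 2, 2, 2, 2 },
    [DUP]    = { 1, 2, 1, 1, 1, 2 },
    [RAID0]  = { 1, 1, 0, 2, 1, 1 },
    [SINGLE] = { 1, 1, 1, 1, 1, 1 },
    [RAID5]  = { 1, 1, 0, 2, 1, 2 },
    [RAID6]  = { 1, 1, 0, 3, 1, 3 },
};

int main(void)
{
    printf("raid6 needs at least %d devices and stores %d copies worth\n",
           raid_array[RAID6].devs_min, raid_array[RAID6].ncopies);
    return 0;
}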
@@ -3526,6 +3687,8 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
    struct btrfs_device_info *devices_info = NULL;
    u64 total_avail;
    int num_stripes;    /* total number of stripes to allocate */
    int data_stripes;   /* number of stripes that count for
                           block group size */
    int sub_stripes;    /* sub_stripes info for map */
    int dev_stripes;    /* stripes per dev */
    int devs_max;       /* max devs to use */

@@ -3537,6 +3700,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
    u64 max_chunk_size;
    u64 stripe_size;
    u64 num_bytes;
    u64 raid_stripe_len = BTRFS_STRIPE_LEN;
    int ndevs;
    int i;
    int j;

@@ -3631,12 +3795,16 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
        if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
            continue;

        if (ndevs == fs_devices->rw_devices) {
            WARN(1, "%s: found more than %llu devices\n",
                 __func__, fs_devices->rw_devices);
            break;
        }
        devices_info[ndevs].dev_offset = dev_offset;
        devices_info[ndevs].max_avail = max_avail;
        devices_info[ndevs].total_avail = total_avail;
        devices_info[ndevs].dev = device;
        ++ndevs;
        WARN_ON(ndevs > fs_devices->rw_devices);
    }

    /*
@@ -3662,16 +3830,48 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
    stripe_size = devices_info[ndevs-1].max_avail;
    num_stripes = ndevs * dev_stripes;

    if (stripe_size * ndevs > max_chunk_size * ncopies) {
        stripe_size = max_chunk_size * ncopies;
        do_div(stripe_size, ndevs);
    /*
     * this will have to be fixed for RAID1 and RAID10 over
     * more drives
     */
    data_stripes = num_stripes / ncopies;

    if (type & BTRFS_BLOCK_GROUP_RAID5) {
        raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
                          btrfs_super_stripesize(info->super_copy));
        data_stripes = num_stripes - 1;
    }
    if (type & BTRFS_BLOCK_GROUP_RAID6) {
        raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
                          btrfs_super_stripesize(info->super_copy));
        data_stripes = num_stripes - 2;
    }

    /*
     * Use the number of data stripes to figure out how big this chunk
     * is really going to be in terms of logical address space,
     * and compare that answer with the max chunk size
     */
    if (stripe_size * data_stripes > max_chunk_size) {
        u64 mask = (1ULL << 24) - 1;
        stripe_size = max_chunk_size;
        do_div(stripe_size, data_stripes);

        /* bump the answer up to a 16MB boundary */
        stripe_size = (stripe_size + mask) & ~mask;

        /* but don't go higher than the limits we found
         * while searching for free extents
         */
        if (stripe_size > devices_info[ndevs-1].max_avail)
            stripe_size = devices_info[ndevs-1].max_avail;
    }

    do_div(stripe_size, dev_stripes);

    /* align to BTRFS_STRIPE_LEN */
    do_div(stripe_size, BTRFS_STRIPE_LEN);
    stripe_size *= BTRFS_STRIPE_LEN;
    do_div(stripe_size, raid_stripe_len);
    stripe_size *= raid_stripe_len;

    map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
    if (!map) {
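The sizing logic above caps the chunk by what the data stripes can address, rounds up to a 16 MiB boundary, then aligns the per-device stripe down to raid_stripe_len with a do_div pair. The same math with concrete, assumed numbers:

#include <stdio.h>

int main(void)
{
    unsigned long long max_chunk_size = 10ULL << 30; /* 10 GiB cap */
    unsigned long long stripe_size = 5ULL << 30;     /* free space per device */
    unsigned long long raid_stripe_len = 64 * 1024;
    int data_stripes = 3;   /* num_stripes - 1 for raid5 on 4 devices */

    if (stripe_size * data_stripes > max_chunk_size) {
        unsigned long long mask = (1ULL << 24) - 1;

        stripe_size = max_chunk_size / data_stripes;
        /* bump the answer up to a 16MB boundary, as the kernel does */
        stripe_size = (stripe_size + mask) & ~mask;
    }
    /* align down to the raid stripe length (the do_div pair above) */
    stripe_size = stripe_size / raid_stripe_len * raid_stripe_len;

    printf("per-device stripe: %llu bytes, chunk: %llu bytes\n",
           stripe_size, stripe_size * data_stripes);
    return 0;
}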
@@ -3689,14 +3889,14 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
        }
    }
    map->sector_size = extent_root->sectorsize;
    map->stripe_len = BTRFS_STRIPE_LEN;
    map->io_align = BTRFS_STRIPE_LEN;
    map->io_width = BTRFS_STRIPE_LEN;
    map->stripe_len = raid_stripe_len;
    map->io_align = raid_stripe_len;
    map->io_width = raid_stripe_len;
    map->type = type;
    map->sub_stripes = sub_stripes;

    *map_ret = map;
    num_bytes = stripe_size * (num_stripes / ncopies);
    num_bytes = stripe_size * data_stripes;

    *stripe_size_out = stripe_size;
    *num_bytes_out = num_bytes;

@@ -3718,15 +3918,10 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
    write_lock(&em_tree->lock);
    ret = add_extent_mapping(em_tree, em);
    write_unlock(&em_tree->lock);
    free_extent_map(em);
    if (ret)
        goto error;

    ret = btrfs_make_block_group(trans, extent_root, 0, type,
                                 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
                                 start, num_bytes);
    if (ret)
    if (ret) {
        free_extent_map(em);
        goto error;
    }

    for (i = 0; i < map->num_stripes; ++i) {
        struct btrfs_device *device;

@@ -3739,15 +3934,44 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
                              info->chunk_root->root_key.objectid,
                              BTRFS_FIRST_CHUNK_TREE_OBJECTID,
                              start, dev_offset, stripe_size);
        if (ret) {
            btrfs_abort_transaction(trans, extent_root, ret);
            goto error;
        }
        if (ret)
            goto error_dev_extent;
    }

    ret = btrfs_make_block_group(trans, extent_root, 0, type,
                                 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
                                 start, num_bytes);
    if (ret) {
        i = map->num_stripes - 1;
        goto error_dev_extent;
    }

    free_extent_map(em);
    check_raid56_incompat_flag(extent_root->fs_info, type);

    kfree(devices_info);
    return 0;

error_dev_extent:
    for (; i >= 0; i--) {
        struct btrfs_device *device;
        int err;

        device = map->stripes[i].dev;
        err = btrfs_free_dev_extent(trans, device, start);
        if (err) {
            btrfs_abort_transaction(trans, extent_root, err);
            break;
        }
    }
    write_lock(&em_tree->lock);
    remove_extent_mapping(em_tree, em);
    write_unlock(&em_tree->lock);

    /* One for our allocation */
    free_extent_map(em);
    /* One for the tree reference */
    free_extent_map(em);
error:
    kfree(map);
    kfree(devices_info);
@@ -3887,10 +4111,7 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
    if (ret)
        return ret;

    alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
                    fs_info->avail_metadata_alloc_bits;
    alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);

    alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
    ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
                              &stripe_size, chunk_offset, alloc_profile);
    if (ret)

@@ -3898,10 +4119,7 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,

    sys_chunk_offset = chunk_offset + chunk_size;

    alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
                    fs_info->avail_system_alloc_bits;
    alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);

    alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
    ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
                              &sys_chunk_size, &sys_stripe_size,
                              sys_chunk_offset, alloc_profile);

@@ -4014,6 +4232,10 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
        ret = map->num_stripes;
    else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
        ret = map->sub_stripes;
    else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
        ret = 2;
    else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
        ret = 3;
    else
        ret = 1;
    free_extent_map(em);
@@ -4026,6 +4248,52 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
    return ret;
}

unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
                                    struct btrfs_mapping_tree *map_tree,
                                    u64 logical)
{
    struct extent_map *em;
    struct map_lookup *map;
    struct extent_map_tree *em_tree = &map_tree->map_tree;
    unsigned long len = root->sectorsize;

    read_lock(&em_tree->lock);
    em = lookup_extent_mapping(em_tree, logical, len);
    read_unlock(&em_tree->lock);
    BUG_ON(!em);

    BUG_ON(em->start > logical || em->start + em->len < logical);
    map = (struct map_lookup *)em->bdev;
    if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
                     BTRFS_BLOCK_GROUP_RAID6)) {
        len = map->stripe_len * nr_data_stripes(map);
    }
    free_extent_map(em);
    return len;
}

int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
                           u64 logical, u64 len, int mirror_num)
{
    struct extent_map *em;
    struct map_lookup *map;
    struct extent_map_tree *em_tree = &map_tree->map_tree;
    int ret = 0;

    read_lock(&em_tree->lock);
    em = lookup_extent_mapping(em_tree, logical, len);
    read_unlock(&em_tree->lock);
    BUG_ON(!em);

    BUG_ON(em->start > logical || em->start + em->len < logical);
    map = (struct map_lookup *)em->bdev;
    if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
                     BTRFS_BLOCK_GROUP_RAID6))
        ret = 1;
    free_extent_map(em);
    return ret;
}

static int find_live_mirror(struct btrfs_fs_info *fs_info,
                            struct map_lookup *map, int first, int num,
                            int optimal, int dev_replace_is_ongoing)
@@ -4063,10 +4331,39 @@ static int find_live_mirror(struct btrfs_fs_info *fs_info,
    return optimal;
}

static inline int parity_smaller(u64 a, u64 b)
{
    return a > b;
}

/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map)
{
    struct btrfs_bio_stripe s;
    int i;
    u64 l;
    int again = 1;

    while (again) {
        again = 0;
        for (i = 0; i < bbio->num_stripes - 1; i++) {
            if (parity_smaller(raid_map[i], raid_map[i+1])) {
                s = bbio->stripes[i];
                l = raid_map[i];
                bbio->stripes[i] = bbio->stripes[i+1];
                raid_map[i] = raid_map[i+1];
                bbio->stripes[i+1] = s;
                raid_map[i+1] = l;
                again = 1;
            }
        }
    }
}

static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                             u64 logical, u64 *length,
                             struct btrfs_bio **bbio_ret,
                             int mirror_num)
                             int mirror_num, u64 **raid_map_ret)
{
    struct extent_map *em;
    struct map_lookup *map;
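sort_parity_stripes works because the parity slots of raid_map carry sentinel values far above any logical address, so an ordinary bubble sort pushes P and Q to the end while data stripes stay in logical order. A toy run (sentinels chosen here to mimic the kernel's (u64)-2 and (u64)-1):

#include <stdio.h>

#define P_SENTINEL 0xFFFFFFFFFFFFFFFEULL /* stands in for RAID5_P_STRIPE */
#define Q_SENTINEL 0xFFFFFFFFFFFFFFFFULL /* stands in for RAID6_Q_STRIPE */

int main(void)
{
    unsigned long long raid_map[4] = { Q_SENTINEL, 0, 65536, P_SENTINEL };
    int n = 4, i, again = 1;

    while (again) {
        again = 0;
        for (i = 0; i < n - 1; i++) {
            if (raid_map[i] > raid_map[i + 1]) {
                unsigned long long t = raid_map[i];

                raid_map[i] = raid_map[i + 1];
                raid_map[i + 1] = t;
                again = 1;  /* a stripe moved; rescan */
            }
        }
    }
    for (i = 0; i < n; i++)
        printf("slot %d: %llx\n", i, raid_map[i]);
    return 0;
}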
@@ -4078,6 +4375,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
    u64 stripe_nr;
    u64 stripe_nr_orig;
    u64 stripe_nr_end;
    u64 stripe_len;
    u64 *raid_map = NULL;
    int stripe_index;
    int i;
    int ret = 0;

@@ -4089,6 +4388,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
    int num_alloc_stripes;
    int patch_the_first_stripe_for_dev_replace = 0;
    u64 physical_to_patch_in_first_stripe = 0;
    u64 raid56_full_stripe_start = (u64)-1;

    read_lock(&em_tree->lock);
    em = lookup_extent_mapping(em_tree, logical, *length);
@@ -4105,29 +4405,63 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
    map = (struct map_lookup *)em->bdev;
    offset = logical - em->start;

    if (mirror_num > map->num_stripes)
        mirror_num = 0;

    stripe_len = map->stripe_len;
    stripe_nr = offset;
    /*
     * stripe_nr counts the total number of stripes we have to stride
     * to get to this block
     */
    do_div(stripe_nr, map->stripe_len);
    do_div(stripe_nr, stripe_len);

    stripe_offset = stripe_nr * map->stripe_len;
    stripe_offset = stripe_nr * stripe_len;
    BUG_ON(offset < stripe_offset);

    /* stripe_offset is the offset of this block in its stripe*/
    stripe_offset = offset - stripe_offset;

    if (rw & REQ_DISCARD)
    /* if we're here for raid56, we need to know the stripe aligned start */
    if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
        unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
        raid56_full_stripe_start = offset;

        /* allow a write of a full stripe, but make sure we don't
         * allow straddling of stripes
         */
        do_div(raid56_full_stripe_start, full_stripe_len);
        raid56_full_stripe_start *= full_stripe_len;
    }

    if (rw & REQ_DISCARD) {
        /* we don't discard raid56 yet */
        if (map->type &
            (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
            ret = -EOPNOTSUPP;
            goto out;
        }
        *length = min_t(u64, em->len - offset, *length);
    else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
        /* we limit the length of each bio to what fits in a stripe */
        *length = min_t(u64, em->len - offset,
                        map->stripe_len - stripe_offset);
    } else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
        u64 max_len;
        /* For writes to RAID[56], allow a full stripeset across all disks.
           For other RAID types and for RAID[56] reads, just allow a single
           stripe (on a single disk). */
        if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) &&
            (rw & REQ_WRITE)) {
            max_len = stripe_len * nr_data_stripes(map) -
                (offset - raid56_full_stripe_start);
        } else {
            /* we limit the length of each bio to what fits in a stripe */
            max_len = stripe_len - stripe_offset;
        }
        *length = min_t(u64, em->len - offset, max_len);
    } else {
        *length = em->len - offset;
    }

    /* This is for when we're called from btrfs_merge_bio_hook() and all
       it cares about is the length */
    if (!bbio_ret)
        goto out;

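raid56_full_stripe_start is simply the write offset rounded down to a multiple of stripe_len * nr_data_stripes, which is what lets a full-stripe write span all disks without straddling stripe sets. Concretely, with an assumed geometry:

#include <stdio.h>

int main(void)
{
    unsigned long long stripe_len = 64 * 1024;  /* per-device stripe */
    int nr_data_stripes = 3;                    /* raid5 on 4 devices */
    unsigned long long full_stripe_len = stripe_len * nr_data_stripes;
    unsigned long long offset = 300 * 1024;     /* write offset in chunk */

    /* the do_div pair above: round down to a full-stripe boundary */
    unsigned long long full_stripe_start =
        offset / full_stripe_len * full_stripe_len;

    printf("full stripe starts at %llu (offset %llu into it)\n",
           full_stripe_start, offset - full_stripe_start);
    return 0;
}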
@@ -4160,7 +4494,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
        u64 physical_of_found = 0;

        ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
                                logical, &tmp_length, &tmp_bbio, 0);
                                logical, &tmp_length, &tmp_bbio, 0, NULL);
        if (ret) {
            WARN_ON(tmp_bbio != NULL);
            goto out;

@@ -4221,11 +4555,11 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
        num_stripes = 1;
        stripe_index = 0;
        stripe_nr_orig = stripe_nr;
        stripe_nr_end = (offset + *length + map->stripe_len - 1) &
                        (~(map->stripe_len - 1));
        stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
        do_div(stripe_nr_end, map->stripe_len);
        stripe_end_offset = stripe_nr_end * map->stripe_len -
                            (offset + *length);

        if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
            if (rw & REQ_DISCARD)
                num_stripes = min_t(u64, map->num_stripes,
@@ -4276,6 +4610,65 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                                            dev_replace_is_ongoing);
            mirror_num = stripe_index - old_stripe_index + 1;
        }

    } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
                            BTRFS_BLOCK_GROUP_RAID6)) {
        u64 tmp;

        if (bbio_ret && ((rw & REQ_WRITE) || mirror_num > 1)
            && raid_map_ret) {
            int i, rot;

            /* push stripe_nr back to the start of the full stripe */
            stripe_nr = raid56_full_stripe_start;
            do_div(stripe_nr, stripe_len);

            stripe_index = do_div(stripe_nr, nr_data_stripes(map));

            /* RAID[56] write or recovery. Return all stripes */
            num_stripes = map->num_stripes;
            max_errors = nr_parity_stripes(map);

            raid_map = kmalloc(sizeof(u64) * num_stripes,
                               GFP_NOFS);
            if (!raid_map) {
                ret = -ENOMEM;
                goto out;
            }

            /* Work out the disk rotation on this stripe-set */
            tmp = stripe_nr;
            rot = do_div(tmp, num_stripes);

            /* Fill in the logical address of each stripe */
            tmp = stripe_nr * nr_data_stripes(map);
            for (i = 0; i < nr_data_stripes(map); i++)
                raid_map[(i+rot) % num_stripes] =
                    em->start + (tmp + i) * map->stripe_len;

            raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
            if (map->type & BTRFS_BLOCK_GROUP_RAID6)
                raid_map[(i+rot+1) % num_stripes] =
                    RAID6_Q_STRIPE;

            *length = map->stripe_len;
            stripe_index = 0;
            stripe_offset = 0;
        } else {
            /*
             * Mirror #0 or #1 means the original data block.
             * Mirror #2 is RAID5 parity block.
             * Mirror #3 is RAID6 Q block.
             */
            stripe_index = do_div(stripe_nr, nr_data_stripes(map));
            if (mirror_num > 1)
                stripe_index = nr_data_stripes(map) +
                               mirror_num - 2;

            /* We distribute the parity blocks across stripes */
            tmp = stripe_nr + stripe_index;
            stripe_index = do_div(tmp, map->num_stripes);
        }
    } else {
        /*
         * after this do_div call, stripe_nr is the number of stripes
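The rotation above spreads parity across devices: for each successive full stripe the parity slot advances by one disk, and the data stripes' logical addresses fill the remaining slots modulo num_stripes. A sketch with an assumed 4-device raid5 (parity marked with a sentinel):

#include <stdio.h>

int main(void)
{
    int num_stripes = 4;                  /* 3 data + 1 parity (raid5) */
    int nr_data = 3;
    unsigned long long stripe_len = 64 * 1024;
    unsigned long long stripe_nr = 5;     /* which full stripe */
    unsigned long long em_start = 0;      /* chunk logical start */
    unsigned long long raid_map[4];
    int i, rot = stripe_nr % num_stripes; /* disk rotation for this set */

    /* same modulo walk as the kernel loop */
    for (i = 0; i < nr_data; i++)
        raid_map[(i + rot) % num_stripes] =
            em_start + (stripe_nr * nr_data + i) * stripe_len;
    raid_map[(i + rot) % num_stripes] = ~0ULL; /* parity slot marker */

    for (i = 0; i < num_stripes; i++)
        printf("disk %d -> %llx\n", i, raid_map[i]);
    return 0;
}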
@@ -4384,8 +4777,11 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
    if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) {
        if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
                         BTRFS_BLOCK_GROUP_RAID10 |
                         BTRFS_BLOCK_GROUP_RAID5 |
                         BTRFS_BLOCK_GROUP_DUP)) {
            max_errors = 1;
        } else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
            max_errors = 2;
        }
    }

@@ -4486,6 +4882,10 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
        bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
        bbio->mirror_num = map->num_stripes + 1;
    }
    if (raid_map) {
        sort_parity_stripes(bbio, raid_map);
        *raid_map_ret = raid_map;
    }
out:
    if (dev_replace_is_ongoing)
        btrfs_dev_replace_unlock(dev_replace);

@@ -4498,7 +4898,7 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                    struct btrfs_bio **bbio_ret, int mirror_num)
{
    return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
                             mirror_num);
                             mirror_num, NULL);
}

int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
@@ -4512,6 +4912,7 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
    u64 bytenr;
    u64 length;
    u64 stripe_nr;
    u64 rmap_len;
    int i, j, nr = 0;

    read_lock(&em_tree->lock);

@@ -4522,10 +4923,17 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
    map = (struct map_lookup *)em->bdev;

    length = em->len;
    rmap_len = map->stripe_len;

    if (map->type & BTRFS_BLOCK_GROUP_RAID10)
        do_div(length, map->num_stripes / map->sub_stripes);
    else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
        do_div(length, map->num_stripes);
    else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
                          BTRFS_BLOCK_GROUP_RAID6)) {
        do_div(length, nr_data_stripes(map));
        rmap_len = map->stripe_len * nr_data_stripes(map);
    }

    buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
    BUG_ON(!buf); /* -ENOMEM */

@@ -4545,8 +4953,11 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
            do_div(stripe_nr, map->sub_stripes);
        } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
            stripe_nr = stripe_nr * map->num_stripes + i;
        }
        bytenr = chunk_start + stripe_nr * map->stripe_len;
        } /* else if RAID[56], multiply by nr_data_stripes().
           * Alternatively, just use rmap_len below instead of
           * map->stripe_len */

        bytenr = chunk_start + stripe_nr * rmap_len;
        WARN_ON(nr >= map->num_stripes);
        for (j = 0; j < nr; j++) {
            if (buf[j] == bytenr)

@@ -4560,7 +4971,7 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,

    *logical = buf;
    *naddrs = nr;
    *stripe_len = map->stripe_len;
    *stripe_len = rmap_len;

    free_extent_map(em);
    return 0;
@@ -4634,7 +5045,7 @@ static void btrfs_end_bio(struct bio *bio, int err)
        bio->bi_bdev = (struct block_device *)
            (unsigned long)bbio->mirror_num;
        /* only send an error to the higher layers if it is
         * beyond the tolerance of the multi-bio
         * beyond the tolerance of the btrfs bio
         */
        if (atomic_read(&bbio->error) > bbio->max_errors) {
            err = -EIO;

@@ -4668,13 +5079,18 @@ struct async_sched {
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
static noinline void schedule_bio(struct btrfs_root *root,
noinline void btrfs_schedule_bio(struct btrfs_root *root,
                                 struct btrfs_device *device,
                                 int rw, struct bio *bio)
{
    int should_queue = 1;
    struct btrfs_pending_bios *pending_bios;

    if (device->missing || !device->bdev) {
        bio_endio(bio, -EIO);
        return;
    }

    /* don't bother with additional async steps for reads, right now */
    if (!(rw & REQ_WRITE)) {
        bio_get(bio);
@@ -4772,7 +5188,7 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
#endif
    bio->bi_bdev = dev->bdev;
    if (async)
        schedule_bio(root, dev, rw, bio);
        btrfs_schedule_bio(root, dev, rw, bio);
    else
        btrfsic_submit_bio(rw, bio);
}

@@ -4831,6 +5247,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
    u64 logical = (u64)bio->bi_sector << 9;
    u64 length = 0;
    u64 map_length;
    u64 *raid_map = NULL;
    int ret;
    int dev_nr = 0;
    int total_devs = 1;
@@ -4839,12 +5256,30 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
    length = bio->bi_size;
    map_length = length;

    ret = btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
                          mirror_num);
    if (ret)
    ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
                            mirror_num, &raid_map);
    if (ret) /* -ENOMEM */
        return ret;

    total_devs = bbio->num_stripes;
    bbio->orig_bio = first_bio;
    bbio->private = first_bio->bi_private;
    bbio->end_io = first_bio->bi_end_io;
    atomic_set(&bbio->stripes_pending, bbio->num_stripes);

    if (raid_map) {
        /* In this case, map_length has been set to the length of
           a single stripe; not the whole write */
        if (rw & WRITE) {
            return raid56_parity_write(root, bio, bbio,
                                       raid_map, map_length);
        } else {
            return raid56_parity_recover(root, bio, bbio,
                                         raid_map, map_length,
                                         mirror_num);
        }
    }

    if (map_length < length) {
        printk(KERN_CRIT "btrfs: mapping failed logical %llu bio len %llu "
               "len %llu\n", (unsigned long long)logical,
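Note how the bbio completion fields are now filled in before the raid56 branch, since parity writes and recovery consume the bbio whole; map_length at that point covers a single stripe, not the full bio. A toy routing table of the dispatch (stand-in types, not kernel code):

#include <stdio.h>

struct bio_ctx { int is_write; int has_raid_map; };

static const char *route(const struct bio_ctx *b)
{
    if (b->has_raid_map)
        return b->is_write ? "raid56_parity_write"
                           : "raid56_parity_recover";
    return "submit per-stripe bios";
}

int main(void)
{
    struct bio_ctx w = { 1, 1 }, r = { 0, 1 }, plain = { 1, 0 };

    printf("%s / %s / %s\n", route(&w), route(&r), route(&plain));
    return 0;
}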
@@ -4853,11 +5288,6 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
        BUG();
    }

    bbio->orig_bio = first_bio;
    bbio->private = first_bio->bi_private;
    bbio->end_io = first_bio->bi_end_io;
    atomic_set(&bbio->stripes_pending, bbio->num_stripes);

    while (dev_nr < total_devs) {
        dev = bbio->stripes[dev_nr].dev;
        if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {

fs/btrfs/volumes.h

@@ -21,8 +21,8 @@

#include <linux/bio.h>
#include <linux/sort.h>
#include <linux/btrfs.h>
#include "async-thread.h"
#include "ioctl.h"

#define BTRFS_STRIPE_LEN    (64 * 1024)
@@ -321,7 +321,14 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
                                              struct btrfs_device *tgtdev);
int btrfs_scratch_superblock(struct btrfs_device *device);

void btrfs_schedule_bio(struct btrfs_root *root,
                        struct btrfs_device *device,
                        int rw, struct bio *bio);
int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
                           u64 logical, u64 len, int mirror_num);
unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
                                    struct btrfs_mapping_tree *map_tree,
                                    u64 logical);
static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
                                      int index)
{
include/linux/btrfs.h (new file)

@@ -0,0 +1,6 @@
#ifndef _LINUX_BTRFS_H
#define _LINUX_BTRFS_H

#include <uapi/linux/btrfs.h>

#endif /* _LINUX_BTRFS_H */
include/uapi/linux/Kbuild

@@ -68,6 +68,7 @@ header-y += blkpg.h
header-y += blktrace_api.h
header-y += bpqether.h
header-y += bsg.h
header-y += btrfs.h
header-y += can.h
header-y += capability.h
header-y += capi.h

include/uapi/linux/btrfs.h

@@ -16,8 +16,9 @@
 * Boston, MA 021110-1307, USA.
 */

#ifndef __IOCTL_
#define __IOCTL_
#ifndef _UAPI_LINUX_BTRFS_H
#define _UAPI_LINUX_BTRFS_H
#include <linux/types.h>
#include <linux/ioctl.h>

#define BTRFS_IOCTL_MAGIC 0x94
@@ -406,6 +407,13 @@ struct btrfs_ioctl_received_subvol_args {
    __u64   reserved[16];       /* in */
};

/*
 * Caller doesn't want file data in the send stream, even if the
 * search of clone sources doesn't find an extent. UPDATE_EXTENT
 * commands will be sent instead of WRITE commands.
 */
#define BTRFS_SEND_FLAG_NO_FILE_DATA    0x1

struct btrfs_ioctl_send_args {
    __s64 send_fd;              /* in */
    __u64 clone_sources_count;  /* in */
@@ -494,9 +502,13 @@ struct btrfs_ioctl_send_args {
                                struct btrfs_ioctl_qgroup_create_args)
#define BTRFS_IOC_QGROUP_LIMIT _IOR(BTRFS_IOCTL_MAGIC, 43, \
                                struct btrfs_ioctl_qgroup_limit_args)
#define BTRFS_IOC_GET_FSLABEL _IOR(BTRFS_IOCTL_MAGIC, 49, \
                                char[BTRFS_LABEL_SIZE])
#define BTRFS_IOC_SET_FSLABEL _IOW(BTRFS_IOCTL_MAGIC, 50, \
                                char[BTRFS_LABEL_SIZE])
#define BTRFS_IOC_GET_DEV_STATS _IOWR(BTRFS_IOCTL_MAGIC, 52, \
                                struct btrfs_ioctl_get_dev_stats)
#define BTRFS_IOC_DEV_REPLACE _IOWR(BTRFS_IOCTL_MAGIC, 53, \
                                struct btrfs_ioctl_dev_replace_args)

#endif
#endif /* _UAPI_LINUX_BTRFS_H */
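The relocated header keeps the label ioctls visible to userspace. A minimal sketch of calling one of them (assumes a kernel exposing these definitions and BTRFS_LABEL_SIZE via the header; error handling trimmed):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

int main(void)
{
    char label[BTRFS_LABEL_SIZE] = { 0 }; /* assumes header defines the size */
    int fd = open("/mnt", O_RDONLY);      /* any file on the filesystem */

    if (fd < 0 || ioctl(fd, BTRFS_IOC_GET_FSLABEL, label) < 0) {
        perror("get fslabel");
        return 1;
    }
    printf("label: %s\n", label);
    close(fd);
    return 0;
}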