Merge branch 'cleanups-4.7' into for-chris-4.7-20160525
commit 42f31734eb
36 changed files with 193 additions and 196 deletions
@@ -1939,7 +1939,7 @@ static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
  * from ipath->fspath->val[i].
  * when it returns, there are ipath->fspath->elem_cnt number of paths available
  * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
- * number of missed paths in recored in ipath->fspath->elem_missed, otherwise,
+ * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
  * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
  * have been needed to return all paths.
  */

@@ -313,7 +313,7 @@ struct btrfs_dio_private {
	struct bio *dio_bio;

	/*
-	 * The original bio may be splited to several sub-bios, this is
+	 * The original bio may be split to several sub-bios, this is
	 * done during endio of sub-bios
	 */
	int (*subio_endio)(struct inode *, struct btrfs_io_bio *, int);

@@ -1939,7 +1939,7 @@ again:
		/*
		 * Clear all references of this block. Do not free
		 * the block itself even if is not referenced anymore
-		 * because it still carries valueable information
+		 * because it still carries valuable information
		 * like whether it was ever written and IO completed.
		 */
		list_for_each_entry_safe(l, tmp, &block->ref_to_list,

@@ -156,7 +156,7 @@ struct extent_buffer *btrfs_root_node(struct btrfs_root *root)

	/*
	 * RCU really hurts here, we could free up the root node because
-	 * it was cow'ed but we may not get the new root node yet so do
+	 * it was COWed but we may not get the new root node yet so do
	 * the inc_not_zero dance and if it doesn't work then
	 * synchronize_rcu and try again.
	 */

@@ -955,7 +955,7 @@ int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
 {
	/*
-	 * Tree blocks not in refernece counted trees and tree roots
+	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared. If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.

@@ -1270,7 +1270,7 @@ __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,

 /*
  * tm is a pointer to the first operation to rewind within eb. then, all
- * previous operations will be rewinded (until we reach something older than
+ * previous operations will be rewound (until we reach something older than
  * time_seq).
  */
 static void

@@ -1345,7 +1345,7 @@ __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
 }

 /*
- * Called with eb read locked. If the buffer cannot be rewinded, the same buffer
+ * Called with eb read locked. If the buffer cannot be rewound, the same buffer
  * is returned. If rewind operations happen, a fresh buffer is returned. The
  * returned buffer is always read-locked. If the returned buffer is not the
  * input buffer, the lock on the input buffer is released and the input buffer

@@ -1516,7 +1516,7 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans,
  * 3) the root is not forced COW.
  *
  * What is forced COW:
- *    when we create snapshot during commiting the transaction,
+ *    when we create snapshot during committing the transaction,
  *    after we've finished coping src root, we must COW the shared
  *    block to ensure the metadata consistency.
  */

@@ -1531,7 +1531,7 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans,

 /*
  * cows a single block, see __btrfs_cow_block for the real work.
- * This version of it has extra checks so that a block isn't cow'd more than
+ * This version of it has extra checks so that a block isn't COWed more than
  * once per transaction, as long as it hasn't been written yet
 */
 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,

@@ -2986,7 +2986,7 @@ again:
		btrfs_unlock_up_safe(p, level + 1);

		/*
-		 * Since we can unwind eb's we want to do a real search every
+		 * Since we can unwind ebs we want to do a real search every
		 * time.
		 */
		prev_cmp = -1;
@@ -89,7 +89,7 @@ static const int btrfs_csum_sizes[] = { 4 };
 /* four bytes for CRC32 */
 #define BTRFS_EMPTY_DIR_SIZE 0

-/* spefic to btrfs_map_block(), therefore not in include/linux/blk_types.h */
+/* specific to btrfs_map_block(), therefore not in include/linux/blk_types.h */
 #define REQ_GET_READ_MIRRORS	(1 << 30)

 /* ioprio of readahead is set to idle */

@@ -431,7 +431,7 @@ struct btrfs_space_info {
	 * bytes_pinned does not reflect the bytes that will be pinned once the
	 * delayed refs are flushed, so this counter is inc'ed every time we
	 * call btrfs_free_extent so it is a realtime count of what will be
-	 * freed once the transaction is committed. It will be zero'ed every
+	 * freed once the transaction is committed. It will be zeroed every
	 * time the transaction commits.
	 */
	struct percpu_counter total_bytes_pinned;

@@ -1401,7 +1401,7 @@ static inline void btrfs_init_map_token (struct btrfs_map_token *token)
	token->kaddr = NULL;
 }

-/* some macros to generate set/get funcs for the struct fields. This
+/* some macros to generate set/get functions for the struct fields. This
  * assumes there is a lefoo_to_cpu for every type, so lets make a simple
  * one for u8:
  */

@@ -188,7 +188,7 @@ struct btrfs_delayed_ref_root {

	/*
	 * To make qgroup to skip given root.
-	 * This is for snapshot, as btrfs_qgroup_inherit() will manully
+	 * This is for snapshot, as btrfs_qgroup_inherit() will manually
	 * modify counters for snapshot and its source, so we should skip
	 * the snapshot in new_root/old_roots or it will get calculated twice
	 */
@@ -450,7 +450,7 @@ int btrfs_dev_replace_by_ioctl(struct btrfs_root *root,
 }

 /*
- * blocked until all flighting bios are finished.
+ * blocked until all in-flight bios operations are finished.
 */
 static void btrfs_rm_dev_replace_blocked(struct btrfs_fs_info *fs_info)
 {

@@ -384,7 +384,7 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
	/*
	 * Things reading via commit roots that don't have normal protection,
	 * like send, can have a really old block in cache that may point at a
-	 * block that has been free'd and re-allocated. So don't clear uptodate
+	 * block that has been freed and re-allocated. So don't clear uptodate
	 * if we find an eb that is under IO (dirty/writeback) because we could
	 * end up reading in the stale data and then writing it back out and
	 * making everybody very sad.

@@ -418,7 +418,7 @@ static int btrfs_check_super_csum(char *raw_disk_sb)
		/*
		 * The super_block structure does not span the whole
		 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
-		 * is filled with zeros and is included in the checkum.
+		 * is filled with zeros and is included in the checksum.
		 */
		crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
				crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);

@@ -600,7 +600,7 @@ static noinline int check_leaf(struct btrfs_root *root,

		/*
		 * Check to make sure that we don't point outside of the leaf,
-		 * just incase all the items are consistent to eachother, but
+		 * just in case all the items are consistent to each other, but
		 * all point outside of the leaf.
		 */
		if (btrfs_item_end_nr(leaf, slot) >

@@ -3022,7 +3022,7 @@ retry_root_backup:
	}

	/*
-	 * Mount does not set all options immediatelly, we can do it now and do
+	 * Mount does not set all options immediately, we can do it now and do
	 * not have to wait for transaction commit
	 */
	btrfs_apply_pending_changes(fs_info);

@@ -3255,7 +3255,7 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
		btrfs_warn_rl_in_rcu(device->dev_root->fs_info,
				"lost page write due to IO error on %s",
					  rcu_str_deref(device->name));
-		/* note, we dont' set_buffer_write_io_error because we have
+		/* note, we don't set_buffer_write_io_error because we have
		 * our own ways of dealing with the IO errors
		 */
		clear_buffer_uptodate(bh);

@@ -4367,7 +4367,7 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
		if (ret)
			break;

-		clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
+		clear_extent_bits(dirty_pages, start, end, mark);
		while (start <= end) {
			eb = btrfs_find_tree_block(root->fs_info, start);
			start += root->nodesize;

@@ -4402,7 +4402,7 @@ again:
		if (ret)
			break;

-		clear_extent_dirty(unpin, start, end, GFP_NOFS);
+		clear_extent_dirty(unpin, start, end);
		btrfs_error_unpin_extent_range(root, start, end);
		cond_resched();
	}
@@ -231,9 +231,9 @@ static int add_excluded_extent(struct btrfs_root *root,
 {
	u64 end = start + num_bytes - 1;
	set_extent_bits(&root->fs_info->freed_extents[0],
-			start, end, EXTENT_UPTODATE, GFP_NOFS);
+			start, end, EXTENT_UPTODATE);
	set_extent_bits(&root->fs_info->freed_extents[1],
-			start, end, EXTENT_UPTODATE, GFP_NOFS);
+			start, end, EXTENT_UPTODATE);
	return 0;
 }


@@ -246,9 +246,9 @@ static void free_excluded_extents(struct btrfs_root *root,
	end = start + cache->key.offset - 1;

	clear_extent_bits(&root->fs_info->freed_extents[0],
-			  start, end, EXTENT_UPTODATE, GFP_NOFS);
+			  start, end, EXTENT_UPTODATE);
	clear_extent_bits(&root->fs_info->freed_extents[1],
-			  start, end, EXTENT_UPTODATE, GFP_NOFS);
+			  start, end, EXTENT_UPTODATE);
 }

 static int exclude_super_stripes(struct btrfs_root *root,

@@ -980,7 +980,7 @@ out_free:
  * event that tree block loses its owner tree's reference and do the
  * back refs conversion.
  *
- * When a tree block is COW'd through a tree, there are four cases:
+ * When a tree block is COWed through a tree, there are four cases:
  *
  * The reference count of the block is one and the tree is the block's
  * owner tree. Nothing to do in this case.

@@ -2595,7 +2595,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
			}

			/*
-			 * Need to drop our head ref lock and re-aqcuire the
+			 * Need to drop our head ref lock and re-acquire the
			 * delayed ref lock and then re-check to make sure
			 * nobody got added.
			 */

@@ -2747,7 +2747,7 @@ static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)

	/*
	 * We don't ever fill up leaves all the way so multiply by 2 just to be
-	 * closer to what we're really going to want to ouse.
+	 * closer to what we're really going to want to use.
	 */
	return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
 }
@@ -2851,7 +2851,7 @@ static void delayed_ref_async_start(struct btrfs_work *work)
	}

	/*
-	 * trans->sync means that when we call end_transaciton, we won't
+	 * trans->sync means that when we call end_transaction, we won't
	 * wait on delayed refs
	 */
	trans->sync = true;

@@ -4296,7 +4296,7 @@ void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
  * Called if we need to clear a data reservation for this inode
  * Normally in a error case.
  *
- * This one will handle the per-indoe data rsv map for accurate reserved
+ * This one will handle the per-inode data rsv map for accurate reserved
  * space framework.
  */
 void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len)

@@ -4967,7 +4967,7 @@ void btrfs_init_async_reclaim_work(struct work_struct *work)
  * @orig_bytes - the number of bytes we want
  * @flush - whether or not we can flush to make our reservation
  *
- * This will reserve orgi_bytes number of bytes from the space info associated
+ * This will reserve orig_bytes number of bytes from the space info associated
  * with the block_rsv. If there is not enough space it will make an attempt to
  * flush out space to make room. It will do this by flushing delalloc if
  * possible or committing the transaction. If flush is 0 then no attempts to

@@ -5572,7 +5572,7 @@ void btrfs_orphan_release_metadata(struct inode *inode)
  * common file/directory operations, they change two fs/file trees
  * and root tree, the number of items that the qgroup reserves is
  * different with the free space reservation. So we can not use
- * the space reseravtion mechanism in start_transaction().
+ * the space reservation mechanism in start_transaction().
  */
 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
				     struct btrfs_block_rsv *rsv,

@@ -5621,7 +5621,7 @@ void btrfs_subvolume_release_metadata(struct btrfs_root *root,
 /**
  * drop_outstanding_extent - drop an outstanding extent
  * @inode: the inode we're dropping the extent for
- * @num_bytes: the number of bytes we're relaseing.
+ * @num_bytes: the number of bytes we're releasing.
  *
  * This is called when we are freeing up an outstanding extent, either called
  * after an error or after an extent is written. This will return the number of

@@ -5647,7 +5647,7 @@ static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
		drop_inode_space = 1;

	/*
-	 * If we have more or the same amount of outsanding extents than we have
+	 * If we have more or the same amount of outstanding extents than we have
	 * reserved then we need to leave the reserved extents count alone.
	 */
	if (BTRFS_I(inode)->outstanding_extents >=

@@ -5661,8 +5661,8 @@ static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
 }

 /**
- * calc_csum_metadata_size - return the amount of metada space that must be
- * reserved/free'd for the given bytes.
+ * calc_csum_metadata_size - return the amount of metadata space that must be
+ * reserved/freed for the given bytes.
  * @inode: the inode we're manipulating
  * @num_bytes: the number of bytes in question
  * @reserve: 1 if we are reserving space, 0 if we are freeing space

@@ -5814,7 +5814,7 @@ out_fail:

		/*
		 * This is tricky, but first we need to figure out how much we
-		 * free'd from any free-ers that occurred during this
+		 * freed from any free-ers that occurred during this
		 * reservation, so we reset ->csum_bytes to the csum_bytes
		 * before we dropped our lock, and then call the free for the
		 * number of bytes that were freed while we were trying our

@@ -5836,7 +5836,7 @@ out_fail:

		/*
		 * Now reset ->csum_bytes to what it should be. If bytes is
-		 * more than to_free then we would have free'd more space had we
+		 * more than to_free then we would have freed more space had we
		 * not had an artificially high ->csum_bytes, so we need to free
		 * the remainder. If bytes is the same or less then we don't
		 * need to do anything, the other free-ers did the correct

@@ -6515,7 +6515,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
			ret = btrfs_discard_extent(root, start,
						   end + 1 - start, NULL);

-		clear_extent_dirty(unpin, start, end, GFP_NOFS);
+		clear_extent_dirty(unpin, start, end);
		unpin_extent_range(root, start, end, true);
		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
		cond_resched();
@@ -7578,7 +7578,7 @@ loop:
		if (loop == LOOP_CACHING_NOWAIT) {
			/*
			 * We want to skip the LOOP_CACHING_WAIT step if we
-			 * don't have any unached bgs and we've alrelady done a
+			 * don't have any uncached bgs and we've already done a
			 * full search through.
			 */
			if (orig_have_caching_bg || !full_search)

@@ -7982,7 +7982,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,

	/*
	 * Mixed block groups will exclude before processing the log so we only
-	 * need to do the exlude dance if this fs isn't mixed.
+	 * need to do the exclude dance if this fs isn't mixed.
	 */
	if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
		ret = __exclude_logged_extent(root, ins->objectid, ins->offset);

@@ -8032,7 +8032,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
					buf->start + buf->len - 1, GFP_NOFS);
		else
			set_extent_new(&root->dirty_log_pages, buf->start,
-					buf->start + buf->len - 1, GFP_NOFS);
+					buf->start + buf->len - 1);
	} else {
		buf->log_index = -1;
		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,

@@ -9426,7 +9426,7 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
	u64 free_bytes = 0;
	int factor;

-	/* It's df, we don't care if it's racey */
+	/* It's df, we don't care if it's racy */
	if (list_empty(&sinfo->ro_bgs))
		return 0;


@@ -10635,14 +10635,14 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
		 */
		mutex_lock(&fs_info->unused_bg_unpin_mutex);
		ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
-				  EXTENT_DIRTY, GFP_NOFS);
+				  EXTENT_DIRTY);
		if (ret) {
			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
			btrfs_dec_block_group_ro(root, block_group);
			goto end_trans;
		}
		ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
-				  EXTENT_DIRTY, GFP_NOFS);
+				  EXTENT_DIRTY);
		if (ret) {
			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
			btrfs_dec_block_group_ro(root, block_group);
@@ -726,14 +726,6 @@ next:
	start = last_end + 1;
	if (start <= end && state && !need_resched())
		goto hit_next;
-	goto search_again;
-
-out:
-	spin_unlock(&tree->lock);
-	if (prealloc)
-		free_extent_state(prealloc);
-
-	return 0;

 search_again:
	if (start > end)

@@ -742,6 +734,14 @@ search_again:
	if (gfpflags_allow_blocking(mask))
		cond_resched();
	goto again;
+
+out:
+	spin_unlock(&tree->lock);
+	if (prealloc)
+		free_extent_state(prealloc);
+
+	return 0;
+
 }

 static void wait_on_state(struct extent_io_tree *tree,
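The two hunks above reorder __clear_extent_bit() so the search_again block sits on the fall-through path ahead of the out label, which removes the unconditional forward goto. A minimal standalone sketch of the resulting control-flow shape (illustrative C only, with invented names; not btrfs code):

static int process_range(int *pos, int end)
{
	int err = 0;
again:
	/* handle one unit of work at *pos; may set err */
	(*pos)++;

search_again:
	if (*pos > end)
		goto out;
	/* drop the lock, maybe reschedule, then retry */
	goto again;

out:
	/* unlock and free any preallocated state here */
	return err;
}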
@@ -873,8 +873,14 @@ __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
	bits |= EXTENT_FIRST_DELALLOC;
 again:
	if (!prealloc && gfpflags_allow_blocking(mask)) {
+		/*
+		 * Don't care for allocation failure here because we might end
+		 * up not needing the pre-allocated extent state at all, which
+		 * is the case if we only have in the tree extent states that
+		 * cover our input range and don't cover too any other range.
+		 * If we end up needing a new extent state we allocate it later.
+		 */
		prealloc = alloc_extent_state(mask);
-		BUG_ON(!prealloc);
	}

	spin_lock(&tree->lock);
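The hunk above drops the BUG_ON and tolerates a failed preallocation: the pre-allocated extent state may never be needed, and a state that is actually required can still be allocated later under the lock. A hedged sketch of this best-effort preallocation pattern in plain C (invented names, not the kernel API):

#include <stdlib.h>

struct state { int used; };

static int do_op(int need_split)
{
	/* opportunistic allocation outside the lock; failure is fine */
	struct state *prealloc = malloc(sizeof(*prealloc));

	/* ... take lock ... */
	if (need_split) {
		if (!prealloc)
			return -1; /* only now is the allocation mandatory */
		prealloc->used = 1; /* the split actually consumes it */
	}
	/* ... drop lock ... */
	free(prealloc); /* free(NULL) is a no-op */
	return 0;
}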
@@ -1037,7 +1043,13 @@ hit_next:
		goto out;
	}

-	goto search_again;
+search_again:
+	if (start > end)
+		goto out;
+	spin_unlock(&tree->lock);
+	if (gfpflags_allow_blocking(mask))
+		cond_resched();
+	goto again;

 out:
	spin_unlock(&tree->lock);

@@ -1046,13 +1058,6 @@ out:

	return err;

-search_again:
-	if (start > end)
-		goto out;
-	spin_unlock(&tree->lock);
-	if (gfpflags_allow_blocking(mask))
-		cond_resched();
-	goto again;
 }

 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,

@@ -1073,17 +1078,18 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
  * @bits: the bits to set in this range
  * @clear_bits: the bits to clear in this range
  * @cached_state: state that we're going to cache
- * @mask: the allocation mask
  *
  * This will go through and set bits for the given range. If any states exist
  * already in this range they are set with the given bit and cleared of the
  * clear_bits. This is only meant to be used by things that are mergeable, ie
  * converting from say DELALLOC to DIRTY. This is not meant to be used with
  * boundary bits like LOCK.
+ *
+ * All allocations are done with GFP_NOFS.
  */
 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, unsigned clear_bits,
-		       struct extent_state **cached_state, gfp_t mask)
+		       struct extent_state **cached_state)
 {
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
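With the gfp_t argument gone, convert_extent_bit() callers shrink by one parameter and all allocations inside use GFP_NOFS, per the comment added above. A sketch of a call site after this change (hypothetical caller; the signature and bit names match this diff):

static int example_convert(struct extent_io_tree *tree, u64 start, u64 end,
			   struct extent_state **cached)
{
	/* previously: convert_extent_bit(tree, start, end, EXTENT_DIRTY,
	 *                                EXTENT_DELALLOC, cached, GFP_NOFS); */
	return convert_extent_bit(tree, start, end, EXTENT_DIRTY,
				  EXTENT_DELALLOC, cached);
}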
@@ -1098,7 +1104,7 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
	btrfs_debug_check_extent_io_range(tree, start, end);

 again:
-	if (!prealloc && gfpflags_allow_blocking(mask)) {
+	if (!prealloc) {
		/*
		 * Best effort, don't worry if extent state allocation fails
		 * here for the first iteration. We might have a cached state

@@ -1106,7 +1112,7 @@ again:
		 * extent state allocations are needed. We'll only know this
		 * after locking the tree.
		 */
-		prealloc = alloc_extent_state(mask);
+		prealloc = alloc_extent_state(GFP_NOFS);
		if (!prealloc && !first_iteration)
			return -ENOMEM;
	}

@@ -1263,7 +1269,13 @@ hit_next:
		goto out;
	}

-	goto search_again;
+search_again:
+	if (start > end)
+		goto out;
+	spin_unlock(&tree->lock);
+	cond_resched();
+	first_iteration = false;
+	goto again;

 out:
	spin_unlock(&tree->lock);

@@ -1271,21 +1283,11 @@ out:
		free_extent_state(prealloc);

	return err;
-
-search_again:
-	if (start > end)
-		goto out;
-	spin_unlock(&tree->lock);
-	if (gfpflags_allow_blocking(mask))
-		cond_resched();
-	first_iteration = false;
-	goto again;
 }

 /* wrappers around set/clear extent bit */
 int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-			   unsigned bits, gfp_t mask,
-			   struct extent_changeset *changeset)
+			   unsigned bits, struct extent_changeset *changeset)
 {
	/*
	 * We don't support EXTENT_LOCKED yet, as current changeset will
@@ -1295,7 +1297,7 @@ int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
	 */
	BUG_ON(bits & EXTENT_LOCKED);

-	return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, mask,
+	return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
				changeset);
 }


@@ -1308,8 +1310,7 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 }

 int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-			     unsigned bits, gfp_t mask,
-			     struct extent_changeset *changeset)
+			     unsigned bits, struct extent_changeset *changeset)
 {
	/*
	 * Don't support EXTENT_LOCKED case, same reason as

@@ -1317,7 +1318,7 @@ int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
	 */
	BUG_ON(bits & EXTENT_LOCKED);

-	return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask,
+	return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS,
				  changeset);
 }


@@ -1975,13 +1976,13 @@ int free_io_failure(struct inode *inode, struct io_failure_record *rec)
	set_state_failrec(failure_tree, rec->start, NULL);
	ret = clear_extent_bits(failure_tree, rec->start,
				rec->start + rec->len - 1,
-				EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
+				EXTENT_LOCKED | EXTENT_DIRTY);
	if (ret)
		err = ret;

	ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
				rec->start + rec->len - 1,
-				EXTENT_DAMAGED, GFP_NOFS);
+				EXTENT_DAMAGED);
	if (ret && !err)
		err = ret;


@@ -2232,13 +2233,12 @@ int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,

		/* set the bits in the private failure tree */
		ret = set_extent_bits(failure_tree, start, end,
-					EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
+					EXTENT_LOCKED | EXTENT_DIRTY);
		if (ret >= 0)
			ret = set_state_failrec(failure_tree, start, failrec);
		/* set the bits in the inode's tree */
		if (ret >= 0)
-			ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
-						GFP_NOFS);
+			ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED);
		if (ret < 0) {
			kfree(failrec);
			return ret;

@@ -4605,7 +4605,7 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
		if (mapped)
			spin_unlock(&page->mapping->private_lock);

-		/* One for when we alloced the page */
+		/* One for when we allocated the page */
		put_page(page);
	} while (index != 0);
 }

@@ -5765,7 +5765,7 @@ int try_release_extent_buffer(struct page *page)
	struct extent_buffer *eb;

	/*
-	 * We need to make sure noboody is attaching this page to an eb right
+	 * We need to make sure nobody is attaching this page to an eb right
	 * now.
	 */
	spin_lock(&page->mapping->private_lock);
@@ -220,8 +220,7 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, int filled,
		   struct extent_state *cached_state);
 int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-			     unsigned bits, gfp_t mask,
-			     struct extent_changeset *changeset);
+			     unsigned bits, struct extent_changeset *changeset);
 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned bits, int wake, int delete,
		     struct extent_state **cached, gfp_t mask);

@@ -240,27 +239,27 @@ static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
 }

 static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
-		u64 end, unsigned bits, gfp_t mask)
+		u64 end, unsigned bits)
 {
	int wake = 0;

	if (bits & EXTENT_LOCKED)
		wake = 1;

-	return clear_extent_bit(tree, start, end, bits, wake, 0, NULL, mask);
+	return clear_extent_bit(tree, start, end, bits, wake, 0, NULL,
+				GFP_NOFS);
 }

 int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-			   unsigned bits, gfp_t mask,
-			   struct extent_changeset *changeset);
+			   unsigned bits, struct extent_changeset *changeset);
 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask);

 static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
-		u64 end, unsigned bits, gfp_t mask)
+		u64 end, unsigned bits)
 {
-	return set_extent_bit(tree, start, end, bits, NULL, NULL, mask);
+	return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS);
 }

 static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
@@ -278,37 +277,38 @@ static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
 }

 static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
-		u64 end, gfp_t mask)
+		u64 end)
 {
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
-				EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
+				EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
 }

 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, unsigned clear_bits,
-		       struct extent_state **cached_state, gfp_t mask);
+		       struct extent_state **cached_state);

 static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
-		u64 end, struct extent_state **cached_state, gfp_t mask)
+		u64 end, struct extent_state **cached_state)
 {
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE,
-			      NULL, cached_state, mask);
+			      NULL, cached_state, GFP_NOFS);
 }

 static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
-		u64 end, struct extent_state **cached_state, gfp_t mask)
+		u64 end, struct extent_state **cached_state)
 {
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
-			      NULL, cached_state, mask);
+			      NULL, cached_state, GFP_NOFS);
 }

 static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
-		u64 end, gfp_t mask)
+		u64 end)
 {
-	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL, mask);
+	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL,
+			      GFP_NOFS);
 }

 static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
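The inline helpers above lose their gfp_t parameter and hard-code GFP_NOFS, which is what shortens every call site in the earlier hunks. A before/after sketch of a hypothetical caller, assuming the extent_io declarations shown above:

static void example_mark(struct extent_io_tree *tree, u64 start, u64 end)
{
	/* before this series:
	 *	set_extent_bits(tree, start, end, EXTENT_UPTODATE, GFP_NOFS);
	 *	clear_extent_bits(tree, start, end, EXTENT_UPTODATE, GFP_NOFS);
	 */
	set_extent_bits(tree, start, end, EXTENT_UPTODATE);
	clear_extent_bits(tree, start, end, EXTENT_UPTODATE);
}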
@@ -62,7 +62,7 @@ struct extent_map *alloc_extent_map(void)

 /**
  * free_extent_map - drop reference count of an extent_map
- * @em: extent map being releasead
+ * @em: extent map being released
  *
  * Drops the reference out on @em by one and free the structure
  * if the reference count hits zero.

@@ -248,7 +248,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
				    BTRFS_DATA_RELOC_TREE_OBJECTID) {
					set_extent_bits(io_tree, offset,
						offset + root->sectorsize - 1,
-						EXTENT_NODATASUM, GFP_NOFS);
+						EXTENT_NODATASUM);
				} else {
					btrfs_info(BTRFS_I(inode)->root->fs_info,
						   "no csum found for inode %llu start %llu",

@@ -2026,7 +2026,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
		      BTRFS_I(inode)->last_trans
		      <= root->fs_info->last_trans_committed)) {
		/*
-		 * We'v had everything committed since the last time we were
+		 * We've had everything committed since the last time we were
		 * modified so clear this flag in case it was set for whatever
		 * reason, it's no longer relevant.
		 */

@@ -2374,7 +2374,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)

	/* Check the aligned pages after the first unaligned page,
	 * if offset != orig_start, which means the first unaligned page
-	 * including serveral following pages are already in holes,
+	 * including several following pages are already in holes,
	 * the extra check can be skipped */
	if (offset == orig_start) {
		/* after truncate page, check hole again */

@@ -1983,7 +1983,7 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		/*
		 * If this block group has some small extents we don't want to
		 * use up all of our free slots in the cache with them, we want
-		 * to reserve them to larger extents, however if we have plent
+		 * to reserve them to larger extents, however if we have plenty
		 * of cache left then go ahead an dadd them, no sense in adding
		 * the overhead of a bitmap if we don't have to.
		 */

@@ -123,7 +123,7 @@ int btrfs_return_cluster_to_free_space(
 int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
			   u64 *trimmed, u64 start, u64 end, u64 minlen);

-/* Support functions for runnint our sanity tests */
+/* Support functions for running our sanity tests */
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 int test_add_free_space_entry(struct btrfs_block_group_cache *cache,
			      u64 offset, u64 bytes, bool bitmap);

@@ -455,7 +455,7 @@ again:

	/*
	 * skip compression for a small file range(<=blocksize) that
-	 * isn't an inline extent, since it dosen't save disk space at all.
+	 * isn't an inline extent, since it doesn't save disk space at all.
	 */
	if (total_compressed <= blocksize &&
	   (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
@@ -1978,7 +1978,7 @@ int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
 {
	WARN_ON((end & (PAGE_SIZE - 1)) == 0);
	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
-				   cached_state, GFP_NOFS);
+				   cached_state);
 }

 /* see btrfs_writepage_start_hook for details on why this is required */

@@ -3119,8 +3119,7 @@ static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,

	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
-		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
-				  GFP_NOFS);
+		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM);
		return 0;
	}


@@ -3722,7 +3721,7 @@ cache_index:
	 * and doesn't have an inode ref with the name "bar" anymore.
	 *
	 * Setting last_unlink_trans to last_trans is a pessimistic approach,
-	 * but it guarantees correctness at the expense of ocassional full
+	 * but it guarantees correctness at the expense of occasional full
	 * transaction commits on fsync if our inode is a directory, or if our
	 * inode is not a directory, logging its parent unnecessarily.
	 */

@@ -4978,7 +4977,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
		 * be instantly completed which will give us extents that need
		 * to be truncated. If we fail to get an orphan inode down we
		 * could have left over extents that were never meant to live,
-		 * so we need to garuntee from this point on that everything
+		 * so we need to guarantee from this point on that everything
		 * will be consistent.
		 */
		ret = btrfs_orphan_add(trans, inode);

@@ -5248,7 +5247,7 @@ void btrfs_evict_inode(struct inode *inode)
		}

		/*
-		 * We can't just steal from the global reserve, we need tomake
+		 * We can't just steal from the global reserve, we need to make
		 * sure there is room to do it, if not we need to commit and try
		 * again.
		 */

@@ -7433,7 +7432,7 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
				 cached_state);
		/*
		 * We're concerned with the entire range that we're going to be
-		 * doing DIO to, so we need to make sure theres no ordered
+		 * doing DIO to, so we need to make sure there's no ordered
		 * extents in this range.
		 */
		ordered = btrfs_lookup_ordered_range(inode, lockstart,

@@ -7595,7 +7594,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
	if (current->journal_info) {
		/*
		 * Need to pull our outstanding extents and set journal_info to NULL so
-		 * that anything that needs to check if there's a transction doesn't get
+		 * that anything that needs to check if there's a transaction doesn't get
		 * confused.
		 */
		dio_data = current->journal_info;

@@ -7628,7 +7627,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
	 * decompress it, so there will be buffering required no matter what we
	 * do, so go ahead and fallback to buffered.
	 *
-	 * We return -ENOTBLK because thats what makes DIO go ahead and go back
+	 * We return -ENOTBLK because that's what makes DIO go ahead and go back
	 * to buffered IO. Don't blame me, this is the price we pay for using
	 * the generic code.
	 */
@@ -9041,7 +9040,7 @@ static int btrfs_truncate(struct inode *inode)
		return ret;

	/*
-	 * Yes ladies and gentelment, this is indeed ugly. The fact is we have
+	 * Yes ladies and gentlemen, this is indeed ugly. The fact is we have
	 * 3 things going on here
	 *
	 * 1) We need to reserve space for our orphan item and the space to

@@ -9055,15 +9054,15 @@ static int btrfs_truncate(struct inode *inode)
	 * space reserved in case it uses space during the truncate (thank you
	 * very much snapshotting).
	 *
-	 * And we need these to all be seperate. The fact is we can use alot of
+	 * And we need these to all be separate. The fact is we can use a lot of
	 * space doing the truncate, and we have no earthly idea how much space
-	 * we will use, so we need the truncate reservation to be seperate so it
+	 * we will use, so we need the truncate reservation to be separate so it
	 * doesn't end up using space reserved for updating the inode or
	 * removing the orphan item. We also need to be able to stop the
	 * transaction and start a new one, which means we need to be able to
	 * update the inode several times, and we have no idea of knowing how
	 * many times that will be, so we can't just reserve 1 item for the
-	 * entirety of the opration, so that has to be done seperately as well.
+	 * entirety of the operation, so that has to be done separately as well.
	 * Then there is the orphan item, which does indeed need to be held on
	 * to for the whole operation, and we need nobody to touch this reserved
	 * space except the orphan code.

@@ -296,7 +296,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
		}
	} else {
		/*
-		 * Revert back under same assuptions as above
+		 * Revert back under same assumptions as above
		 */
		if (S_ISREG(mode)) {
			if (inode->i_size == 0)

@@ -465,7 +465,7 @@ static noinline int create_subvol(struct inode *dir,

	/*
	 * Don't create subvolume whose level is not zero. Or qgroup will be
-	 * screwed up since it assume subvolme qgroup's level to be 0.
+	 * screwed up since it assumes subvolume qgroup's level to be 0.
	 */
	if (btrfs_qgroup_level(objectid)) {
		ret = -ENOSPC;

@@ -780,7 +780,7 @@ free_pending:
  *  a. be owner of dir, or
  *  b. be owner of victim, or
  *  c. have CAP_FOWNER capability
- *  6. If the victim is append-only or immutable we can't do antyhing with
+ *  6. If the victim is append-only or immutable we can't do anything with
  *     links pointing to it.
  *  7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
  *  8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
@@ -1237,7 +1237,7 @@ again:


 set_extent_defrag(&BTRFS_I(inode)->io_tree, page_start, page_end - 1,
-		&cached_state, GFP_NOFS);
+		&cached_state);

 unlock_extent_cached(&BTRFS_I(inode)->io_tree,
 		page_start, page_end - 1, &cached_state,
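This hunk is the first of many in this merge that drop a gfp_t argument: the series sinks the allocation mask into the extent-bit helpers (set_extent_defrag, set_extent_bits, clear_extent_bits, set_extent_delalloc, convert_extent_bit and the *_record_* variants), which now hardcode GFP_NOFS internally. The recurring before/after pattern, taken from this hunk:

	/* before: the caller chose the gfp mask */
	set_extent_defrag(&BTRFS_I(inode)->io_tree, page_start, page_end - 1,
			  &cached_state, GFP_NOFS);

	/* after: the mask is implied by the helper */
	set_extent_defrag(&BTRFS_I(inode)->io_tree, page_start, page_end - 1,
			  &cached_state);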
@@ -4650,7 +4650,7 @@ again:
 }

 /*
- * mut. excl. ops lock is locked. Three possibilites:
+ * mut. excl. ops lock is locked. Three possibilities:
 * (1) some other op is running
 * (2) balance is running
 * (3) balance is paused -- special case (think resume)

@@ -5567,7 +5567,7 @@ long btrfs_ioctl(struct file *file, unsigned int
 ret = btrfs_sync_fs(file_inode(file)->i_sb, 1);
 /*
 * The transaction thread may want to do more work,
- * namely it pokes the cleaner ktread that will start
+ * namely it pokes the cleaner kthread that will start
 * processing uncleaned subvols.
 */
 wake_up_process(root->fs_info->transaction_kthread);

@@ -58,7 +58,7 @@ struct btrfs_ordered_sum {

 #define BTRFS_ORDERED_COMPRESSED 3 /* writing a zlib compressed extent */

-#define BTRFS_ORDERED_PREALLOC 4 /* set when writing to prealloced extent */
+#define BTRFS_ORDERED_PREALLOC 4 /* set when writing to preallocated extent */

 #define BTRFS_ORDERED_DIRECT 5 /* set when we're doing DIO with this extent */

@@ -85,7 +85,7 @@ struct btrfs_qgroup {

 /*
 * temp variables for accounting operations
- * Refer to qgroup_shared_accouting() for details.
+ * Refer to qgroup_shared_accounting() for details.
 */
 u64 old_refcnt;
 u64 new_refcnt;

@@ -499,7 +499,7 @@ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
 }
 /*
 * we call btrfs_free_qgroup_config() when umounting
- * filesystem and disabling quota, so we set qgroup_ulit
+ * filesystem and disabling quota, so we set qgroup_ulist
 * to be null here to avoid double free.
 */
 ulist_free(fs_info->qgroup_ulist);

@@ -1036,7 +1036,7 @@ static void qgroup_dirty(struct btrfs_fs_info *fs_info,

 /*
 * The easy accounting, if we are adding/removing the only ref for an extent
- * then this qgroup and all of the parent qgroups get their refrence and
+ * then this qgroup and all of the parent qgroups get their reference and
 * exclusive counts adjusted.
 *
 * Caller should hold fs_info->qgroup_lock.

@@ -1436,7 +1436,7 @@ int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,

 /*
 * No need to do lock, since this function will only be called in
- * btrfs_commmit_transaction().
+ * btrfs_commit_transaction().
 */
 node = rb_first(&delayed_refs->dirty_extent_root);
 while (node) {
@@ -1557,7 +1557,7 @@ static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
 * A: cur_old_roots < nr_old_roots (not exclusive before)
 * !A: cur_old_roots == nr_old_roots (possible exclusive before)
 * B: cur_new_roots < nr_new_roots (not exclusive now)
- * !B: cur_new_roots == nr_new_roots (possible exclsuive now)
+ * !B: cur_new_roots == nr_new_roots (possible exclusive now)
 *
 * Results:
 * +: Possible sharing -> exclusive -: Possible exclusive -> sharing
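A quick worked example of the four combinations above, with invented counts nr_old_roots = nr_new_roots = 2:

	cur_old_roots  cur_new_roots  condition  transition                  excl
	1              2              A && !B    sharing -> exclusive        +
	2              1              !A && B    exclusive -> sharing        -
	1              1              A && B     shared before and after     unchanged
	2              2              !A && !B   exclusive before and after  unchanged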
@@ -1851,7 +1851,7 @@ out:
 }

 /*
- * Copy the acounting information between qgroups. This is necessary
+ * Copy the accounting information between qgroups. This is necessary
 * when a snapshot or a subvolume is created. Throwing an error will
 * cause a transaction abort so we take extra care here to only error
 * when a readonly fs is a reasonable outcome.

@@ -2340,7 +2340,7 @@ out:
 mutex_unlock(&fs_info->qgroup_rescan_lock);

 /*
- * only update status, since the previous part has alreay updated the
+ * only update status, since the previous part has already updated the
 * qgroup info.
 */
 trans = btrfs_start_transaction(fs_info->quota_root, 1);
@@ -2542,8 +2542,7 @@ int btrfs_qgroup_reserve_data(struct inode *inode, u64 start, u64 len)
 changeset.bytes_changed = 0;
 changeset.range_changed = ulist_alloc(GFP_NOFS);
 ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
-		start + len -1, EXTENT_QGROUP_RESERVED, GFP_NOFS,
-		&changeset);
+		start + len -1, EXTENT_QGROUP_RESERVED, &changeset);
 trace_btrfs_qgroup_reserve_data(inode, start, len,
 		changeset.bytes_changed,
 		QGROUP_RESERVE);
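For orientation, a hedged sketch of how the changeset result is consumed in this function (the qgroup_reserve() helper name and the cleanup path are assumptions inferred from the calls visible in this hunk): only bytes whose EXTENT_QGROUP_RESERVED bit actually flipped are charged, so reserving an already-reserved range costs nothing extra.

	struct extent_changeset changeset;
	int ret;

	changeset.bytes_changed = 0;
	changeset.range_changed = ulist_alloc(GFP_NOFS);
	ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
				     start + len - 1, EXTENT_QGROUP_RESERVED,
				     &changeset);
	if (ret >= 0)
		/* assumed helper: charge only the newly flipped bytes */
		ret = qgroup_reserve(root, changeset.bytes_changed);
	ulist_free(changeset.range_changed);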
@@ -2580,8 +2579,7 @@ static int __btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len,
 return -ENOMEM;

 ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
-		start + len -1, EXTENT_QGROUP_RESERVED, GFP_NOFS,
-		&changeset);
+		start + len -1, EXTENT_QGROUP_RESERVED, &changeset);
 if (ret < 0)
 goto out;

@@ -2672,7 +2670,7 @@ void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes)
 }

 /*
- * Check qgroup reserved space leaking, normally at destory inode
+ * Check qgroup reserved space leaking, normally at destroy inode
 * time
 */
 void btrfs_qgroup_check_reserved_leak(struct inode *inode)

@@ -2688,7 +2686,7 @@ void btrfs_qgroup_check_reserved_leak(struct inode *inode)
 return;

 ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
-		EXTENT_QGROUP_RESERVED, GFP_NOFS, &changeset);
+		EXTENT_QGROUP_RESERVED, &changeset);

 WARN_ON(ret < 0);
 if (WARN_ON(changeset.bytes_changed)) {
@@ -576,7 +576,7 @@ static int rbio_can_merge(struct btrfs_raid_bio *last,
 * we can't merge with cached rbios, since the
 * idea is that when we merge the destination
 * rbio is going to run our IO for us. We can
- * steal from cached rbio's though, other functions
+ * steal from cached rbios though, other functions
 * handle that.
 */
 if (test_bit(RBIO_CACHE_BIT, &last->flags) ||

@@ -2368,7 +2368,7 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
 run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
 }

-/* Check scrubbing pairty and repair it */
+/* Check scrubbing parity and repair it */
 p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
 parity = kmap(p);
 if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))

@@ -2493,7 +2493,7 @@ static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
 /*
 * Here means we got one corrupted data stripe and one
 * corrupted parity on RAID6, if the corrupted parity
- * is scrubbing parity, luckly, use the other one to repair
+ * is scrubbing parity, luckily, use the other one to repair
 * the data, or we can not repair the data stripe.
 */
 if (failp != rbio->scrubp)
@@ -668,8 +668,8 @@ int find_inline_backref(struct extent_buffer *leaf, int slot,
 * roots of b-trees that reference the tree block.
 *
 * the basic idea of this function is check backrefs of a given block
- * to find upper level blocks that refernece the block, and then check
- * bakcrefs of these upper level blocks recursively. the recursion stop
+ * to find upper level blocks that reference the block, and then check
+ * backrefs of these upper level blocks recursively. the recursion stop
 * when tree root is reached or backrefs for the block is cached.
 *
 * NOTE: if we find backrefs for a block are cached, we know backrefs

@@ -1160,7 +1160,7 @@ out:
 if (!RB_EMPTY_NODE(&upper->rb_node))
 continue;

-/* Add this guy's upper edges to the list to proces */
+/* Add this guy's upper edges to the list to process */
 list_for_each_entry(edge, &upper->upper, list[LOWER])
 list_add_tail(&edge->list[UPPER], &list);
 if (list_empty(&upper->upper))

@@ -2396,7 +2396,7 @@ again:
 }

 /*
- * we keep the old last snapshod transid in rtranid when we
+ * we keep the old last snapshot transid in rtranid when we
 * created the relocation tree.
 */
 last_snap = btrfs_root_rtransid(&reloc_root->root_item);

@@ -2616,7 +2616,7 @@ static int reserve_metadata_space(struct btrfs_trans_handle *trans,
 * only one thread can access block_rsv at this point,
 * so we don't need hold lock to protect block_rsv.
 * we expand more reservation size here to allow enough
- * space for relocation and we will return eailer in
+ * space for relocation and we will return earlier in
 * enospc case.
 */
 rc->block_rsv->size = tmp + rc->extent_root->nodesize *

@@ -2814,7 +2814,7 @@ static void mark_block_processed(struct reloc_control *rc,
 u64 bytenr, u32 blocksize)
 {
 set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1,
-		EXTENT_DIRTY, GFP_NOFS);
+		EXTENT_DIRTY);
 }

 static void __mark_block_processed(struct reloc_control *rc,

@@ -3182,7 +3182,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
 page_start + offset == cluster->boundary[nr]) {
 set_extent_bits(&BTRFS_I(inode)->io_tree,
 		page_start, page_end,
-		EXTENT_BOUNDARY, GFP_NOFS);
+		EXTENT_BOUNDARY);
 nr++;
 }

@@ -4059,8 +4059,7 @@ restart:
 }

 btrfs_release_path(path);
-clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY,
-		GFP_NOFS);
+clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);

 if (trans) {
 btrfs_end_transaction_throttle(trans, rc->extent_root);

@@ -4591,7 +4590,7 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,

 /*
 * called before creating snapshot. it calculates metadata reservation
- * requried for relocating tree blocks in the snapshot
+ * required for relocating tree blocks in the snapshot
 */
 void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
 		u64 *bytes_to_reserve)
@@ -71,9 +71,9 @@ static void btrfs_read_root_item(struct extent_buffer *eb, int slot,
 * search_key: the key to search
 * path: the path we search
 * root_item: the root item of the tree we look for
- * root_key: the reak key of the tree we look for
+ * root_key: the root key of the tree we look for
 *
- * If ->offset of 'seach_key' is -1ULL, it means we are not sure the offset
+ * If ->offset of 'search_key' is -1ULL, it means we are not sure the offset
 * of the search key, just lookup the root with the highest offset for a
 * given objectid.
 *
@@ -745,7 +745,7 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
 * sure we read the bad mirror.
 */
 ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
-		EXTENT_DAMAGED, GFP_NOFS);
+		EXTENT_DAMAGED);
 if (ret) {
 /* set_extent_bits should give proper error */
 WARN_ON(ret > 0);

@@ -763,7 +763,7 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
 end, EXTENT_DAMAGED, 0, NULL);
 if (!corrected)
 clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
-		EXTENT_DAMAGED, GFP_NOFS);
+		EXTENT_DAMAGED);
 }

 out:

@@ -1044,7 +1044,7 @@ nodatasum_case:

 /*
 * !is_metadata and !have_csum, this means that the data
- * might not be COW'ed, that it might be modified
+ * might not be COWed, that it might be modified
 * concurrently. The general strategy to work on the
 * commit root does not help in the case when COW is not
 * used.

@@ -1125,7 +1125,7 @@ nodatasum_case:
 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
 * of mirror #2 is readable but the final checksum test fails,
 * then the 2nd page of mirror #3 could be tried, whether now
- * the final checksum succeedes. But this would be a rare
+ * the final checksum succeeds. But this would be a rare
 * exception and is therefore not implemented. At least it is
 * avoided that the good copy is overwritten.
 * A more useful improvement would be to pick the sectors
@@ -1831,7 +1831,7 @@ static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,

 /*
 * If we have a parent root we need to verify that the parent dir was
- * not delted and then re-created, if it was then we have no overwrite
+ * not deleted and then re-created, if it was then we have no overwrite
 * and we can just unlink this entry.
 */
 if (sctx->parent_root) {

@@ -4192,9 +4192,9 @@ static int __process_new_xattr(int num, struct btrfs_key *di_key,
 return -ENOMEM;

 /*
- * This hack is needed because empty acl's are stored as zero byte
+ * This hack is needed because empty acls are stored as zero byte
 * data in xattrs. Problem with that is, that receiving these zero byte
- * acl's will fail later. To fix this, we send a dummy acl list that
+ * acls will fail later. To fix this, we send a dummy acl list that
 * only contains the version number and no entries.
 */
 if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
@@ -36,7 +36,7 @@ static inline void put_unaligned_le8(u8 val, void *p)
 *
 * The end result is that anyone who #includes ctree.h gets a
 * declaration for the btrfs_set_foo functions and btrfs_foo functions,
- * which are wappers of btrfs_set_token_#bits functions and
+ * which are wrappers of btrfs_set_token_#bits functions and
 * btrfs_get_token_#bits functions, which are defined in this file.
 *
 * These setget functions do all the extent_buffer related mapping
@@ -112,7 +112,7 @@ static void btrfs_handle_error(struct btrfs_fs_info *fs_info)
 * Note that a running device replace operation is not
 * canceled here although there is no way to update
 * the progress. It would add the risk of a deadlock,
- * therefore the canceling is ommited. The only penalty
+ * therefore the canceling is omitted. The only penalty
 * is that some I/O remains active until the procedure
 * completes. The next time when the filesystem is
 * mounted writeable again, the device replace

@@ -1877,7 +1877,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
 int ret;

 /*
- * We aren't under the device list lock, so this is racey-ish, but good
+ * We aren't under the device list lock, so this is racy-ish, but good
 * enough for our purposes.
 */
 nr_devices = fs_info->fs_devices->open_devices;

@@ -1896,7 +1896,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
 if (!devices_info)
 return -ENOMEM;

-/* calc min stripe number for data space alloction */
+/* calc min stripe number for data space allocation */
 type = btrfs_get_alloc_profile(root, 1);
 if (type & BTRFS_BLOCK_GROUP_RAID0) {
 min_stripes = 2;

@@ -1932,7 +1932,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
 avail_space *= BTRFS_STRIPE_LEN;

 /*
- * In order to avoid overwritting the superblock on the drive,
+ * In order to avoid overwriting the superblock on the drive,
 * btrfs starts at an offset of at least 1MB when doing chunk
 * allocation.
 */
@@ -113,7 +113,7 @@ static int test_find_delalloc(void)
 * |--- delalloc ---|
 * |--- search ---|
 */
-set_extent_delalloc(&tmp, 0, 4095, NULL, GFP_KERNEL);
+set_extent_delalloc(&tmp, 0, 4095, NULL);
 start = 0;
 end = 0;
 found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
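For readers new to this self-test, a minimal sketch of the pattern these hunks exercise (arguments as visible above; the assertions are trimmed): mark a range delalloc in the test's private io_tree, then expect find_lock_delalloc_range() to find and lock exactly that range.

	/* 'tmp' is the test's private struct extent_io_tree */
	set_extent_delalloc(&tmp, 0, 4095, NULL);
	start = 0;
	end = 0;
	found = find_lock_delalloc_range(inode, &tmp, locked_page,
					 &start, &end, max_bytes);
	/* the real test then checks found, start == 0 and end == 4095 */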
@@ -144,7 +144,7 @@ static int test_find_delalloc(void)
 test_msg("Couldn't find the locked page\n");
 goto out_bits;
 }
-set_extent_delalloc(&tmp, 4096, max_bytes - 1, NULL, GFP_KERNEL);
+set_extent_delalloc(&tmp, 4096, max_bytes - 1, NULL);
 start = test_start;
 end = 0;
 found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,

@@ -176,7 +176,7 @@ static int test_find_delalloc(void)
 locked_page = find_lock_page(inode->i_mapping, test_start >>
 		PAGE_SHIFT);
 if (!locked_page) {
-test_msg("Could'nt find the locked page\n");
+test_msg("Couldn't find the locked page\n");
 goto out_bits;
 }
 start = test_start;

@@ -199,7 +199,7 @@ static int test_find_delalloc(void)
 *
 * We are re-using our test_start from above since it works out well.
 */
-set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, NULL, GFP_KERNEL);
+set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, NULL);
 start = test_start;
 end = 0;
 found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,

@@ -262,7 +262,7 @@ static int test_find_delalloc(void)
 }
 ret = 0;
 out_bits:
-clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1, GFP_KERNEL);
+clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1);
 out:
 if (locked_page)
 put_page(locked_page);
@@ -25,7 +25,7 @@
 #define BITS_PER_BITMAP (PAGE_SIZE * 8)

 /*
- * This test just does basic sanity checking, making sure we can add an exten
+ * This test just does basic sanity checking, making sure we can add an extent
 * entry and remove space from either end and the middle, and make sure we can
 * remove space that covers adjacent extent entries.
 */

@@ -396,8 +396,9 @@ static int check_cache_empty(struct btrfs_block_group_cache *cache)
 * wasn't optimal as they could be spread all over the block group while under
 * concurrency (extra overhead and fragmentation).
 *
- * This stealing approach is benefical, since we always prefer to allocate from
- * extent entries, both for clustered and non-clustered allocation requests.
+ * This stealing approach is beneficial, since we always prefer to allocate
+ * from extent entries, both for clustered and non-clustered allocation
+ * requests.
 */
 static int
 test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
@@ -264,7 +264,7 @@ static noinline int test_btrfs_get_extent(void)

 /*
 * We will just free a dummy node if it's ref count is 2 so we need an
- * extra ref so our searches don't accidently release our page.
+ * extra ref so our searches don't accidentally release our page.
 */
 extent_buffer_get(root->node);
 btrfs_set_header_nritems(root->node, 0);
@@ -234,7 +234,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
 }

 /*
- * Since the test trans doesn't havee the complicated delayed refs,
+ * Since the test trans doesn't have the complicated delayed refs,
 * we can only call btrfs_qgroup_account_extent() directly to test
 * quota.
 */
@@ -944,7 +944,7 @@ int btrfs_write_marked_extents(struct btrfs_root *root,

 err = convert_extent_bit(dirty_pages, start, end,
 		EXTENT_NEED_WAIT,
-		mark, &cached_state, GFP_NOFS);
+		mark, &cached_state);
 /*
 * convert_extent_bit can return -ENOMEM, which is most of the
 * time a temporary error. So when it happens, ignore the error
@@ -144,7 +144,7 @@ struct btrfs_pending_snapshot {
 /* block reservation for the operation */
 struct btrfs_block_rsv block_rsv;
 u64 qgroup_reserved;
-/* extra metadata reseration for relocation */
+/* extra metadata reservation for relocation */
 int error;
 bool readonly;
 struct list_head list;
@@ -2330,7 +2330,7 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
 break;

 /* for regular files, make sure corresponding
- * orhpan item exist. extents past the new EOF
+ * orphan item exist. extents past the new EOF
 * will be truncated later by orphan cleanup.
 */
 if (S_ISREG(mode)) {

@@ -3001,7 +3001,7 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
 break;

 clear_extent_bits(&log->dirty_log_pages, start, end,
-		EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
+		EXTENT_DIRTY | EXTENT_NEW);
 }

 /*

@@ -4914,7 +4914,7 @@ out_unlock:
 * the actual unlink operation, so if we do this check before a concurrent task
 * sets last_unlink_trans it means we've logged a consistent version/state of
 * all the inode items, otherwise we are not sure and must do a transaction
- * commit (the concurrent task migth have only updated last_unlink_trans before
+ * commit (the concurrent task might have only updated last_unlink_trans before
 * we logged the inode or it might have also done the unlink).
 */
 static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
@@ -4973,7 +4973,7 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
 while (1) {
 /*
 * If we are logging a directory then we start with our inode,
- * not our parents inode, so we need to skipp setting the
+ * not our parent's inode, so we need to skip setting the
 * logged_trans so that further down in the log code we don't
 * think this inode has already been logged.
 */

@@ -5357,7 +5357,7 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
 log_dentries = true;

 /*
- * On unlink we must make sure all our current and old parent directores
+ * On unlink we must make sure all our current and old parent directory
 * inodes are fully logged. This is to prevent leaving dangling
 * directory index entries in directories that were our parents but are
 * not anymore. Not doing this results in old parent directory being
@@ -28,7 +28,7 @@
 * }
 * ulist_free(ulist);
 *
- * This assumes the graph nodes are adressable by u64. This stems from the
+ * This assumes the graph nodes are addressable by u64. This stems from the
 * usage for tree enumeration in btrfs, where the logical addresses are
 * 64 bit.
 *
@@ -2190,7 +2190,7 @@ static int btrfs_prepare_sprout(struct btrfs_root *root)
 }

 /*
- * strore the expected generation for seed devices in device items.
+ * Store the expected generation for seed devices in device items.
 */
 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
 		struct btrfs_root *root)

@@ -3387,7 +3387,7 @@ static int should_balance_chunk(struct btrfs_root *root,
 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
 /*
 * Same logic as the 'limit' filter; the minimum cannot be
- * determined here because we do not have the global informatoin
+ * determined here because we do not have the global information
 * about the count of all chunks that satisfy the filters.
 */
 if (bargs->limit_max == 0)

@@ -6076,7 +6076,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
 {
 atomic_inc(&bbio->error);
 if (atomic_dec_and_test(&bbio->stripes_pending)) {
-/* Shoud be the original bio. */
+/* Should be the original bio. */
 WARN_ON(bio != bbio->orig_bio);

 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
@@ -6560,7 +6560,7 @@ int btrfs_read_sys_array(struct btrfs_root *root)
 set_extent_buffer_uptodate(sb);
 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
 /*
- * The sb extent buffer is artifical and just used to read the system array.
+ * The sb extent buffer is artificial and just used to read the system array.
 * set_extent_buffer_uptodate() call does not properly mark all it's
 * pages up-to-date when the page is larger: extent does not cover the
 * whole page and consequently check_page_uptodate does not find all