btrfs: extent_io: Move the BUG_ON() in flush_write_bio() one level up

We have a BUG_ON() in flush_write_bio() to handle the return value of
submit_one_bio().

Move the BUG_ON() one level up to all its callers.

This patch introduces a temporary variable, @flush_ret, to keep the code
change minimal. That variable will be cleaned up later when the error
handling is enhanced.

Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
Qu Wenruo 2019-03-20 14:27:41 +08:00 committed by David Sterba
parent 63489055e4
commit f4340622e0

View file

@ -170,15 +170,28 @@ static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
return blk_status_to_errno(ret); return blk_status_to_errno(ret);
} }
static void flush_write_bio(struct extent_page_data *epd) /*
* Submit bio from extent page data via submit_one_bio
*
* Return 0 if everything is OK.
* Return <0 for error.
*/
static int __must_check flush_write_bio(struct extent_page_data *epd)
{ {
if (epd->bio) { int ret = 0;
int ret;
if (epd->bio) {
ret = submit_one_bio(epd->bio, 0, 0); ret = submit_one_bio(epd->bio, 0, 0);
BUG_ON(ret < 0); /* -ENOMEM */ /*
* Clean up of epd->bio is handled by its endio function.
* And endio is either triggered by successful bio execution
* or the error handler of submit bio hook.
* So at this point, no matter what happened, we don't need
* to clean up epd->bio.
*/
epd->bio = NULL; epd->bio = NULL;
} }
return ret;
} }
int __init extent_io_init(void) int __init extent_io_init(void)
@ -3476,7 +3489,8 @@ lock_extent_buffer_for_io(struct extent_buffer *eb,
if (!btrfs_try_tree_write_lock(eb)) { if (!btrfs_try_tree_write_lock(eb)) {
flush = 1; flush = 1;
flush_write_bio(epd); ret = flush_write_bio(epd);
BUG_ON(ret < 0);
btrfs_tree_lock(eb); btrfs_tree_lock(eb);
} }
@ -3485,7 +3499,8 @@ lock_extent_buffer_for_io(struct extent_buffer *eb,
if (!epd->sync_io) if (!epd->sync_io)
return 0; return 0;
if (!flush) { if (!flush) {
flush_write_bio(epd); ret = flush_write_bio(epd);
BUG_ON(ret < 0);
flush = 1; flush = 1;
} }
while (1) { while (1) {
@ -3526,7 +3541,8 @@ lock_extent_buffer_for_io(struct extent_buffer *eb,
if (!trylock_page(p)) { if (!trylock_page(p)) {
if (!flush) { if (!flush) {
flush_write_bio(epd); ret = flush_write_bio(epd);
BUG_ON(ret < 0);
flush = 1; flush = 1;
} }
lock_page(p); lock_page(p);
@ -3718,6 +3734,7 @@ int btree_write_cache_pages(struct address_space *mapping,
.sync_io = wbc->sync_mode == WB_SYNC_ALL, .sync_io = wbc->sync_mode == WB_SYNC_ALL,
}; };
int ret = 0; int ret = 0;
int flush_ret;
int done = 0; int done = 0;
int nr_to_write_done = 0; int nr_to_write_done = 0;
struct pagevec pvec; struct pagevec pvec;
@ -3817,7 +3834,8 @@ retry:
index = 0; index = 0;
goto retry; goto retry;
} }
flush_write_bio(&epd); flush_ret = flush_write_bio(&epd);
BUG_ON(flush_ret < 0);
return ret; return ret;
} }
@ -3914,7 +3932,8 @@ retry:
* tmpfs file mapping * tmpfs file mapping
*/ */
if (!trylock_page(page)) { if (!trylock_page(page)) {
flush_write_bio(epd); ret = flush_write_bio(epd);
BUG_ON(ret < 0);
lock_page(page); lock_page(page);
} }
@ -3924,8 +3943,10 @@ retry:
} }
if (wbc->sync_mode != WB_SYNC_NONE) { if (wbc->sync_mode != WB_SYNC_NONE) {
if (PageWriteback(page)) if (PageWriteback(page)) {
flush_write_bio(epd); ret = flush_write_bio(epd);
BUG_ON(ret < 0);
}
wait_on_page_writeback(page); wait_on_page_writeback(page);
} }
@ -3986,6 +4007,7 @@ retry:
int extent_write_full_page(struct page *page, struct writeback_control *wbc) int extent_write_full_page(struct page *page, struct writeback_control *wbc)
{ {
int ret; int ret;
int flush_ret;
struct extent_page_data epd = { struct extent_page_data epd = {
.bio = NULL, .bio = NULL,
.tree = &BTRFS_I(page->mapping->host)->io_tree, .tree = &BTRFS_I(page->mapping->host)->io_tree,
@ -3995,7 +4017,8 @@ int extent_write_full_page(struct page *page, struct writeback_control *wbc)
ret = __extent_writepage(page, wbc, &epd); ret = __extent_writepage(page, wbc, &epd);
flush_write_bio(&epd); flush_ret = flush_write_bio(&epd);
BUG_ON(flush_ret < 0);
return ret; return ret;
} }
@ -4003,6 +4026,7 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
int mode) int mode)
{ {
int ret = 0; int ret = 0;
int flush_ret;
struct address_space *mapping = inode->i_mapping; struct address_space *mapping = inode->i_mapping;
struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
struct page *page; struct page *page;
@ -4035,7 +4059,8 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
start += PAGE_SIZE; start += PAGE_SIZE;
} }
flush_write_bio(&epd); flush_ret = flush_write_bio(&epd);
BUG_ON(flush_ret < 0);
return ret; return ret;
} }
@ -4043,6 +4068,7 @@ int extent_writepages(struct address_space *mapping,
struct writeback_control *wbc) struct writeback_control *wbc)
{ {
int ret = 0; int ret = 0;
int flush_ret;
struct extent_page_data epd = { struct extent_page_data epd = {
.bio = NULL, .bio = NULL,
.tree = &BTRFS_I(mapping->host)->io_tree, .tree = &BTRFS_I(mapping->host)->io_tree,
@ -4051,7 +4077,8 @@ int extent_writepages(struct address_space *mapping,
}; };
ret = extent_write_cache_pages(mapping, wbc, &epd); ret = extent_write_cache_pages(mapping, wbc, &epd);
flush_write_bio(&epd); flush_ret = flush_write_bio(&epd);
BUG_ON(flush_ret < 0);
return ret; return ret;
} }