Some miscellaneous ext4 fixes for 4.18; one fix is for a regression
introduced in 4.18-rc4.
 -----BEGIN PGP SIGNATURE-----
 
 iQEzBAABCAAdFiEEK2m5VNv+CHkogTfJ8vlZVpUNgaMFAlteF34ACgkQ8vlZVpUN
 gaMQugf+LjlbbncSEuPxZ+C3CnSGkEzjrg8IRylZA2uf04Z5Bax8K5gqvXLx7ZtF
 Qz3vzmrYpaUV8UiaMy0SGLCRWebwoxPEN7ZX3/W1PfeymP3wQ4DLw37059AzLfsq
 Vzh9w3N1At1plUee7iJ2MDBU830Q0a917jjnpZ+M0AtQx/BzP8QEISuzp4JWICqe
 NbJDVybMWoW2YOSpMPiihxSFqCDx5rMyAJ1vllboopZK+XAjpQ/visnLh3aT3o71
 7cTPl9gI2rbwYbJk8kM5fmXhWqSARHARV1bpZNOUnCAUU1E2Se7aETjggQ0QzJE/
 mIc7wCzFLrrY8+iakwdhb5Aw3qOPyg==
 =ZdXo
 -----END PGP SIGNATURE-----

Merge tag 'ext4_for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

Pull ext4 fixes from Ted Ts'o:
 "Some miscellaneous ext4 fixes for 4.18; one fix is for a regression
  introduced in 4.18-rc4.

  Sorry for the late-breaking pull. I was originally going to wait for
  the next merge window, but Eric Whitney found a regression introduced
  in 4.18-rc4, so I decided to push out the regression fix plus the
  other fixes now. (The other commits have been baking in linux-next
  since early July.)"

* tag 'ext4_for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
  ext4: fix check to prevent initializing reserved inodes
  ext4: check for allocation block validity with block group locked
  ext4: fix inline data updates with checksums enabled
  ext4: clear mmp sequence number when remounting read-only
  ext4: fix false negatives *and* false positives in ext4_check_descriptors()
Commit: 3cfb6772d4
Author: Linus Torvalds
Date:   2018-07-29 13:13:45 -07:00
6 changed files with 35 additions and 33 deletions

fs/ext4/balloc.c

@@ -368,6 +368,8 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
 		return -EFSCORRUPTED;
 
 	ext4_lock_group(sb, block_group);
+	if (buffer_verified(bh))
+		goto verified;
 	if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
 						    desc, bh))) {
 		ext4_unlock_group(sb, block_group);
@@ -386,6 +388,7 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
 		return -EFSCORRUPTED;
 	}
 	set_buffer_verified(bh);
+verified:
 	ext4_unlock_group(sb, block_group);
 	return 0;
 }
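
The two hunks above re-check buffer_verified(bh) after taking the group lock, so a
thread that loses the race skips past checksum work another CPU has already
completed instead of redoing it. Below is a minimal user-space sketch of that
check-again-under-the-lock shape; bitmap_buffer, csum_verify() and
validate_bitmap() are invented stand-ins (not ext4 or buffer-head APIs), and a
pthread mutex plays the role of ext4_lock_group().

/*
 * User-space model of the "re-check the verified bit under the lock"
 * pattern added above.  All names are illustrative stand-ins.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct bitmap_buffer {
	pthread_mutex_t lock;		/* stands in for the group lock */
	bool verified;			/* stands in for buffer_verified(bh) */
};

static bool csum_verify(struct bitmap_buffer *bh)
{
	(void)bh;
	return true;			/* pretend the checksum matches */
}

static int validate_bitmap(struct bitmap_buffer *bh)
{
	pthread_mutex_lock(&bh->lock);
	if (bh->verified)		/* another thread already did the work */
		goto verified;
	if (!csum_verify(bh)) {
		pthread_mutex_unlock(&bh->lock);
		return -1;		/* corresponds to -EFSBADCRC */
	}
	bh->verified = true;
verified:
	pthread_mutex_unlock(&bh->lock);
	return 0;
}

int main(void)
{
	struct bitmap_buffer bh = { .lock = PTHREAD_MUTEX_INITIALIZER };

	printf("first validate:  %d\n", validate_bitmap(&bh));
	printf("second validate: %d (short-circuits on the verified bit)\n",
	       validate_bitmap(&bh));
	return 0;
}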

fs/ext4/ialloc.c

@@ -90,6 +90,8 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
 		return -EFSCORRUPTED;
 
 	ext4_lock_group(sb, block_group);
+	if (buffer_verified(bh))
+		goto verified;
 	blk = ext4_inode_bitmap(sb, desc);
 	if (!ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
 					   EXT4_INODES_PER_GROUP(sb) / 8)) {
@@ -101,6 +103,7 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
 		return -EFSBADCRC;
 	}
 	set_buffer_verified(bh);
+verified:
 	ext4_unlock_group(sb, block_group);
 	return 0;
 }
@@ -1385,7 +1388,10 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
 			    ext4_itable_unused_count(sb, gdp)),
 			    sbi->s_inodes_per_block);
 
-	if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
+	if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) ||
+	    ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) -
+			       ext4_itable_unused_count(sb, gdp)) <
+			      EXT4_FIRST_INO(sb)))) {
 		ext4_error(sb, "Something is wrong with group %u: "
 			   "used itable blocks: %d; "
 			   "itable unused count: %u",

fs/ext4/inline.c

@@ -682,6 +682,10 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
 		goto convert;
 	}
 
+	ret = ext4_journal_get_write_access(handle, iloc.bh);
+	if (ret)
+		goto out;
+
 	flags |= AOP_FLAG_NOFS;
 
 	page = grab_cache_page_write_begin(mapping, 0, flags);
@@ -710,7 +714,7 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
 out_up_read:
 	up_read(&EXT4_I(inode)->xattr_sem);
 out:
-	if (handle)
+	if (handle && (ret != 1))
 		ext4_journal_stop(handle);
 	brelse(iloc.bh);
 	return ret;
@@ -752,6 +756,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
 	ext4_write_unlock_xattr(inode, &no_expand);
 	brelse(iloc.bh);
 	mark_inode_dirty(inode);
+out:
 	return copied;
 }
 
@@ -898,7 +903,6 @@ retry_journal:
 		goto out;
 	}
 
-
 	page = grab_cache_page_write_begin(mapping, 0, flags);
 	if (!page) {
 		ret = -ENOMEM;
@@ -916,6 +920,9 @@ retry_journal:
 		if (ret < 0)
 			goto out_release_page;
 	}
+	ret = ext4_journal_get_write_access(handle, iloc.bh);
+	if (ret)
+		goto out_release_page;
 
 	up_read(&EXT4_I(inode)->xattr_sem);
 	*pagep = page;
@@ -936,7 +943,6 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
 				  unsigned len, unsigned copied,
 				  struct page *page)
 {
-	int i_size_changed = 0;
 	int ret;
 
 	ret = ext4_write_inline_data_end(inode, pos, len, copied, page);
@@ -954,10 +960,8 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
 	 * But it's important to update i_size while still holding page lock:
 	 * page writeout could otherwise come in and zero beyond i_size.
 	 */
-	if (pos+copied > inode->i_size) {
+	if (pos+copied > inode->i_size)
 		i_size_write(inode, pos+copied);
-		i_size_changed = 1;
-	}
 
 	unlock_page(page);
 	put_page(page);
@@ -967,8 +971,7 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
 	 * ordering of page lock and transaction start for journaling
 	 * filesystems.
 	 */
-	if (i_size_changed)
-		mark_inode_dirty(inode);
+	mark_inode_dirty(inode);
 
 	return copied;
 }
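
Taken together, the inline.c hunks take journal write access on the inode buffer
before the inline payload is touched and rework the cleanup path so the handle is
stopped on every path except the one that reports the write as handled inline
(return value 1), where the caller finishes the write and stops the handle later.
The sketch below models only that control flow; start_handle, get_write_access
and copy_inline_data are hypothetical stubs, not the ext4/jbd2 calls.

/*
 * Control-flow model of the reworked ext4_try_to_write_inline_data()
 * paths: take write access before modifying anything, and keep the
 * transaction open only on the "handled inline" success path.
 */
#include <stdio.h>

struct handle { int open; };

static int start_handle(struct handle *h)
{
	h->open = 1;
	return 0;
}

static void stop_handle(struct handle *h)
{
	h->open = 0;
}

static int get_write_access(struct handle *h)
{
	(void)h;
	return 0;			/* always succeeds in this model */
}

static int copy_inline_data(void)
{
	return 0;
}

/* Returns 1 if the write was handled inline, <= 0 otherwise. */
static int try_to_write_inline(struct handle *h)
{
	int ret;

	ret = start_handle(h);
	if (ret)
		return ret;

	ret = get_write_access(h);	/* now done before any modification */
	if (ret)
		goto out;

	ret = copy_inline_data();
	if (ret)
		goto out;

	ret = 1;			/* caller finishes and stops the handle */
out:
	if (h->open && ret != 1)	/* mirrors "if (handle && (ret != 1))" */
		stop_handle(h);
	return ret;
}

int main(void)
{
	struct handle h = { 0 };
	int ret = try_to_write_inline(&h);

	printf("ret=%d, handle still open=%d\n", ret, h.open);
	if (h.open)
		stop_handle(&h);	/* the write-end path would do this */
	return 0;
}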

fs/ext4/inode.c

@@ -1389,9 +1389,10 @@ static int ext4_write_end(struct file *file,
 	loff_t old_size = inode->i_size;
 	int ret = 0, ret2;
 	int i_size_changed = 0;
+	int inline_data = ext4_has_inline_data(inode);
 
 	trace_ext4_write_end(inode, pos, len, copied);
-	if (ext4_has_inline_data(inode)) {
+	if (inline_data) {
 		ret = ext4_write_inline_data_end(inode, pos, len,
 						 copied, page);
 		if (ret < 0) {
@@ -1419,7 +1420,7 @@ static int ext4_write_end(struct file *file,
 	 * ordering of page lock and transaction start for journaling
 	 * filesystems.
 	 */
-	if (i_size_changed)
+	if (i_size_changed || inline_data)
 		ext4_mark_inode_dirty(handle, inode);
 
 	if (pos + len > inode->i_size && ext4_can_truncate(inode))
@@ -1493,6 +1494,7 @@ static int ext4_journalled_write_end(struct file *file,
 	int partial = 0;
 	unsigned from, to;
 	int size_changed = 0;
+	int inline_data = ext4_has_inline_data(inode);
 
 	trace_ext4_journalled_write_end(inode, pos, len, copied);
 	from = pos & (PAGE_SIZE - 1);
@@ -1500,7 +1502,7 @@ static int ext4_journalled_write_end(struct file *file,
 
 	BUG_ON(!ext4_handle_valid(handle));
 
-	if (ext4_has_inline_data(inode)) {
+	if (inline_data) {
 		ret = ext4_write_inline_data_end(inode, pos, len,
 						 copied, page);
 		if (ret < 0) {
@@ -1531,7 +1533,7 @@ static int ext4_journalled_write_end(struct file *file,
 	if (old_size < pos)
 		pagecache_isize_extended(inode, old_size, pos);
 
-	if (size_changed) {
+	if (size_changed || inline_data) {
 		ret2 = ext4_mark_inode_dirty(handle, inode);
 		if (!ret)
 			ret = ret2;
@@ -2028,11 +2030,7 @@ static int __ext4_journalled_writepage(struct page *page,
 	}
 
 	if (inline_data) {
-		BUFFER_TRACE(inode_bh, "get write access");
-		ret = ext4_journal_get_write_access(handle, inode_bh);
-
-		err = ext4_handle_dirty_metadata(handle, inode, inode_bh);
-
+		ret = ext4_mark_inode_dirty(handle, inode);
 	} else {
 		ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
 					     do_journal_get_write_access);
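
The inode.c hunks sample ext4_has_inline_data() once, up front, and then mark the
inode dirty whenever the write went to inline data, even when i_size did not
change, so the modified inline payload actually reaches the journal. A condensed
user-space model of that decision follows; struct inode, has_inline_data() and
mark_inode_dirty() are simplified stand-ins rather than the kernel definitions.

/*
 * Sketch of the "dirty the inode if either i_size changed or the write
 * went to inline data" decision added in ext4_write_end() above.
 */
#include <stdbool.h>
#include <stdio.h>

struct inode {
	long long i_size;
	bool inline_data;
	bool dirty;
};

static bool has_inline_data(const struct inode *inode)
{
	return inode->inline_data;
}

static void mark_inode_dirty(struct inode *inode)
{
	inode->dirty = true;
}

static void write_end(struct inode *inode, long long pos, long long copied)
{
	bool i_size_changed = false;
	bool inline_data = has_inline_data(inode);	/* sampled up front */

	if (pos + copied > inode->i_size) {
		inode->i_size = pos + copied;
		i_size_changed = true;
	}

	/* Before the fix only i_size_changed was consulted, so an inline
	 * overwrite within the existing i_size never dirtied the inode. */
	if (i_size_changed || inline_data)
		mark_inode_dirty(inode);
}

int main(void)
{
	struct inode inode = { .i_size = 60, .inline_data = true };

	write_end(&inode, 0, 20);	/* overwrite, i_size unchanged */
	printf("inode dirty after inline overwrite: %d\n", inode.dirty);
	return 0;
}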

fs/ext4/mmp.c

@@ -186,11 +186,8 @@ static int kmmpd(void *data)
 			goto exit_thread;
 		}
 
-		if (sb_rdonly(sb)) {
-			ext4_warning(sb, "kmmpd being stopped since filesystem "
-				     "has been remounted as readonly.");
-			goto exit_thread;
-		}
+		if (sb_rdonly(sb))
+			break;
 
 		diff = jiffies - last_update_time;
 		if (diff < mmp_update_interval * HZ)

fs/ext4/super.c

@@ -2342,7 +2342,7 @@ static int ext4_check_descriptors(struct super_block *sb,
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
 	ext4_fsblk_t last_block;
-	ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0) + 1;
+	ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
 	ext4_fsblk_t block_bitmap;
 	ext4_fsblk_t inode_bitmap;
 	ext4_fsblk_t inode_table;
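
The hunk above drops a stray "+ 1" from last_bg_block in
ext4_check_descriptors(): the group-descriptor blocks sit immediately after the
superblock, so the last block they occupy is sb_block + ext4_bg_num_gdb(sb, 0),
and the old value extended the reserved range one block too far. A tiny worked
example with assumed numbers (superblock at block 1, three descriptor blocks);
these values are illustrative, not taken from a real filesystem.

/* Worked example for the last_bg_block off-by-one fixed above. */
#include <stdio.h>

int main(void)
{
	unsigned long long sb_block = 1;	/* example superblock location */
	unsigned long long num_gdb = 3;		/* example ext4_bg_num_gdb(sb, 0) */

	/* Descriptor blocks occupy sb_block+1 .. sb_block+num_gdb. */
	unsigned long long old_last = sb_block + num_gdb + 1;	/* before the fix */
	unsigned long long new_last = sb_block + num_gdb;	/* after the fix */

	printf("descriptor blocks: %llu..%llu\n", sb_block + 1, sb_block + num_gdb);
	printf("last_bg_block: old=%llu (one block too far), new=%llu\n",
	       old_last, new_last);
	return 0;
}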
@@ -3141,14 +3141,8 @@ static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
 		if (!gdp)
 			continue;
 
-		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
-			continue;
-		if (group != 0)
+		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
 			break;
-		ext4_error(sb, "Inode table for bg 0 marked as "
-			   "needing zeroing");
-		if (sb_rdonly(sb))
-			return ngroups;
 	}
 
 	return group;
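
The ext4_has_uninit_itable() hunk above folds the old group-0 special case
(which logged an error and bailed out for read-only mounts) into a single
inverted test: the scan now simply returns the first group whose inode table has
not been marked zeroed. A stand-alone sketch of that simplified scan, using an
in-memory flag array rather than on-disk group descriptors:

/* Sketch of the simplified scan: return the first group whose inode
 * table is not yet marked zeroed.  The array and flag are illustrative. */
#include <stdio.h>

#define BG_INODE_ZEROED 0x0004	/* modeled on EXT4_BG_INODE_ZEROED */

int main(void)
{
	unsigned short bg_flags[] = {
		BG_INODE_ZEROED, BG_INODE_ZEROED, 0, BG_INODE_ZEROED,
	};
	unsigned ngroups = sizeof(bg_flags) / sizeof(bg_flags[0]);
	unsigned group;

	for (group = 0; group < ngroups; group++)
		if (!(bg_flags[group] & BG_INODE_ZEROED))
			break;		/* first group still needing init */

	printf("first uninitialized inode table: group %u of %u\n",
	       group, ngroups);
	return 0;
}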
@@ -4085,14 +4079,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 			goto failed_mount2;
 		}
 	}
+	sbi->s_gdb_count = db_count;
 	if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
 		ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
 		ret = -EFSCORRUPTED;
 		goto failed_mount2;
 	}
 
-	sbi->s_gdb_count = db_count;
-
 	timer_setup(&sbi->s_err_report, print_daily_error_info, 0);
 
 	/* Register extent status tree shrinker */
@@ -5213,6 +5206,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
 
 			if (sbi->s_journal)
 				ext4_mark_recovery_complete(sb, es);
+			if (sbi->s_mmp_tsk)
+				kthread_stop(sbi->s_mmp_tsk);
 		} else {
 			/* Make sure we can mount this feature set readwrite */
 			if (ext4_has_feature_readonly(sb) ||