Btrfs: check total number of devices when removing missing
If you have a disk failure in RAID1 and then add a new disk to the array, and then try to remove the missing volume, it will fail. The reason is the sanity check only looks at the total number of rw devices, which is just 2 because we have 2 good disks and 1 bad one. Instead check the total number of devices in the array to make sure we can actually remove the device. Tested this with a failed disk setup and with this test we can now run btrfs-vol -r missing /mount/point and it works fine. Signed-off-by: Josef Bacik <josef@redhat.com> Signed-off-by: Chris Mason <chris.mason@oracle.com>
This commit is contained in:
parent
7f59203abe
commit
035fe03a7a
1 changed file with 2 additions and 2 deletions
|
@@ -1135,7 +1135,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
 	    root->fs_info->avail_metadata_alloc_bits;

 	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
-	    root->fs_info->fs_devices->rw_devices <= 4) {
+	    root->fs_info->fs_devices->num_devices <= 4) {
 		printk(KERN_ERR "btrfs: unable to go below four devices "
 		       "on raid10\n");
 		ret = -EINVAL;
|
@@ -1143,7 +1143,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
 	}

 	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
-	    root->fs_info->fs_devices->rw_devices <= 2) {
+	    root->fs_info->fs_devices->num_devices <= 2) {
 		printk(KERN_ERR "btrfs: unable to go below two "
 		       "devices on raid1\n");
 		ret = -EINVAL;
|
Loading…
Reference in a new issue